#include <sys/queue.h>
#include <kern/backtrace.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/zalloc.h>
#include <kern/simple_lock.h>
#include <kern/locks.h>
#include <machine/machine_routines.h>
#include <libkern/libkern.h>
#include <libkern/tree.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/OSKextLib.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>

#include "kasan.h"
#include "kasan_internal.h"

#if KASAN_DYNAMIC_BLACKLIST

#define MAX_FRAMES 8
#define HASH_NBUCKETS 128U
#define HASH_MASK (HASH_NBUCKETS-1)
#define HASH_CACHE_NENTRIES 128

struct blacklist_entry {
	const char *kext_name;
	const char *func_name;
	access_t type_mask;

	/* internal */
	uint64_t count;
};

#include "kasan_blacklist_dynamic.h"
/* defines 'blacklist' and 'blacklist_entries' */

decl_simple_lock_data(static, _dybl_lock);
static access_t blacklisted_types; /* bitmap of access types with blacklist entries */

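/*
 * Simple lock protecting the callsite hash table and the kext range tree;
 * interrupts are disabled while it is held.
 */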
static void
dybl_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&_dybl_lock);
}

static void
dybl_unlock(boolean_t b)
{
	simple_unlock(&_dybl_lock);
	ml_set_interrupts_enabled(b);
}


/*
 * blacklist call site hash table
 */

struct blacklist_hash_entry {
	SLIST_ENTRY(blacklist_hash_entry) chain; // next element in chain
	struct blacklist_entry *ble;             // blacklist entry that this caller is an instance of
	uintptr_t addr;                          // callsite address
	uint64_t count;                          // hit count
};

struct hash_chain_head {
	SLIST_HEAD(, blacklist_hash_entry);
};

unsigned cache_next_entry = 0;
struct blacklist_hash_entry blhe_cache[HASH_CACHE_NENTRIES];
struct hash_chain_head hash_buckets[HASH_NBUCKETS];

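/*
 * Grab the next free entry from the fixed-size cache of hash entries.
 * Returns NULL once the cache is exhausted; entries are never freed
 * individually, only dropped all at once by hash_drop().
 */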
static struct blacklist_hash_entry *
alloc_hash_entry(void)
{
	unsigned idx = cache_next_entry++;
	if (idx >= HASH_CACHE_NENTRIES) {
		cache_next_entry = HASH_CACHE_NENTRIES; // avoid overflow
		return NULL;
	}
	return &blhe_cache[idx];
}

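/*
 * Hash a callsite address into one of HASH_NBUCKETS buckets.
 */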
static unsigned
hash_addr(uintptr_t addr)
{
	addr ^= (addr >> 7); /* mix in some of the bits likely to select the kext */
	return (unsigned)addr & HASH_MASK;
}

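/*
 * Look up a callsite address in the hash table. Returns the cached entry,
 * or NULL if this callsite has not matched a blacklist entry before.
 */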
static struct blacklist_hash_entry *
blacklist_hash_lookup(uintptr_t addr)
{
	unsigned idx = hash_addr(addr);
	struct blacklist_hash_entry *blhe;

	SLIST_FOREACH(blhe, &hash_buckets[idx], chain) {
		if (blhe->addr == addr) {
			return blhe;
		}
	}

	return NULL;
}

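/*
 * Record that callsite `addr' matched blacklist entry `ble', so future hits
 * at the same callsite take the fast path. Fails silently (returns NULL) if
 * the entry cache is exhausted.
 */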
static struct blacklist_hash_entry *
blacklist_hash_add(uintptr_t addr, struct blacklist_entry *ble)
{
	unsigned idx = hash_addr(addr);

	struct blacklist_hash_entry *blhe = alloc_hash_entry();
	if (!blhe) {
		return NULL;
	}

	blhe->ble = ble;
	blhe->addr = addr;
	blhe->count = 1;

	SLIST_INSERT_HEAD(&hash_buckets[idx], blhe, chain);

	return blhe;
}

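/*
 * Throw away all cached callsite entries. Called when a kext that produced
 * at least one hit is unloaded, since its addresses may later be reused.
 */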
static void
hash_drop(void)
{
	if (cache_next_entry > 0) {
		bzero(&hash_buckets, sizeof(hash_buckets));
		bzero(&blhe_cache, sizeof(struct blacklist_hash_entry) * cache_next_entry);
		cache_next_entry = 0;
	}
}

/*
 * kext range lookup tree
 */

struct range_tree_entry {
	RB_ENTRY(range_tree_entry) tree;

	uintptr_t base;

	struct {
		uint64_t size : 63;
		uint64_t accessed : 1; // blacklist entry exists in this range
	};

	/* kext name */
	const char *bundleid;

	/* mach header for corresponding kext */
	kernel_mach_header_t *mh;
};

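/*
 * Compare two ranges. A zero-sized entry is treated as a lookup key: it
 * compares equal to any range that contains its base address, so RB_FIND
 * with a { .base = addr, .size = 0 } key returns the enclosing segment.
 */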
static int NOINLINE
range_tree_cmp(const struct range_tree_entry *e1, const struct range_tree_entry *e2)
{
	if (e1->size == 0 || e2->size == 0) {
		/* lookup */
		if (e1->base + e1->size < e2->base) {
			return -1;
		} else if (e1->base > e2->base + e2->size) {
			return 1;
		} else {
			return 0;
		}
	} else {
		/* compare */
		if (e1->base + e1->size <= e2->base) {
			return -1;
		} else if (e1->base >= e2->base + e2->size) {
			return 1;
		} else {
			panic("bad compare\n");
			return 0;
		}
	}
}

RB_HEAD(range_tree, range_tree_entry) range_tree_root;
RB_PROTOTYPE(range_tree, range_tree_entry, tree, range_tree_cmp);
RB_GENERATE(range_tree, range_tree_entry, tree, range_tree_cmp);

/* for each executable section, insert a range tree entry */
void
kasan_dybl_load_kext(uintptr_t addr, const char *kextname)
{
	int i;

	struct load_command *cmd = NULL;
	kernel_mach_header_t *mh = (void *)addr;

	cmd = (struct load_command *)&mh[1];

	for (i = 0; i < (int)mh->ncmds; i++) {
		if (cmd->cmd == LC_SEGMENT_KERNEL) {
			kernel_segment_command_t *seg = (void *)cmd;
			bool is_exec = seg->initprot & VM_PROT_EXECUTE;

#if CONFIG_EMBEDDED
			if (is_exec && strcmp("__TEXT_EXEC", seg->segname) != 0) {
				is_exec = false;
			}
#endif

			if (is_exec) {
				struct range_tree_entry *e = kalloc(sizeof(struct range_tree_entry));
				bzero(e, sizeof(*e));

				e->base = seg->vmaddr;
				e->size = seg->vmsize;
				e->bundleid = kextname;
				e->mh = mh;

				boolean_t flag;
				dybl_lock(&flag);
				RB_INSERT(range_tree, &range_tree_root, e);
				dybl_unlock(flag);
			}
		}

		cmd = (void *)((uintptr_t)cmd + cmd->cmdsize);
	}
}

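/* remove the range tree entries for each of the kext's executable segments */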
void
kasan_dybl_unload_kext(uintptr_t addr)
{
	int i;

	struct load_command *cmd = NULL;
	kernel_mach_header_t *mh = (void *)addr;

	cmd = (struct load_command *)&mh[1];

	for (i = 0; i < (int)mh->ncmds; i++) {
		if (cmd->cmd == LC_SEGMENT_KERNEL) {
			kernel_segment_command_t *seg = (void *)cmd;
			bool is_exec = seg->initprot & VM_PROT_EXECUTE;

#if CONFIG_EMBEDDED
			if (is_exec && strcmp("__TEXT_EXEC", seg->segname) != 0) {
				is_exec = false;
			}
#endif

			if (is_exec) {
				struct range_tree_entry key = { .base = seg->vmaddr, .size = 0 };
				struct range_tree_entry *e;
				boolean_t flag;
				dybl_lock(&flag);
				e = RB_FIND(range_tree, &range_tree_root, &key);
				if (e) {
					RB_REMOVE(range_tree, &range_tree_root, e);
					if (e->accessed) {
						/* there was a blacklist entry in this range */
						hash_drop();
					}
				}
				dybl_unlock(flag);

				if (e) {
					kfree(e, sizeof(*e));
				}
			}
		}

		cmd = (void *)((uintptr_t)cmd + cmd->cmdsize);
	}
}

/*
 * return the closest function name at or before addr
 */
static const NOINLINE char *
addr_to_func(uintptr_t addr, const kernel_mach_header_t *mh)
{
	int i;
	uintptr_t cur_addr = 0;

	const struct load_command *cmd = NULL;
	const struct symtab_command *st = NULL;
	const kernel_segment_command_t *le = NULL;
	const char *strings;
	const kernel_nlist_t *syms;
	const char *cur_name = NULL;

	cmd = (const struct load_command *)&mh[1];

	/*
	 * find the symtab command and linkedit segment
	 */
	for (i = 0; i < (int)mh->ncmds; i++) {
		if (cmd->cmd == LC_SYMTAB) {
			st = (const struct symtab_command *)cmd;
		} else if (cmd->cmd == LC_SEGMENT_KERNEL) {
			const kernel_segment_command_t *seg = (const void *)cmd;
			if (!strcmp(seg->segname, SEG_LINKEDIT)) {
				le = (const void *)cmd;
			}
		}
		cmd = (const void *)((uintptr_t)cmd + cmd->cmdsize);
	}

	if (!st || !le) {
		/* defensive: bail out if this image has no symtab or __LINKEDIT segment */
		return NULL;
	}

	/* locate the symbols and strings in the symtab */
	strings = (const void *)((le->vmaddr - le->fileoff) + st->stroff);
	syms = (const void *)((le->vmaddr - le->fileoff) + st->symoff);

	/*
	 * iterate the symbols, looking for the closest one to `addr'
	 */
	for (i = 0; i < (int)st->nsyms; i++) {

		uint8_t n_type = syms[i].n_type;
		const char *name = strings + syms[i].n_un.n_strx;

		if (n_type & N_STAB) {
			/* ignore debug entries */
			continue;
		}

		n_type &= N_TYPE;
		if (syms[i].n_un.n_strx == 0 || !(n_type == N_SECT || n_type == N_ABS)) {
			/* only use named and defined symbols */
			continue;
		}

#if 0
		if (mh != &_mh_execute_header) {
			printf("sym '%s' 0x%x 0x%lx\n", name, (unsigned)syms[i].n_type, (unsigned long)syms[i].n_value);
		}
#endif

		if (*name == '_') {
			name += 1;
		}

		/* this symbol is closer than the one we had */
		if (syms[i].n_value <= addr && syms[i].n_value > cur_addr) {
			cur_name = name;
			cur_addr = syms[i].n_value;
		}
	}

	/* best guess for name of function at addr */
	return cur_name;
}

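/*
 * Decide whether a violation of access type `type' should be suppressed:
 * walk a short backtrace and return true if any caller matches a blacklist
 * entry for that type. Matching callsites are cached in the hash table so
 * that repeat hits avoid the symbolication slow path.
 */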
bool OS_NOINLINE
kasan_is_blacklisted(access_t type)
{
	uint32_t nframes = 0;
	uintptr_t frames[MAX_FRAMES];
	uintptr_t *bt = frames;

	assert(__builtin_popcount(type) == 1);

	if ((type & blacklisted_types) == 0) {
		/* early exit for types with no blacklist entries */
		return false;
	}

	nframes = backtrace_frame(bt, MAX_FRAMES, __builtin_frame_address(0));
	boolean_t flag;

	if (nframes >= 1) {
		/* ignore direct caller */
		nframes -= 1;
		bt += 1;
	}

	struct blacklist_hash_entry *blhe = NULL;

	dybl_lock(&flag);

	/* First check if any frame hits in the hash */
	for (uint32_t i = 0; i < nframes; i++) {
		blhe = blacklist_hash_lookup(bt[i]);
		if (blhe) {
			if ((blhe->ble->type_mask & type) != type) {
				/* wrong type */
				continue;
			}

			/* hit */
			blhe->count++;
			blhe->ble->count++;
			// printf("KASan: blacklist cache hit (%s:%s [0x%lx] 0x%x)\n",
			//		ble->kext_name ?: "" , ble->func_name ?: "", VM_KERNEL_UNSLIDE(bt[i]), mask);
			dybl_unlock(flag);
			return true;
		}
	}

	/* no hits - slowpath */
	for (uint32_t i = 0; i < nframes; i++) {

		const char *kextname = NULL;
		const char *funcname = NULL;

		struct range_tree_entry key = { .base = bt[i], .size = 0 };
		struct range_tree_entry *e = RB_FIND(range_tree, &range_tree_root, &key);

		if (!e) {
			/* no match at this address - kinda weird? */
			continue;
		}

		/* get the function and bundle name for the current frame */
		funcname = addr_to_func(bt[i], e->mh);
		if (e->bundleid) {
			kextname = strrchr(e->bundleid, '.');
			if (kextname) {
				kextname++;
			} else {
				kextname = e->bundleid;
			}
		}

		// printf("%s: a = 0x%016lx,0x%016lx f = %s, k = %s\n", __func__, bt[i], VM_KERNEL_UNSLIDE(bt[i]), funcname, kextname);

		/* check if kextname or funcname are in the blacklist */
		for (size_t j = 0; j < blacklist_entries; j++) {
			struct blacklist_entry *ble = &blacklist[j];
			uint64_t count;

			if ((ble->type_mask & type) != type) {
				/* wrong type */
				continue;
			}

			if (ble->kext_name && kextname && strncmp(kextname, ble->kext_name, KMOD_MAX_NAME) != 0) {
				/* wrong kext name */
				continue;
			}

			if (ble->func_name && funcname && strncmp(funcname, ble->func_name, 128) != 0) {
				/* wrong func name */
				continue;
			}

			/* found a matching function or kext */
			blhe = blacklist_hash_add(bt[i], ble);
			count = ble->count++;
			e->accessed = 1;

			dybl_unlock(flag);

			if (count == 0) {
				printf("KASan: ignoring blacklisted violation (%s:%s [0x%lx] %d 0x%x)\n",
						kextname, funcname, VM_KERNEL_UNSLIDE(bt[i]), i, type);
			}

			return true;
		}
	}

	dybl_unlock(flag);
	return false;
}

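/*
 * Append a boot-arg supplied entry to the blacklist. The kext and function
 * names are copied into kalloc'ed storage because they point into a
 * caller-owned buffer.
 */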
static void
add_blacklist_entry(const char *kext, const char *func, access_t type)
{
	assert(kext || func);
	struct blacklist_entry *ble = &blacklist[blacklist_entries++];

	if (blacklist_entries > blacklist_max_entries) {
		panic("KASan: dynamic blacklist entries exhausted\n");
	}

	if (kext) {
		size_t sz = __nosan_strlen(kext) + 1;
		if (sz > 1) {
			char *s = kalloc(sz);
			__nosan_strlcpy(s, kext, sz);
			ble->kext_name = s;
		}
	}

	if (func) {
		size_t sz = __nosan_strlen(func) + 1;
		if (sz > 1) {
			char *s = kalloc(sz);
			__nosan_strlcpy(s, func, sz);
			ble->func_name = s;
		}
	}

	ble->type_mask = type;
}

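/* map human-readable access type names (as used in the boot-arg) to access_t masks */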
#define TS(x) { .type = TYPE_##x, .str = #x }

static const struct {
	const access_t type;
	const char * const str;
} typemap[] = {
	TS(LOAD),
	TS(STORE),
	TS(MEMR),
	TS(MEMW),
	TS(STRR),
	TS(STRW),
	TS(KFREE),
	TS(ZFREE),
	TS(FSFREE),
	TS(UAF),
	TS(POISON_GLOBAL),
	TS(POISON_HEAP),
	TS(MEM),
	TS(STR),
	TS(READ),
	TS(WRITE),
	TS(RW),
	TS(FREE),
	TS(NORMAL),
	TS(DYNAMIC),
	TS(POISON),
	TS(ALL),

	/* convenience aliases */
	{ .type = TYPE_POISON_GLOBAL, .str = "GLOB" },
	{ .type = TYPE_POISON_HEAP, .str = "HEAP" },
};
static size_t typemap_sz = sizeof(typemap)/sizeof(typemap[0]);

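/*
 * Convert a type string from the boot-arg into an access_t mask.
 * Unknown or empty strings fall back to TYPE_NORMAL.
 */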
static inline access_t
map_type(const char *str)
{
	if (strlen(str) == 0) {
		return TYPE_NORMAL;
	}

	/* convert type string to integer ID */
	for (size_t i = 0; i < typemap_sz; i++) {
		if (strcasecmp(str, typemap[i].str) == 0) {
			return typemap[i].type;
		}
	}

	printf("KASan: unknown blacklist type `%s', assuming `normal'\n", str);
	return TYPE_NORMAL;
}

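/*
 * Initialize the dynamic blacklist: parse any boot-arg supplied entries,
 * record which access types are blacklisted, and register the kernel's own
 * executable ranges as a fake kext.
 */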
void
kasan_init_dybl(void)
{
	simple_lock_init(&_dybl_lock, 0);

	/*
	 * dynamic blacklist entries via boot-arg. Syntax is:
	 *  kasan.bl=kext1:func1:type1,kext2:func2:type2,...
	 */
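	/*
	 * For example (function names here are placeholders):
	 *  kasan.bl=__kernel__:some_function:LOAD,__kernel__:other_function
	 * would suppress LOAD reports from some_function, and reports of the
	 * default (TYPE_NORMAL) access types from other_function, in the
	 * kernel proper.
	 */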
	char buf[256] = {};
	char *bufp = buf;
	if (PE_parse_boot_arg_str("kasan.bl", bufp, sizeof(buf))) {
		char *kext;
		while ((kext = strsep(&bufp, ",")) != NULL) {
			access_t type = TYPE_NORMAL;
			char *func = strchr(kext, ':');
			if (func) {
				*func++ = 0;
			}
			/* guard against entries with no ':func' part */
			char *typestr = func ? strchr(func, ':') : NULL;
			if (typestr) {
				*typestr++ = 0;
				type = map_type(typestr);
			}
			add_blacklist_entry(kext, func, type);
		}
	}

	/* collect bitmask of blacklisted types */
	for (size_t j = 0; j < blacklist_entries; j++) {
		struct blacklist_entry *ble = &blacklist[j];
		blacklisted_types |= ble->type_mask;
	}

	/* add the fake kernel kext */
	kasan_dybl_load_kext((uintptr_t)&_mh_execute_header, "__kernel__");
}

#else /* KASAN_DYNAMIC_BLACKLIST */

bool
kasan_is_blacklisted(access_t __unused type)
{
	return false;
}
#endif