#include <sys/queue.h>
#include <kern/backtrace.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/zalloc.h>
#include <kern/simple_lock.h>
#include <kern/locks.h>
#include <machine/machine_routines.h>
#include <libkern/libkern.h>
#include <libkern/tree.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/OSKextLib.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>

#include "kasan.h"
#include "kasan_internal.h"

#if KASAN_DYNAMIC_BLACKLIST

#define MAX_FRAMES 8
#define HASH_NBUCKETS 128U
#define HASH_MASK (HASH_NBUCKETS-1)
#define HASH_CACHE_NENTRIES 128

struct blacklist_entry {
	const char *kext_name;
	const char *func_name;
	const unsigned type_mask;

	/* internal */
	uint64_t count;
};

#include "kasan_blacklist_dynamic.h"
static const size_t blacklist_entries = sizeof(blacklist)/sizeof(blacklist[0]);
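/*
 * kasan_blacklist_dynamic.h (included above) supplies the `blacklist'
 * array scanned by the slow path below. Purely for illustration (not an
 * entry from the real table), an entry matching any access type in a
 * hypothetical kext/function would look like:
 *
 *	{ "ExampleDriver", "example_alloc", ~0U, 0 },
 */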

decl_simple_lock_data(static, _dybl_lock);

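/*
 * All blacklist state is guarded by _dybl_lock, taken with interrupts
 * disabled; the previous interrupt state is returned through `b' and
 * restored on unlock.
 */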
static void
dybl_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&_dybl_lock);
}

static void
dybl_unlock(boolean_t b)
{
	simple_unlock(&_dybl_lock);
	ml_set_interrupts_enabled(b);
}


/*
 * blacklist call site hash table
 */

struct blacklist_hash_entry {
	SLIST_ENTRY(blacklist_hash_entry) chain; // next element in chain
	struct blacklist_entry *ble;             // blacklist entry that this caller is an instance of
	uintptr_t addr;                          // callsite address
	uint64_t count;                          // hit count
};

struct hash_chain_head {
	SLIST_HEAD(, blacklist_hash_entry);
};

unsigned cache_next_entry = 0;
struct blacklist_hash_entry blhe_cache[HASH_CACHE_NENTRIES];
struct hash_chain_head hash_buckets[HASH_NBUCKETS];

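/*
 * Call site cache entries come from a small static pool, so the lookup
 * path never allocates. Once the pool is exhausted, new call sites are
 * simply not cached and go through the slow path on every check.
 */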
static struct blacklist_hash_entry *
alloc_hash_entry(void)
{
	unsigned idx = cache_next_entry++;
	if (idx >= HASH_CACHE_NENTRIES) {
		cache_next_entry = HASH_CACHE_NENTRIES; // avoid overflow
		return NULL;
	}
	return &blhe_cache[idx];
}

static unsigned
hash_addr(uintptr_t addr)
{
	addr ^= (addr >> 7); /* mix in some of the bits likely to select the kext */
	return (unsigned)addr & HASH_MASK;
}

static struct blacklist_hash_entry *
blacklist_hash_lookup(uintptr_t addr)
{
	unsigned idx = hash_addr(addr);
	struct blacklist_hash_entry *blhe;

	SLIST_FOREACH(blhe, &hash_buckets[idx], chain) {
		if (blhe->addr == addr) {
			return blhe;
		}
	}

	return NULL;
}

static struct blacklist_hash_entry *
blacklist_hash_add(uintptr_t addr, struct blacklist_entry *ble)
{
	unsigned idx = hash_addr(addr);

	struct blacklist_hash_entry *blhe = alloc_hash_entry();
	if (!blhe) {
		return NULL;
	}

	blhe->ble = ble;
	blhe->addr = addr;
	blhe->count = 1;

	SLIST_INSERT_HEAD(&hash_buckets[idx], blhe, chain);

	return blhe;
}

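/*
 * Throw away the entire call site cache. Used when a kext that produced
 * cached entries is unloaded, since its text addresses may be reused.
 */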
static void
hash_drop(void)
{
	if (cache_next_entry > 0) {
		bzero(&hash_buckets, sizeof(hash_buckets));
		bzero(&blhe_cache, sizeof(struct blacklist_hash_entry) * cache_next_entry);
		cache_next_entry = 0;
	}
}

/*
 * kext range lookup tree
 */

struct range_tree_entry {
	RB_ENTRY(range_tree_entry) tree;

	uintptr_t base;

	struct {
		uint64_t size : 63;
		uint64_t accessed : 1; // blacklist entry exists in this range
	};

	/* kext name */
	const char *bundleid;

	/* mach header for corresponding kext */
	kernel_mach_header_t *mh;
};

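/*
 * A zero `size' marks a lookup key: it compares equal to any stored range
 * that contains the address. Stored ranges are expected to be disjoint,
 * so two overlapping non-zero-sized entries trigger the panic below.
 */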
static int NOINLINE
range_tree_cmp(const struct range_tree_entry *e1, const struct range_tree_entry *e2)
{
	if (e1->size == 0 || e2->size == 0) {
		/* lookup */
		if (e1->base + e1->size < e2->base) {
			return -1;
		} else if (e1->base > e2->base + e2->size) {
			return 1;
		} else {
			return 0;
		}
	} else {
		/* compare */
		if (e1->base + e1->size <= e2->base) {
			return -1;
		} else if (e1->base >= e2->base + e2->size) {
			return 1;
		} else {
			panic("bad compare\n");
			return 0;
		}
	}
}

RB_HEAD(range_tree, range_tree_entry) range_tree_root;
RB_PROTOTYPE(range_tree, range_tree_entry, tree, range_tree_cmp);
RB_GENERATE(range_tree, range_tree_entry, tree, range_tree_cmp);

/* for each executable segment, insert a range tree entry */
void
kasan_dybl_load_kext(uintptr_t addr, const char *kextname)
{
	int i;

	struct load_command *cmd = NULL;
	kernel_mach_header_t *mh = (void *)addr;

	cmd = (struct load_command *)&mh[1];

	for (i = 0; i < (int)mh->ncmds; i++) {
		if (cmd->cmd == LC_SEGMENT_KERNEL) {
			kernel_segment_command_t *seg = (void *)cmd;
			bool is_exec = seg->initprot & VM_PROT_EXECUTE;

#if CONFIG_EMBEDDED
			if (is_exec && strcmp("__TEXT_EXEC", seg->segname) != 0) {
				is_exec = false;
			}
#endif

			if (is_exec) {
				struct range_tree_entry *e = kalloc(sizeof(struct range_tree_entry));
				bzero(e, sizeof(*e));

				e->base = seg->vmaddr;
				e->size = seg->vmsize;
				e->bundleid = kextname;
				e->mh = mh;

				boolean_t flag;
				dybl_lock(&flag);
				RB_INSERT(range_tree, &range_tree_root, e);
				dybl_unlock(flag);
			}
		}

		cmd = (void *)((uintptr_t)cmd + cmd->cmdsize);
	}
}

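/*
 * On unload, remove the range entry for each executable segment. If a
 * blacklist hit was ever cached inside one of these ranges, drop the
 * whole call site cache, since those addresses are about to go stale.
 */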
void
kasan_dybl_unload_kext(uintptr_t addr)
{
	int i;

	struct load_command *cmd = NULL;
	kernel_mach_header_t *mh = (void *)addr;

	cmd = (struct load_command *)&mh[1];

	for (i = 0; i < (int)mh->ncmds; i++) {
		if (cmd->cmd == LC_SEGMENT_KERNEL) {
			kernel_segment_command_t *seg = (void *)cmd;
			bool is_exec = seg->initprot & VM_PROT_EXECUTE;

#if CONFIG_EMBEDDED
			if (is_exec && strcmp("__TEXT_EXEC", seg->segname) != 0) {
				is_exec = false;
			}
#endif

			if (is_exec) {
				struct range_tree_entry key = { .base = seg->vmaddr, .size = 0 };
				struct range_tree_entry *e;
				boolean_t flag;
				dybl_lock(&flag);
				e = RB_FIND(range_tree, &range_tree_root, &key);
				if (e) {
					RB_REMOVE(range_tree, &range_tree_root, e);
					if (e->accessed) {
						/* there was a blacklist entry in this range */
						hash_drop();
					}
				}
				dybl_unlock(flag);

				if (e) {
					kfree(e, sizeof(*e));
				}
			}
		}

		cmd = (void *)((uintptr_t)cmd + cmd->cmdsize);
	}
}

/*
 * return the closest function name at or before addr
 */
static const NOINLINE char *
addr_to_func(uintptr_t addr, const kernel_mach_header_t *mh)
{
	int i;
	uintptr_t cur_addr = 0;

	const struct load_command *cmd = NULL;
	const struct symtab_command *st = NULL;
	const kernel_segment_command_t *le = NULL;
	const char *strings;
	const kernel_nlist_t *syms;
	const char *cur_name = NULL;

	cmd = (const struct load_command *)&mh[1];

	/*
	 * find the symtab command and linkedit segment
	 */
	for (i = 0; i < (int)mh->ncmds; i++) {
		if (cmd->cmd == LC_SYMTAB) {
			st = (const struct symtab_command *)cmd;
		} else if (cmd->cmd == LC_SEGMENT_KERNEL) {
			const kernel_segment_command_t *seg = (const void *)cmd;
			if (!strcmp(seg->segname, SEG_LINKEDIT)) {
				le = (const void *)cmd;
			}
		}
		cmd = (const void *)((uintptr_t)cmd + cmd->cmdsize);
	}

	/* locate the symbols and strings in the symtab */
	strings = (const void *)((le->vmaddr - le->fileoff) + st->stroff);
	syms = (const void *)((le->vmaddr - le->fileoff) + st->symoff);

	/*
	 * iterate the symbols, looking for the closest one to `addr'
	 */
	for (i = 0; i < (int)st->nsyms; i++) {

		uint8_t n_type = syms[i].n_type;
		const char *name = strings + syms[i].n_un.n_strx;

		if (n_type & N_STAB) {
			/* ignore debug entries */
			continue;
		}

		n_type &= N_TYPE;
		if (syms[i].n_un.n_strx == 0 || !(n_type == N_SECT || n_type == N_ABS)) {
			/* only use named and defined symbols */
			continue;
		}

#if 0
		if (mh != &_mh_execute_header) {
			printf("sym '%s' 0x%x 0x%lx\n", name, (unsigned)syms[i].n_type, (unsigned long)syms[i].n_value);
		}
#endif

		if (*name == '_') {
			name += 1;
		}

		/* this symbol is closer than the one we had */
		if (syms[i].n_value <= addr && syms[i].n_value > cur_addr) {
			cur_name = name;
			cur_addr = syms[i].n_value;
		}
	}

	/* best guess for name of function at addr */
	return cur_name;
}

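/*
 * Check whether the current backtrace matches a blacklist entry. Fast
 * path: look each frame up in the call site hash. Slow path: symbolicate
 * each frame via the range tree, scan the static blacklist, and cache any
 * match for next time.
 */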
bool NOINLINE
kasan_is_blacklisted(unsigned mask)
{
	uint32_t nframes = 0;
	uintptr_t frames[MAX_FRAMES];
	uintptr_t *bt = frames;
	nframes = backtrace(bt, MAX_FRAMES);
	boolean_t flag;

	if (nframes >= 2) {
		/* ignore self and direct caller */
		nframes -= 2;
		bt += 2;
	}

	struct blacklist_hash_entry *blhe = NULL;

	dybl_lock(&flag);

	/* First check if any frame hits in the hash */
	for (uint32_t i = 0; i < nframes; i++) {
		blhe = blacklist_hash_lookup(bt[i]);
		if (blhe) {
			if ((blhe->ble->type_mask & mask) != mask) {
				/* wrong type */
				continue;
			}

			/* hit */
			blhe->count++;
			blhe->ble->count++;
			// printf("KASan: blacklist cache hit (%s:%s [0x%lx] 0x%x)\n",
			//		ble->kext_name ?: "" , ble->func_name ?: "", VM_KERNEL_UNSLIDE(bt[i]), mask);
			dybl_unlock(flag);
			return true;
		}
	}

	/* no hits - slowpath */
	for (uint32_t i = 0; i < nframes; i++) {

		const char *kextname = NULL;
		const char *funcname = NULL;

		struct range_tree_entry key = { .base = bt[i], .size = 0 };
		struct range_tree_entry *e = RB_FIND(range_tree, &range_tree_root, &key);

		if (!e) {
			/* no match at this address - kinda weird? */
			continue;
		}

		/* get the function and bundle name for the current frame */
		funcname = addr_to_func(bt[i], e->mh);
		if (e->bundleid) {
			kextname = strrchr(e->bundleid, '.');
			if (kextname) {
				kextname++;
			} else {
				kextname = e->bundleid;
			}
		}

		// printf("%s: a = 0x%016lx,0x%016lx f = %s, k = %s\n", __func__, bt[i], VM_KERNEL_UNSLIDE(bt[i]), funcname, kextname);

		/* check if kextname or funcname are in the blacklist */
		for (size_t j = 0; j < blacklist_entries; j++) {
			struct blacklist_entry *ble = &blacklist[j];
			uint64_t count;

			if ((ble->type_mask & mask) != mask) {
				/* wrong type */
				continue;
			}

			if (ble->kext_name && kextname && strncmp(kextname, ble->kext_name, KMOD_MAX_NAME) != 0) {
				/* wrong kext name */
				continue;
			}

			if (ble->func_name && funcname && strncmp(funcname, ble->func_name, 128) != 0) {
				/* wrong func name */
				continue;
			}

			/* found a matching function or kext */
			blhe = blacklist_hash_add(bt[i], ble);
			count = ble->count++;
			e->accessed = 1;

			dybl_unlock(flag);

			if (count == 0) {
				printf("KASan: ignoring blacklisted violation (%s:%s [0x%lx] %d 0x%x)\n",
						kextname, funcname, VM_KERNEL_UNSLIDE(bt[i]), i, mask);
			}

			return true;
		}
	}

	dybl_unlock(flag);
	return false;
}

void
kasan_init_dybl(void)
{
	simple_lock_init(&_dybl_lock, 0);

	/* add the fake kernel kext */
	kasan_dybl_load_kext((uintptr_t)&_mh_execute_header, "__kernel__");
}

#else /* KASAN_DYNAMIC_BLACKLIST */

bool
kasan_is_blacklisted(unsigned __unused mask)
{
	return false;
}
#endif