1 /*
2 * Copyright (c) 2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30 #include <stdint.h>
31 #include <stdbool.h>
32 #include <vm/vm_map.h>
33 #include <kern/assert.h>
34 #include <kern/cpu_data.h>
35 #include <kern/backtrace.h>
36 #include <machine/machine_routines.h>
37 #include <kern/locks.h>
38 #include <kern/simple_lock.h>
39 #include <kern/debug.h>
40 #include <kern/kalloc.h>
41 #include <kern/zalloc.h>
42 #include <mach/mach_vm.h>
43 #include <mach/mach_types.h>
44 #include <mach/vm_param.h>
45 #include <mach/machine/vm_param.h>
46 #include <libkern/libkern.h>
47 #include <libkern/OSAtomic.h>
48 #include <libkern/kernel_mach_header.h>
49 #include <sys/queue.h>
50 #include <sys/sysctl.h>
51 #include <kern/thread.h>
52 #include <machine/atomic.h>
53
54 #include <kasan.h>
55 #include <kasan_internal.h>
56 #include <memintrinsics.h>
57
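/*
 * Exported so that compiler-instrumented code can locate the shadow map:
 * each 8-byte granule of kernel address space is described by one shadow
 * byte, found at a fixed offset (KASAN_SHIFT) from the granule address >> 3.
 */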
58 const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_SHIFT;
59
60 static unsigned kexts_loaded;
61 unsigned shadow_pages_total;
62 unsigned shadow_pages_used;
63
64 vm_offset_t kernel_vbase;
65 vm_offset_t kernel_vtop;
66
67 static unsigned kasan_enabled;
68 static unsigned quarantine_enabled;
69 static unsigned enabled_checks = TYPE_ALL; /* bitmask of enabled checks */
70 static unsigned report_ignored; /* issue non-fatal report for disabled/blacklisted checks */
71 static unsigned free_yield = 0; /* ms yield after each free */
72
73 /* forward decls */
74 static void kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason);
75 static void kasan_log_report(uptr p, uptr width, access_t access, violation_t reason);
76
77 /* imported osfmk functions */
78 extern vm_offset_t ml_stack_base(void);
79 extern vm_size_t ml_stack_size(void);
80
81 /*
82 * unused: expected to be called, but (currently) does nothing
83 */
84 #define UNUSED_ABI(func, ...) \
85 _Pragma("clang diagnostic push") \
86 _Pragma("clang diagnostic ignored \"-Wunused-parameter\"") \
87 void func(__VA_ARGS__); \
88 void func(__VA_ARGS__) {}; \
89 _Pragma("clang diagnostic pop") \
90
91 static const size_t BACKTRACE_BITS = 4;
92 static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;
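/*
 * Backtraces stashed in redzones and quarantine freelist entries keep their
 * frame count in a BACKTRACE_BITS-wide bitfield, so at most 15 frames are
 * recorded per object.
 */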
93
94 decl_simple_lock_data(, kasan_vm_lock);
95 static thread_t kasan_lock_holder;
96
97 /*
98 * kasan is called from the interrupt path, so we need to disable interrupts to
 99  * ensure atomicity while manipulating the global objects
100 */
101 void
102 kasan_lock(boolean_t *b)
103 {
104 *b = ml_set_interrupts_enabled(false);
105 simple_lock(&kasan_vm_lock, LCK_GRP_NULL);
106 kasan_lock_holder = current_thread();
107 }
108
109 void
110 kasan_unlock(boolean_t b)
111 {
112 kasan_lock_holder = THREAD_NULL;
113 simple_unlock(&kasan_vm_lock);
114 ml_set_interrupts_enabled(b);
115 }
116
117 /* Return true if 'thread' holds the kasan lock. Only safe if 'thread' == current
118 * thread */
119 bool
120 kasan_lock_held(thread_t thread)
121 {
122 return thread && thread == kasan_lock_holder;
123 }
124
125 static inline bool
126 kasan_check_enabled(access_t access)
127 {
128 return kasan_enabled && (enabled_checks & access) && !kasan_is_blacklisted(access);
129 }
130
131 static inline bool
132 kasan_poison_active(uint8_t flags)
133 {
134 switch (flags) {
135 case ASAN_GLOBAL_RZ:
136 return kasan_check_enabled(TYPE_POISON_GLOBAL);
137 case ASAN_HEAP_RZ:
138 case ASAN_HEAP_LEFT_RZ:
139 case ASAN_HEAP_RIGHT_RZ:
140 case ASAN_HEAP_FREED:
141 return kasan_check_enabled(TYPE_POISON_HEAP);
142 default:
143 return true;
144 };
145 }
146
147 /*
148 * poison redzones in the shadow map
149 */
150 void NOINLINE
151 kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz, uint8_t flags)
152 {
153 uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
154 uint8_t partial = size & 0x07;
155 vm_size_t total = leftrz + size + rightrz;
156 vm_size_t i = 0;
157
158 /* base must be 8-byte aligned */
159 /* any left redzone must be a multiple of 8 */
160 /* total region must cover 8-byte multiple */
161 assert((base & 0x07) == 0);
162 assert((leftrz & 0x07) == 0);
163 assert((total & 0x07) == 0);
164
165 if (!kasan_enabled || !kasan_poison_active(flags)) {
166 return;
167 }
168
169 leftrz /= 8;
170 size /= 8;
171 total /= 8;
172
173 uint8_t l_flags = flags;
174 uint8_t r_flags = flags;
175
176 if (flags == ASAN_STACK_RZ) {
177 l_flags = ASAN_STACK_LEFT_RZ;
178 r_flags = ASAN_STACK_RIGHT_RZ;
179 } else if (flags == ASAN_HEAP_RZ) {
180 l_flags = ASAN_HEAP_LEFT_RZ;
181 r_flags = ASAN_HEAP_RIGHT_RZ;
182 }
183
184 /*
185 * poison the redzones and unpoison the valid bytes
186 */
187 for (; i < leftrz; i++) {
188 shadow[i] = l_flags;
189 }
190 for (; i < leftrz + size; i++) {
191 shadow[i] = ASAN_VALID; /* XXX: should not be necessary */
192 }
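	/*
	 * A trailing, partially valid 8-byte granule is encoded by storing the
	 * number of valid leading bytes (1-7) in its shadow byte.
	 */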
193 if (partial && (i < total)) {
194 shadow[i] = partial;
195 i++;
196 }
197 for (; i < total; i++) {
198 shadow[i] = r_flags;
199 }
200 }
201
202 void
203 kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
204 {
205 /* base must be 8-byte aligned */
206 /* total region must cover 8-byte multiple */
207 assert((base & 0x07) == 0);
208 assert((size & 0x07) == 0);
209 kasan_poison(base, 0, 0, size, flags);
210 }
211
212 void NOINLINE
213 kasan_unpoison(void *base, vm_size_t size)
214 {
215 kasan_poison((vm_offset_t)base, size, 0, 0, 0);
216 }
217
218 void NOINLINE
219 kasan_unpoison_stack(vm_offset_t base, vm_size_t size)
220 {
221 assert(base);
222 assert(size);
223
224 /* align base and size to 8 bytes */
225 vm_offset_t align = base & 0x7;
226 base -= align;
227 size += align;
228 size = (size + 7) & ~0x7;
229
230 kasan_unpoison((void *)base, size);
231 }
232
233 /*
234 * write junk into the redzones
235 */
236 static void NOINLINE
237 kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
238 {
239 #if KASAN_DEBUG
240 vm_size_t i;
241 const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
242 const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
243 uint8_t *buf = (uint8_t *)base;
244
245 /* base must be 8-byte aligned */
246 /* any left redzone must be a multiple of 8 */
247 /* total region must cover 8-byte multiple */
248 assert((base & 0x07) == 0);
249 assert((leftrz & 0x07) == 0);
250 assert(((size + leftrz + rightrz) & 0x07) == 0);
251
252 for (i = 0; i < leftrz; i++) {
253 buf[i] = deadbeef[i % 4];
254 }
255
256 for (i = 0; i < rightrz; i++) {
257 buf[i + size + leftrz] = c0ffee[i % 4];
258 }
259 #else
260 (void)base;
261 (void)size;
262 (void)leftrz;
263 (void)rightrz;
264 #endif
265 }
266
267 /*
268 * Report a violation that may be disabled and/or blacklisted. This can only be
269 * called for dynamic checks (i.e. where the fault is recoverable). Use
270 * kasan_crash_report() for static (unrecoverable) violations.
271 *
 272  * access: what we were trying to do when the violation occurred
273 * reason: what failed about the access
274 */
275 static void
276 kasan_violation(uintptr_t addr, size_t size, access_t access, violation_t reason)
277 {
278 assert(__builtin_popcount(access) == 1);
279 if (!kasan_check_enabled(access)) {
280 if (report_ignored) {
281 kasan_log_report(addr, size, access, reason);
282 }
283 return;
284 }
285 kasan_crash_report(addr, size, access, reason);
286 }
287
288 void NOINLINE
289 kasan_check_range(const void *x, size_t sz, access_t access)
290 {
291 uintptr_t invalid;
292 uintptr_t ptr = (uintptr_t)x;
293 if (kasan_range_poisoned(ptr, sz, &invalid)) {
294 size_t remaining = sz - (invalid - ptr);
295 kasan_violation(invalid, remaining, access, 0);
296 }
297 }
298
299 /*
 300  * Return true if [base, base+sz) is unpoisoned or has the given shadow value.
301 */
302 bool
303 kasan_check_shadow(vm_address_t base, vm_size_t sz, uint8_t shadow)
304 {
305 sz -= 8 - (base % 8);
306 base += 8 - (base % 8);
307
308 vm_address_t end = base + sz;
309
310 while (base < end) {
311 uint8_t *sh = SHADOW_FOR_ADDRESS(base);
312 if (*sh && *sh != shadow) {
313 return false;
314 }
315 base += 8;
316 }
317 return true;
318 }
319
320 /*
321 *
322 * KASAN violation reporting
323 *
324 */
325
326 static const char *
327 access_str(access_t type)
328 {
329 if (type & TYPE_READ) {
330 return "load from";
331 } else if (type & TYPE_WRITE) {
332 return "store to";
333 } else if (type & TYPE_FREE) {
334 return "free of";
335 } else {
336 return "access of";
337 }
338 }
339
340 static const char *shadow_strings[] = {
341 [ASAN_VALID] = "VALID",
342 [ASAN_PARTIAL1] = "PARTIAL1",
343 [ASAN_PARTIAL2] = "PARTIAL2",
344 [ASAN_PARTIAL3] = "PARTIAL3",
345 [ASAN_PARTIAL4] = "PARTIAL4",
346 [ASAN_PARTIAL5] = "PARTIAL5",
347 [ASAN_PARTIAL6] = "PARTIAL6",
348 [ASAN_PARTIAL7] = "PARTIAL7",
349 [ASAN_STACK_LEFT_RZ] = "STACK_LEFT_RZ",
350 [ASAN_STACK_MID_RZ] = "STACK_MID_RZ",
351 [ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
352 [ASAN_STACK_FREED] = "STACK_FREED",
353 [ASAN_STACK_OOSCOPE] = "STACK_OOSCOPE",
354 [ASAN_GLOBAL_RZ] = "GLOBAL_RZ",
355 [ASAN_HEAP_LEFT_RZ] = "HEAP_LEFT_RZ",
356 [ASAN_HEAP_RIGHT_RZ] = "HEAP_RIGHT_RZ",
357 [ASAN_HEAP_FREED] = "HEAP_FREED",
358 [0xff] = NULL
359 };
360
361 #define CRASH_CONTEXT_BEFORE 5
362 #define CRASH_CONTEXT_AFTER 5
363
364 static size_t
365 kasan_shadow_crashlog(uptr p, char *buf, size_t len)
366 {
367 int i,j;
368 size_t n = 0;
369 int before = CRASH_CONTEXT_BEFORE;
370 int after = CRASH_CONTEXT_AFTER;
371
372 uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
373 uptr shadow_p = shadow;
374 uptr shadow_page = vm_map_round_page(shadow_p, HW_PAGE_MASK);
375
376 /* rewind to start of context block */
377 shadow &= ~((uptr)0xf);
378 shadow -= 16 * before;
379
380 n += snprintf(buf+n, len-n,
381 " Shadow 0 1 2 3 4 5 6 7 8 9 a b c d e f\n");
382
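	/*
	 * Dump CRASH_CONTEXT_BEFORE + 1 + CRASH_CONTEXT_AFTER rows of 16 shadow
	 * bytes, bracketing the shadow byte that covers the faulting address.
	 */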
383 for (i = 0; i < 1 + before + after; i++, shadow += 16) {
384 if ((vm_map_round_page(shadow, HW_PAGE_MASK) != shadow_page) && !kasan_is_shadow_mapped(shadow)) {
385 /* avoid unmapped shadow when crossing page boundaries */
386 continue;
387 }
388
389 n += snprintf(buf+n, len-n, " %16lx:", shadow);
390
391 char *left = " ";
392 char *right;
393
394 for (j = 0; j < 16; j++) {
395 uint8_t *x = (uint8_t *)(shadow + j);
396
397 right = " ";
398 if ((uptr)x == shadow_p) {
399 left = "[";
400 right = "]";
401 } else if ((uptr)(x + 1) == shadow_p) {
402 right = "";
403 }
404
405 n += snprintf(buf+n, len-n, "%s%02x%s", left, (unsigned)*x, right);
406 left = "";
407 }
408 n += snprintf(buf+n, len-n, "\n");
409 }
410
411 n += snprintf(buf+n, len-n, "\n");
412 return n;
413 }
414
415 static void
416 kasan_report_internal(uptr p, uptr width, access_t access, violation_t reason, bool dopanic)
417 {
418 const size_t len = 4096;
419 static char buf[len];
420 size_t n = 0;
421
422 uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
423 uint8_t shadow_type = *shadow_ptr;
424 const char *shadow_str = shadow_strings[shadow_type];
425 if (!shadow_str) {
426 shadow_str = "<invalid>";
427 }
428 buf[0] = '\0';
429
430 if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
431 n += snprintf(buf+n, len-n, "KASan: free of corrupted/invalid object %#lx\n", p);
432 } else if (reason == REASON_MOD_AFTER_FREE) {
433 n += snprintf(buf+n, len-n, "KASan: UaF of quarantined object %#lx\n", p);
434 } else {
435 n += snprintf(buf+n, len-n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
436 width, access_str(access), p, shadow_str);
437 }
438 n += kasan_shadow_crashlog(p, buf+n, len-n);
439
440 if (dopanic) {
441 panic("%s", buf);
442 } else {
443 printf("%s", buf);
444 }
445 }
446
447 static void NOINLINE OS_NORETURN
448 kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason)
449 {
450 kasan_handle_test();
451 kasan_report_internal(p, width, access, reason, true);
 452 	__builtin_unreachable(); /* we can't handle this returning anyway */
453 }
454
455 static void
456 kasan_log_report(uptr p, uptr width, access_t access, violation_t reason)
457 {
458 const size_t len = 256;
459 char buf[len];
460 size_t l = 0;
461 uint32_t nframes = 14;
462 uintptr_t frames[nframes];
463 uintptr_t *bt = frames;
464
465 kasan_report_internal(p, width, access, reason, false);
466
467 /*
468 * print a backtrace
469 */
470
471 nframes = backtrace_frame(bt, nframes, __builtin_frame_address(0)); /* ignore current frame */
472
473 buf[0] = '\0';
474 l += snprintf(buf+l, len-l, "Backtrace: ");
475 for (uint32_t i = 0; i < nframes; i++) {
476 l += snprintf(buf+l, len-l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
477 }
478 l += snprintf(buf+l, len-l, "\n");
479
480 printf("%s", buf);
481 }
482
483 #define REPORT_DECLARE(n) \
484 void OS_NORETURN __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD, 0); } \
485 void OS_NORETURN __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE, 0); } \
486 void UNSUPPORTED_API(__asan_report_exp_load##n, uptr a, int32_t b); \
487 void UNSUPPORTED_API(__asan_report_exp_store##n, uptr a, int32_t b);
488
489 REPORT_DECLARE(1)
490 REPORT_DECLARE(2)
491 REPORT_DECLARE(4)
492 REPORT_DECLARE(8)
493 REPORT_DECLARE(16)
494
495 void OS_NORETURN __asan_report_load_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_LOAD, 0); }
496 void OS_NORETURN __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE, 0); }
497
498 /* unpoison the current stack */
499 void NOINLINE
500 kasan_unpoison_curstack(bool whole_stack)
501 {
502 uintptr_t base = ml_stack_base();
503 size_t sz = ml_stack_size();
504 uintptr_t cur = (uintptr_t)&base;
505
506 if (whole_stack) {
507 cur = base;
508 }
509
510 if (cur >= base && cur < base + sz) {
511 /* unpoison from current stack depth to the top */
512 size_t unused = cur - base;
513 kasan_unpoison_stack(cur, sz - unused);
514 }
515 }
516
517 void NOINLINE
518 __asan_handle_no_return(void)
519 {
520 kasan_unpoison_curstack(false);
521
522 /*
523 * No need to free any fakestack objects because they must stay alive until
524 * we drop the real stack, at which point we can drop the entire fakestack
525 * anyway.
526 */
527 }
528
529 bool NOINLINE
530 kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
531 {
532 uint8_t *shadow;
533 vm_size_t i;
534
535 if (!kasan_enabled) {
536 return false;
537 }
538
539 size += base & 0x07;
540 base &= ~(vm_offset_t)0x07;
541
542 shadow = SHADOW_FOR_ADDRESS(base);
543 vm_size_t limit = (size + 7) / 8;
544
545 /* XXX: to make debugging easier, catch unmapped shadow here */
546
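	/*
	 * Walk the shadow one granule at a time: 0 means all 8 bytes are
	 * addressable; 1-7 means only that many leading bytes are valid, which
	 * is acceptable only if the remainder of the range fits within them.
	 */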
547 for (i = 0; i < limit; i++, size -= 8) {
548 assert(size > 0);
549 uint8_t s = shadow[i];
550 if (s == 0 || (size < 8 && s >= size && s <= 7)) {
551 /* valid */
552 } else {
553 goto fail;
554 }
555 }
556
557 return false;
558
559 fail:
560 if (first_invalid) {
561 /* XXX: calculate the exact first byte that failed */
562 *first_invalid = base + i*8;
563 }
564 return true;
565 }
566
567 static void NOINLINE
568 kasan_init_globals(vm_offset_t base, vm_size_t size)
569 {
570 struct asan_global *glob = (struct asan_global *)base;
571 struct asan_global *glob_end = (struct asan_global *)(base + size);
572 for (; glob < glob_end; glob++) {
573 /* handle one global */
574 kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
575 }
576 }
577
578 void NOINLINE
579 kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
580 {
581 unsigned long sectsz;
582 void *sect;
583
584 #if KASAN_DYNAMIC_BLACKLIST
585 kasan_dybl_load_kext(base, bundleid);
586 #endif
587
588 /* find the kasan globals segment/section */
589 sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
590 if (sect) {
591 kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
592 kexts_loaded++;
593 }
594 }
595
596 void NOINLINE
597 kasan_unload_kext(vm_offset_t base, vm_size_t size)
598 {
599 unsigned long sectsz;
600 void *sect;
601
602 /* find the kasan globals segment/section */
603 sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
604 if (sect) {
605 kasan_unpoison((void *)base, size);
606 kexts_loaded--;
607 }
608
609 #if KASAN_DYNAMIC_BLACKLIST
610 kasan_dybl_unload_kext(base);
611 #endif
612 }
613
614 /*
 615  * Turn off as much as possible for the panic path, etc. There's no way to turn it back
616 * on.
617 */
618 void NOINLINE
619 kasan_disable(void)
620 {
621 __asan_option_detect_stack_use_after_return = 0;
622 fakestack_enabled = 0;
623 kasan_enabled = 0;
624 quarantine_enabled = 0;
625 enabled_checks = 0;
626 }
627
628 static void NOINLINE
629 kasan_init_xnu_globals(void)
630 {
631 const char *seg = KASAN_GLOBAL_SEGNAME;
632 const char *sect = KASAN_GLOBAL_SECTNAME;
633 unsigned long _size;
634 vm_offset_t globals;
635 vm_size_t size;
636 kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;
637
638 if (!header) {
639 printf("KASan: failed to find kernel mach header\n");
640 printf("KASan: redzones for globals not poisoned\n");
641 return;
642 }
643
644 globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
645 if (!globals) {
646 printf("KASan: failed to find segment %s section %s\n", seg, sect);
647 printf("KASan: redzones for globals not poisoned\n");
648 return;
649 }
650 size = (vm_size_t)_size;
651
652 printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
653 printf("KASan: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));
654
655 kasan_init_globals(globals, size);
656 }
657
658 void NOINLINE
659 kasan_late_init(void)
660 {
661 #if KASAN_DYNAMIC_BLACKLIST
662 kasan_init_dybl();
663 #endif
664
665 kasan_init_fakestack();
666 kasan_init_xnu_globals();
667 }
668
669 void NOINLINE
670 kasan_notify_stolen(vm_offset_t top)
671 {
672 kasan_map_shadow(kernel_vtop, top - kernel_vtop, false);
673 }
674
675 static void NOINLINE
676 kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
677 {
678 #if KASAN_DEBUG
679 vm_size_t i;
680 uint8_t tmp1, tmp2;
681
682 /* Hit every byte in the shadow map. Don't write due to the zero mappings. */
683 for (i = 0; i < sz; i += sizeof(uint64_t)) {
684 vm_offset_t addr = base + i;
685 uint8_t *x = SHADOW_FOR_ADDRESS(addr);
686 tmp1 = *x;
687 asm volatile("" ::: "memory");
688 tmp2 = *x;
689 asm volatile("" ::: "memory");
690 assert(tmp1 == tmp2);
691 }
692 #else
693 (void)base;
694 (void)sz;
695 #endif
696 }
697
698 void NOINLINE
699 kasan_init(void)
700 {
701 unsigned arg;
702
703 simple_lock_init(&kasan_vm_lock, 0);
704
705 /* Map all of the kernel text and data */
706 kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);
707
708 kasan_arch_init();
709
710 /*
711 * handle KASan boot-args
712 */
713
714 if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
715 enabled_checks = arg;
716 }
717
718 if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
719 if (arg & KASAN_ARGS_FAKESTACK) {
720 fakestack_enabled = 1;
721 }
722 if (arg & KASAN_ARGS_REPORTIGNORED) {
723 report_ignored = 1;
724 }
725 if (arg & KASAN_ARGS_NODYCHECKS) {
726 enabled_checks &= ~TYPE_DYNAMIC;
727 }
728 if (arg & KASAN_ARGS_NOPOISON_HEAP) {
729 enabled_checks &= ~TYPE_POISON_HEAP;
730 }
731 if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
732 enabled_checks &= ~TYPE_POISON_GLOBAL;
733 }
734 }
735
736 if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
737 free_yield = arg;
738 }
739
740 /* kasan.bl boot-arg handled in kasan_init_dybl() */
741
742 quarantine_enabled = 1;
743 kasan_enabled = 1;
744 }
745
746 static void NOINLINE
747 kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool is_zero)
748 {
749 assert(address < VM_MAX_KERNEL_ADDRESS);
750
751 if (!kasan_enabled) {
752 return;
753 }
754
755 if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
756 /* only map kernel addresses */
757 return;
758 }
759
760 if (!size) {
761 /* nothing to map */
762 return;
763 }
764
765 boolean_t flags;
766 kasan_lock(&flags);
767 kasan_map_shadow(address, size, is_zero);
768 kasan_unlock(flags);
769 kasan_debug_touch_mappings(address, size);
770 }
771
772 void
773 kasan_notify_address(vm_offset_t address, vm_size_t size)
774 {
775 kasan_notify_address_internal(address, size, false);
776 }
777
778 /*
779 * Allocate read-only, all-zeros shadow for memory that can never be poisoned
780 */
781 void
782 kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
783 {
784 kasan_notify_address_internal(address, size, true);
785 }
786
787 /*
788 *
789 * allocator hooks
790 *
791 */
792
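/*
 * Layout of a sanitized heap allocation, as implied by kasan_alloc() and
 * kasan_dealloc():
 *
 *   | left redzone ... header | user data | backtrace ... right redzone |
 *                             ^
 *                             pointer returned to the caller
 *
 * The header occupies the tail of the left redzone and the backtrace footer
 * starts at the beginning of the right redzone; h->crc checksums both
 * redzones (but not the user data).
 */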
793 struct kasan_alloc_header {
794 uint16_t magic;
795 uint16_t crc;
796 uint32_t alloc_size;
797 uint32_t user_size;
798 struct {
799 uint32_t left_rz : 32 - BACKTRACE_BITS;
800 uint32_t frames : BACKTRACE_BITS;
801 };
802 };
803 _Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");
804
805 struct kasan_alloc_footer {
806 uint32_t backtrace[0];
807 };
808 _Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");
809
810 #define LIVE_XOR ((uint16_t)0x3a65)
811 #define FREE_XOR ((uint16_t)0xf233)
812
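/*
 * Fold an address into a 16-bit tag, mixed with a constant that differs for
 * live (LIVE_XOR) and quarantined (FREE_XOR) objects. The tag is stored in
 * the allocation header or freelist entry so that stale or corrupted metadata
 * can be detected at free time and at quarantine eviction.
 */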
813 static uint16_t
814 magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
815 {
816 uint16_t magic = addr & 0xFFFF;
817 magic ^= (addr >> 16) & 0xFFFF;
818 magic ^= (addr >> 32) & 0xFFFF;
819 magic ^= (addr >> 48) & 0xFFFF;
820 magic ^= magic_xor;
821 return magic;
822 }
823
824 static struct kasan_alloc_header *
825 header_for_user_addr(vm_offset_t addr)
826 {
827 return (void *)(addr - sizeof(struct kasan_alloc_header));
828 }
829
830 static struct kasan_alloc_footer *
831 footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
832 {
833 struct kasan_alloc_header *h = header_for_user_addr(addr);
834 vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
835 *size = rightrz;
836 return (void *)(addr + h->user_size);
837 }
838
839 /*
840 * size: user-requested allocation size
841 * ret: minimum size for the real allocation
842 */
843 vm_size_t
844 kasan_alloc_resize(vm_size_t size)
845 {
846 vm_size_t tmp;
847 if (os_add_overflow(size, 4 * PAGE_SIZE, &tmp)) {
848 panic("allocation size overflow (%lu)", size);
849 }
850
851 /* add left and right redzones */
852 size += KASAN_GUARD_PAD;
853
854 /* ensure the final allocation is an 8-byte multiple */
855 size += 8 - (size % 8);
856
857 return size;
858 }
859
860 extern vm_offset_t vm_kernel_slid_base;
861
862 static vm_size_t
863 kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
864 {
865 uintptr_t buf[BACKTRACE_MAXFRAMES];
866 uintptr_t *bt = buf;
867
868 sz /= sizeof(uint32_t);
869 vm_size_t frames = sz;
870
871 if (frames > 0) {
872 frames = min(frames + skip, BACKTRACE_MAXFRAMES);
873 frames = backtrace(bt, frames);
874
875 while (frames > sz && skip > 0) {
876 bt++;
877 frames--;
878 skip--;
879 }
880
881 /* only store the offset from kernel base, and cram that into 32
882 * bits */
883 for (vm_size_t i = 0; i < frames; i++) {
884 ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
885 }
886 }
887 return frames;
888 }
889
890 /* addr: user address of allocation */
891 static uint16_t
892 kasan_alloc_crc(vm_offset_t addr)
893 {
894 struct kasan_alloc_header *h = header_for_user_addr(addr);
895 vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
896
897 uint16_t crc_orig = h->crc;
898 h->crc = 0;
899
900 uint16_t crc = 0;
901 crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
902 crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);
903
904 h->crc = crc_orig;
905
906 return crc;
907 }
908
909 /*
910 * addr: base address of full allocation (including redzones)
 911  * size: total size of allocation (including redzones)
 912  * req: user-requested allocation size
 913  * leftrz: size of the left redzone in bytes
914 * ret: address of usable allocation
915 */
916 vm_address_t
917 kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
918 {
919 if (!addr) {
920 return 0;
921 }
922 assert(size > 0);
923 assert((addr % 8) == 0);
924 assert((size % 8) == 0);
925
926 vm_size_t rightrz = size - req - leftrz;
927
928 kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
929 kasan_rz_clobber(addr, req, leftrz, rightrz);
930
931 addr += leftrz;
932
933 /* stash the allocation sizes in the left redzone */
934 struct kasan_alloc_header *h = header_for_user_addr(addr);
935 h->magic = magic_for_addr(addr, LIVE_XOR);
936 h->left_rz = leftrz;
937 h->alloc_size = size;
938 h->user_size = req;
939
940 /* ... and a backtrace in the right redzone */
941 vm_size_t fsize;
942 struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
943 h->frames = kasan_alloc_bt(f->backtrace, fsize, 2);
944
945 /* checksum the whole object, minus the user part */
946 h->crc = kasan_alloc_crc(addr);
947
948 return addr;
949 }
950
951 /*
952 * addr: user pointer
953 * size: returns full original allocation size
954 * ret: original allocation ptr
955 */
956 vm_address_t
957 kasan_dealloc(vm_offset_t addr, vm_size_t *size)
958 {
959 assert(size && addr);
960 struct kasan_alloc_header *h = header_for_user_addr(addr);
961 *size = h->alloc_size;
962 return addr - h->left_rz;
963 }
964
965 /*
966 * return the original user-requested allocation size
967 * addr: user alloc pointer
968 */
969 vm_size_t
970 kasan_user_size(vm_offset_t addr)
971 {
972 struct kasan_alloc_header *h = header_for_user_addr(addr);
973 assert(h->magic == magic_for_addr(addr, LIVE_XOR));
974 return h->user_size;
975 }
976
977 /*
978 * Verify that `addr' (user pointer) is a valid allocation of `type'
979 */
980 void
981 kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
982 {
983 struct kasan_alloc_header *h = header_for_user_addr(addr);
984
985 /* map heap type to an internal access type */
986 access_t type = heap_type == KASAN_HEAP_KALLOC ? TYPE_KFREE :
987 heap_type == KASAN_HEAP_ZALLOC ? TYPE_ZFREE :
988 heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0;
989
990 /* check the magic and crc match */
991 if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
992 kasan_violation(addr, size, type, REASON_BAD_METADATA);
993 }
994 if (h->crc != kasan_alloc_crc(addr)) {
995 kasan_violation(addr, size, type, REASON_MOD_OOB);
996 }
997
998 /* check the freed size matches what we recorded at alloc time */
999 if (h->user_size != size) {
1000 kasan_violation(addr, size, type, REASON_INVALID_SIZE);
1001 }
1002
1003 vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;
1004
1005 /* Check that the redzones are valid */
1006 if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
1007 !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
1008 kasan_violation(addr, size, type, REASON_BAD_METADATA);
1009 }
1010
1011 /* Check the allocated range is not poisoned */
1012 kasan_check_range((void *)addr, size, type);
1013 }
1014
1015 /*
1016 *
1017 * Quarantine
1018 *
1019 */
1020
1021 struct freelist_entry {
1022 uint16_t magic;
1023 uint16_t crc;
1024 STAILQ_ENTRY(freelist_entry) list;
1025 union {
1026 struct {
1027 vm_size_t size : 28;
1028 vm_size_t user_size : 28;
1029 vm_size_t frames : BACKTRACE_BITS; /* number of frames in backtrace */
1030 vm_size_t __unused : 8 - BACKTRACE_BITS;
1031 };
1032 uint64_t bits;
1033 };
1034 zone_t zone;
1035 uint32_t backtrace[];
1036 };
1037 _Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");
1038
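/*
 * Freed objects are parked in a per-heap-type quarantine: a FIFO freelist
 * capped by both entry count and total size. Only when an entry is evicted is
 * the memory actually handed back to the underlying allocator, giving
 * use-after-free bugs a window in which to be detected.
 */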
1039 struct quarantine {
1040 STAILQ_HEAD(freelist_head, freelist_entry) freelist;
1041 unsigned long entries;
1042 unsigned long max_entries;
1043 vm_size_t size;
1044 vm_size_t max_size;
1045 };
1046
1047 struct quarantine quarantines[] = {
1048 { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
1049 { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
1050 { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
1051 };
1052
1053 static uint16_t
1054 fle_crc(struct freelist_entry *fle)
1055 {
1056 return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
1057 }
1058
1059 /*
 1060  * addrp, sizep: pointer/size of full allocation including redzone
1061 */
1062 void NOINLINE
1063 kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
1064 zone_t *zone, vm_size_t user_size, int locked,
1065 bool doquarantine)
1066 {
1067 vm_size_t size = *sizep;
1068 vm_offset_t addr = *(vm_offset_t *)addrp;
1069
1070 assert(type >= 0 && type < KASAN_HEAP_TYPES);
1071 if (type == KASAN_HEAP_KALLOC) {
1072 /* zero-size kalloc allocations are allowed */
1073 assert(!zone);
1074 } else if (type == KASAN_HEAP_ZALLOC) {
1075 assert(zone && user_size);
1076 } else if (type == KASAN_HEAP_FAKESTACK) {
1077 assert(zone && user_size);
1078 }
1079
1080 /* clobber the entire freed region */
1081 kasan_rz_clobber(addr, 0, size, 0);
1082
1083 if (!doquarantine || !quarantine_enabled) {
1084 goto free_current;
1085 }
1086
1087 /* poison the entire freed region */
1088 uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
1089 kasan_poison(addr, 0, size, 0, flags);
1090
1091 struct freelist_entry *fle, *tofree = NULL;
1092 struct quarantine *q = &quarantines[type];
1093 assert(size >= sizeof(struct freelist_entry));
1094
1095 /* create a new freelist entry */
1096 fle = (struct freelist_entry *)addr;
1097 fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
1098 fle->size = size;
1099 fle->user_size = user_size;
1100 fle->frames = 0;
1101 fle->zone = ZONE_NULL;
1102 if (zone) {
1103 fle->zone = *zone;
1104 }
1105 if (type != KASAN_HEAP_FAKESTACK) {
1106 /* don't do expensive things on the fakestack path */
1107 fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
1108 fle->crc = fle_crc(fle);
1109 }
1110
1111 boolean_t flg;
1112 if (!locked) {
1113 kasan_lock(&flg);
1114 }
1115
1116 if (q->size + size > q->max_size) {
1117 /*
1118 * Adding this entry would put us over the max quarantine size. Free the
1119 * larger of the current object and the quarantine head object.
1120 */
1121 tofree = STAILQ_FIRST(&q->freelist);
1122 if (fle->size > tofree->size) {
1123 goto free_current_locked;
1124 }
1125 }
1126
1127 STAILQ_INSERT_TAIL(&q->freelist, fle, list);
1128 q->entries++;
1129 q->size += size;
1130
1131 /* free the oldest entry, if necessary */
1132 if (tofree || q->entries > q->max_entries) {
1133 tofree = STAILQ_FIRST(&q->freelist);
1134 STAILQ_REMOVE_HEAD(&q->freelist, list);
1135
1136 assert(q->entries > 0 && q->size >= tofree->size);
1137 q->entries--;
1138 q->size -= tofree->size;
1139
1140 if (type != KASAN_HEAP_KALLOC) {
1141 assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS &&
1142 (vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS);
1143 *zone = tofree->zone;
1144 }
1145
1146 size = tofree->size;
1147 addr = (vm_offset_t)tofree;
1148
1149 /* check the magic and crc match */
1150 if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
1151 kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
1152 }
1153 if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
1154 kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
1155 }
1156
1157 /* clobber the quarantine header */
1158 __nosan_bzero((void *)addr, sizeof(struct freelist_entry));
1159
1160 } else {
1161 /* quarantine is not full - don't really free anything */
1162 addr = 0;
1163 }
1164
1165 free_current_locked:
1166 if (!locked) {
1167 kasan_unlock(flg);
1168 }
1169
1170 free_current:
1171 *addrp = (void *)addr;
1172 if (addr) {
1173 kasan_unpoison((void *)addr, size);
1174 *sizep = size;
1175 }
1176 }
1177
1178 void NOINLINE
1179 kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
1180 vm_size_t user_size, bool quarantine)
1181 {
1182 kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);
1183
1184 if (free_yield) {
1185 thread_yield_internal(free_yield);
1186 }
1187 }
1188
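/*
 * C++ 'new[]' array-cookie handling: if the shadow shows that the memory
 * backing the cookie has already been freed, return 0 so the caller does not
 * act on a stale element count; otherwise return the stored cookie unchanged.
 */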
1189 uptr
1190 __asan_load_cxx_array_cookie(uptr *p)
1191 {
1192 uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)p);
1193 if (*shadow == ASAN_ARRAY_COOKIE) {
1194 return *p;
1195 } else if (*shadow == ASAN_HEAP_FREED) {
1196 return 0;
1197 } else {
1198 return *p;
1199 }
1200 }
1201
1202 void
1203 __asan_poison_cxx_array_cookie(uptr p)
1204 {
1205 uint8_t *shadow = SHADOW_FOR_ADDRESS(p);
1206 *shadow = ASAN_ARRAY_COOKIE;
1207 }
1208
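/*
 * Entry points emitted by the compiler for loads and stores of fixed size;
 * each simply validates the accessed range against the shadow map.
 */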
1209 #define ACCESS_CHECK_DECLARE(type, sz, access) \
1210 void __asan_##type##sz(uptr addr) { \
1211 kasan_check_range((const void *)addr, sz, access); \
1212 } \
1213 void UNSUPPORTED_API(__asan_exp_##type##sz, uptr a, int32_t b);
1214
1215 ACCESS_CHECK_DECLARE(load, 1, TYPE_LOAD);
1216 ACCESS_CHECK_DECLARE(load, 2, TYPE_LOAD);
1217 ACCESS_CHECK_DECLARE(load, 4, TYPE_LOAD);
1218 ACCESS_CHECK_DECLARE(load, 8, TYPE_LOAD);
1219 ACCESS_CHECK_DECLARE(load, 16, TYPE_LOAD);
1220 ACCESS_CHECK_DECLARE(store, 1, TYPE_STORE);
1221 ACCESS_CHECK_DECLARE(store, 2, TYPE_STORE);
1222 ACCESS_CHECK_DECLARE(store, 4, TYPE_STORE);
1223 ACCESS_CHECK_DECLARE(store, 8, TYPE_STORE);
1224 ACCESS_CHECK_DECLARE(store, 16, TYPE_STORE);
1225
1226 void
1227 __asan_loadN(uptr addr, size_t sz)
1228 {
1229 kasan_check_range((const void *)addr, sz, TYPE_LOAD);
1230 }
1231
1232 void
1233 __asan_storeN(uptr addr, size_t sz)
1234 {
1235 kasan_check_range((const void *)addr, sz, TYPE_STORE);
1236 }
1237
1238 static void
1239 kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
1240 {
1241 __nosan_memset((void *)addr, val, sz);
1242 }
1243
1244 #define SET_SHADOW_DECLARE(val) \
1245 void __asan_set_shadow_##val(uptr addr, size_t sz) { \
1246 kasan_set_shadow(addr, sz, 0x##val); \
1247 }
1248
1249 SET_SHADOW_DECLARE(00)
1250 SET_SHADOW_DECLARE(f1)
1251 SET_SHADOW_DECLARE(f2)
1252 SET_SHADOW_DECLARE(f3)
1253 SET_SHADOW_DECLARE(f5)
1254 SET_SHADOW_DECLARE(f8)
1255
1256
1257 /*
 1258  * Call 'cb' for each contiguous range of the shadow map. This could be made more
 1259  * efficient by walking the page table directly.
1260 */
1261 int
1262 kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
1263 {
1264 uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
1265 uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
1266 shadow_base = vm_map_trunc_page(shadow_base, HW_PAGE_MASK);
1267 shadow_top = vm_map_round_page(shadow_top, HW_PAGE_MASK);
1268
1269 uintptr_t start = 0, end = 0;
1270
1271 for (uintptr_t addr = shadow_base; addr < shadow_top; addr += HW_PAGE_SIZE) {
1272 if (kasan_is_shadow_mapped(addr)) {
1273 if (start == 0) {
1274 start = addr;
1275 }
1276 end = addr + HW_PAGE_SIZE;
1277 } else if (start && end) {
1278 cb(start, end, ctx);
1279 start = end = 0;
1280 }
1281 }
1282
1283 if (start && end) {
1284 cb(start, end, ctx);
1285 }
1286
1287 return 0;
1288 }
1289
1290 /*
1291 * XXX: implement these
1292 */
1293
1294 UNUSED_ABI(__asan_alloca_poison, uptr addr, uptr size);
1295 UNUSED_ABI(__asan_allocas_unpoison, uptr top, uptr bottom);
1296 UNUSED_ABI(__sanitizer_ptr_sub, uptr a, uptr b);
1297 UNUSED_ABI(__sanitizer_ptr_cmp, uptr a, uptr b);
1298 UNUSED_ABI(__sanitizer_annotate_contiguous_container, const void *a, const void *b, const void *c, const void *d);
1299 UNUSED_ABI(__asan_poison_stack_memory, uptr addr, size_t size);
1300 UNUSED_ABI(__asan_unpoison_stack_memory, uptr a, uptr b);
1301
1302 /*
1303 * Miscellaneous unimplemented asan ABI
1304 */
1305
1306 UNUSED_ABI(__asan_init, void);
1307 UNUSED_ABI(__asan_register_image_globals, uptr a);
1308 UNUSED_ABI(__asan_unregister_image_globals, uptr a);
1309 UNUSED_ABI(__asan_before_dynamic_init, uptr a);
1310 UNUSED_ABI(__asan_after_dynamic_init, void);
1311 UNUSED_ABI(__asan_version_mismatch_check_v8, void);
1312 UNUSED_ABI(__asan_version_mismatch_check_apple_802, void);
1313 UNUSED_ABI(__asan_version_mismatch_check_apple_900, void);
1314 UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);
1315 UNUSED_ABI(__asan_version_mismatch_check_apple_1000, void);
1316 UNUSED_ABI(__asan_version_mismatch_check_apple_1001, void);
1317
1318 void UNSUPPORTED_API(__asan_init_v5, void);
1319 void UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
1320 void UNSUPPORTED_API(__asan_unregister_globals, uptr a, uptr b);
1321 void UNSUPPORTED_API(__asan_register_elf_globals, uptr a, uptr b, uptr c);
1322 void UNSUPPORTED_API(__asan_unregister_elf_globals, uptr a, uptr b, uptr c);
1323
1324 void UNSUPPORTED_API(__asan_exp_loadN, uptr addr, size_t sz, int32_t e);
1325 void UNSUPPORTED_API(__asan_exp_storeN, uptr addr, size_t sz, int32_t e);
1326 void UNSUPPORTED_API(__asan_report_exp_load_n, uptr addr, unsigned long b, int32_t c);
1327 void UNSUPPORTED_API(__asan_report_exp_store_n, uptr addr, unsigned long b, int32_t c);
1328
1329 /*
1330 *
1331 * SYSCTL
1332 *
1333 */
1334
1335 static int
1336 sysctl_kasan_test(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg2, struct sysctl_req *req)
1337 {
1338 int mask = 0;
1339 int ch;
1340 int err;
1341 err = sysctl_io_number(req, 0, sizeof(int), &mask, &ch);
1342
1343 if (!err && mask) {
1344 kasan_test(mask, arg2);
1345 }
1346
1347 return err;
1348 }
1349
1350 static int
1351 sysctl_fakestack_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, int __unused arg2, struct sysctl_req *req)
1352 {
1353 int ch, err, val;
1354
1355 err = sysctl_io_number(req, fakestack_enabled, sizeof(fakestack_enabled), &val, &ch);
1356 if (err == 0 && ch) {
1357 fakestack_enabled = !!val;
1358 __asan_option_detect_stack_use_after_return = !!val;
1359 }
1360
1361 return err;
1362 }
1363
1364 SYSCTL_DECL(kasan);
1365 SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
1366
1367 SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
1368 SYSCTL_UINT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
1369 SYSCTL_UINT(_kern_kasan, OID_AUTO, checks, CTLFLAG_RW, &enabled_checks, 0, "");
1370 SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
1371 SYSCTL_UINT(_kern_kasan, OID_AUTO, report_ignored, CTLFLAG_RW, &report_ignored, 0, "");
1372 SYSCTL_UINT(_kern_kasan, OID_AUTO, free_yield_ms, CTLFLAG_RW, &free_yield, 0, "");
1373 SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, "");
1374 SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, "");
1375 SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, "");
1376 SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
1377 SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
1378 SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
1379 SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");
1380
1381 SYSCTL_PROC(_kern_kasan, OID_AUTO, fakestack,
1382 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1383 0, 0, sysctl_fakestack_enable, "I", "");
1384
1385 SYSCTL_PROC(_kern_kasan, OID_AUTO, test,
1386 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1387 0, 0, sysctl_kasan_test, "I", "");
1388
1389 SYSCTL_PROC(_kern_kasan, OID_AUTO, fail,
1390 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1391 0, 1, sysctl_kasan_test, "I", "");