/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/backtrace.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <kern/thread.h>
#include <machine/atomic.h>

#include <kasan_internal.h>
#include <memintrinsics.h>
const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_OFFSET;
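/*
 * Illustrative note (not the macro definition, which lives in the KASan
 * headers): classic ASan maps every KASAN_GRANULE-sized chunk of kernel
 * address space to one shadow byte, roughly
 *
 *     shadow = __asan_shadow_memory_dynamic_address + (addr >> KASAN_SCALE)
 *
 * so with the usual 8-byte granule (scale 3), a 64-byte object is described
 * by 8 consecutive shadow bytes. SHADOW_FOR_ADDRESS() used throughout this
 * file is assumed to implement that mapping.
 */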
static unsigned kexts_loaded;
unsigned shadow_pages_total;
unsigned shadow_pages_used;

vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;

static unsigned kasan_enabled;
static unsigned quarantine_enabled;
static unsigned enabled_checks = TYPE_ALL & ~TYPE_LEAK; /* bitmask of enabled checks */
static unsigned report_ignored;           /* issue non-fatal report for disabled/blacklisted checks */
static unsigned free_yield = 0;           /* ms yield after each free */
static unsigned leak_threshold = 3;       /* threshold for uninitialized memory leak detection */
static unsigned leak_fatal_threshold = 0; /* threshold for treating leaks as fatal errors (0 means never) */
static void kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason);
static void kasan_log_report(uptr p, uptr width, access_t access, violation_t reason);

/* imported osfmk functions */
extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);
/*
 * unused: expected to be called, but (currently) does nothing
 */
#define UNUSED_ABI(func, ...) \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Wunused-parameter\"") \
	void func(__VA_ARGS__); \
	void func(__VA_ARGS__) {}; \
	_Pragma("clang diagnostic pop")
static const size_t BACKTRACE_BITS      = 4;
static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;

static vm_size_t kasan_alloc_retrieve_bt(vm_address_t addr, uintptr_t frames[static BACKTRACE_MAXFRAMES]);
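/*
 * Sketch of the backtrace encoding used by the allocator metadata below
 * (derived from the declarations above, not a separate ABI): frame counts
 * are stored in BACKTRACE_BITS-wide bitfields, so at most
 * (1 << BACKTRACE_BITS) - 1 = 15 frames are kept per object, and each frame
 * is recorded as a 32-bit offset from the slid kernel base rather than a
 * full 64-bit pointer.
 */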
decl_simple_lock_data(, kasan_vm_lock);
static thread_t kasan_lock_holder;

/*
 * kasan is called from the interrupt path, so we need to disable interrupts to
 * ensure atomicity manipulating the global objects
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&kasan_vm_lock, LCK_GRP_NULL);
	kasan_lock_holder = current_thread();
}

void
kasan_unlock(boolean_t b)
{
	kasan_lock_holder = THREAD_NULL;
	simple_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}
/*
 * Return true if 'thread' holds the kasan lock. Only safe if 'thread' ==
 * current thread.
 */
bool
kasan_lock_held(thread_t thread)
{
	return thread && thread == kasan_lock_holder;
}
static inline bool
kasan_check_enabled(access_t access)
{
	return kasan_enabled && (enabled_checks & access) && !kasan_is_blacklisted(access);
}
static inline bool
kasan_poison_active(uint8_t flags)
{
	switch (flags) {
	case ASAN_GLOBAL_RZ:
		return kasan_check_enabled(TYPE_POISON_GLOBAL);
	case ASAN_HEAP_RZ:
	case ASAN_HEAP_LEFT_RZ:
	case ASAN_HEAP_RIGHT_RZ:
	case ASAN_HEAP_FREED:
		return kasan_check_enabled(TYPE_POISON_HEAP);
	default:
		return true;
	}
}
/*
 * poison redzones in the shadow map
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz, uint8_t flags)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
	uint8_t partial = (uint8_t)kasan_granule_partial(size);
	vm_size_t total = leftrz + size + rightrz;
	vm_size_t i = 0;

	/* ensure base, leftrz and total allocation size are granule-aligned */
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(total) == 0);

	if (!kasan_enabled || !kasan_poison_active(flags)) {
		return;
	}

	leftrz >>= KASAN_SCALE;
	size >>= KASAN_SCALE;
	total >>= KASAN_SCALE;

	uint8_t l_flags = flags;
	uint8_t r_flags = flags;

	if (flags == ASAN_STACK_RZ) {
		l_flags = ASAN_STACK_LEFT_RZ;
		r_flags = ASAN_STACK_RIGHT_RZ;
	} else if (flags == ASAN_HEAP_RZ) {
		l_flags = ASAN_HEAP_LEFT_RZ;
		r_flags = ASAN_HEAP_RIGHT_RZ;
	}

	/*
	 * poison the redzones and unpoison the valid bytes
	 */
	for (; i < leftrz; i++) {
		shadow[i] = l_flags;
	}
	for (; i < leftrz + size; i++) {
		shadow[i] = ASAN_VALID; /* XXX: should not be necessary */
	}
	if (partial && (i < total)) {
		shadow[i] = partial;
		i++;
	}
	for (; i < total; i++) {
		shadow[i] = r_flags;
	}
}
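/*
 * Worked example for kasan_poison() (illustrative, assuming an 8-byte
 * granule): poisoning a 20-byte object with a 16-byte left and 28-byte right
 * redzone covers 16 + 20 + 28 = 64 bytes, i.e. 8 shadow bytes:
 *
 *   [ l_flags, l_flags, VALID, VALID, 4, r_flags, r_flags, r_flags ]
 *
 * The '4' is the partial byte: only the first 4 bytes of that granule are
 * addressable, matching kasan_granule_partial(20).
 */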
void
kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
{
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(size) == 0);
	kasan_poison(base, 0, 0, size, flags);
}
void NOINLINE
kasan_unpoison(void *base, vm_size_t size)
{
	kasan_poison((vm_offset_t)base, size, 0, 0, 0);
}
void NOINLINE
kasan_unpoison_stack(uintptr_t base, size_t size)
{
	/* align base and size to granule boundaries, growing outwards */
	size_t partial = kasan_granule_partial(base);
	base = kasan_granule_trunc(base);
	size = kasan_granule_round(size + partial);

	kasan_unpoison((void *)base, size);
}
/*
 * write junk into the redzones
 */
void NOINLINE
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
#if KASAN_DEBUG
	vm_size_t i;
	const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
	const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
	uint8_t *buf = (uint8_t *)base;

	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(size + leftrz + rightrz) == 0);

	for (i = 0; i < leftrz; i++) {
		buf[i] = deadbeef[i % 4];
	}

	for (i = 0; i < rightrz; i++) {
		buf[i + size + leftrz] = c0ffee[i % 4];
	}
#else
	(void)base;
	(void)size;
	(void)leftrz;
	(void)rightrz;
#endif
}
/*
 * Report a violation that may be disabled and/or blacklisted. This can only be
 * called for dynamic checks (i.e. where the fault is recoverable). Use
 * kasan_crash_report() for static (unrecoverable) violations.
 *
 * access: what we were trying to do when the violation occurred
 * reason: what failed about the access
 */
static void NOINLINE
kasan_violation(uintptr_t addr, size_t size, access_t access, violation_t reason)
{
	assert(__builtin_popcount(access) == 1);
	if (!kasan_check_enabled(access)) {
		if (report_ignored) {
			kasan_log_report(addr, size, access, reason);
		}
		return;
	}
	kasan_crash_report(addr, size, access, reason);
}
void NOINLINE
kasan_check_range(const void *x, size_t sz, access_t access)
{
	uintptr_t invalid;
	uintptr_t ptr = (uintptr_t)x;
	if (kasan_range_poisoned(ptr, sz, &invalid)) {
		size_t remaining = sz - (invalid - ptr);
		kasan_violation(invalid, remaining, access, 0);
	}
}
/*
 * Return true if [base, base+sz) is unpoisoned or has given shadow value.
 */
static bool
kasan_check_shadow(vm_address_t addr, vm_size_t sz, uint8_t shadow)
{
	/* round 'base' up to skip any partial, which won't match 'shadow' */
	uintptr_t base = kasan_granule_round(addr);
	sz -= base - addr;

	uintptr_t end = base + sz;

	while (base < end) {
		uint8_t *sh = SHADOW_FOR_ADDRESS(base);
		if (*sh && *sh != shadow) {
			return false;
		}
		base += KASAN_GRANULE;
	}
	return true;
}
static void
kasan_report_leak(vm_address_t base, vm_size_t sz, vm_offset_t offset, vm_size_t leak_sz)
{
	if (leak_fatal_threshold > leak_threshold && leak_sz >= leak_fatal_threshold) {
		kasan_violation(base + offset, leak_sz, TYPE_LEAK, REASON_UNINITIALIZED);
	}

	char string_rep[BACKTRACE_MAXFRAMES * 20] = {};
	vm_offset_t stack_base = dtrace_get_kernel_stack(current_thread());
	bool is_stack = (base >= stack_base && base < (stack_base + kernel_stack_size));

	if (!is_stack) {
		uintptr_t alloc_bt[BACKTRACE_MAXFRAMES] = {};
		vm_size_t num_frames = 0;
		size_t l = 0;
		num_frames = kasan_alloc_retrieve_bt(base, alloc_bt);
		for (vm_size_t i = 0; i < num_frames; i++) {
			l += scnprintf(string_rep + l, sizeof(string_rep) - l, " %lx", alloc_bt[i]);
		}
	}

	DTRACE_KASAN5(leak_detected,
	    vm_address_t, base,
	    vm_size_t, sz,
	    vm_offset_t, offset,
	    vm_size_t, leak_sz,
	    char *, string_rep);
}
/*
 * Initialize buffer by writing unique pattern that can be looked for
 * in copyout path to detect uninitialized memory leaks.
 */
void
kasan_leak_init(vm_address_t addr, vm_size_t sz)
{
	if (enabled_checks & TYPE_LEAK) {
		__nosan_memset((void *)addr, KASAN_UNINITIALIZED_HEAP, sz);
	}
}
/*
 * Check for possible uninitialized memory contained in [base, base+sz).
 */
void
kasan_check_uninitialized(vm_address_t base, vm_size_t sz)
{
	if (!(enabled_checks & TYPE_LEAK) || sz < leak_threshold) {
		return;
	}

	vm_address_t cur = base;
	vm_address_t end = base + sz;
	vm_size_t count = 0;
	vm_size_t max_count = 0;
	vm_address_t leak_offset = 0;
	uint8_t byte = 0;

	while (cur < end) {
		byte = *(uint8_t *)cur;
		count = (byte == KASAN_UNINITIALIZED_HEAP) ? (count + 1) : 0;
		if (count > max_count) {
			max_count = count;
			leak_offset = cur - (count - 1) - base;
		}
		cur += 1;
	}

	if (max_count >= leak_threshold) {
		kasan_report_leak(base, sz, leak_offset, max_count);
	}
}
/*
 * KASAN violation reporting
 */

static const char *
access_str(access_t type)
{
	if (type & TYPE_READ) {
		return "load from";
	} else if (type & TYPE_WRITE) {
		return "store to";
	} else if (type & TYPE_FREE) {
		return "free of";
	} else if (type & TYPE_LEAK) {
		return "leak from";
	} else {
		return "access of";
	}
}
static const char *shadow_strings[] = {
	[ASAN_VALID]          = "VALID",
	[ASAN_PARTIAL1]       = "PARTIAL1",
	[ASAN_PARTIAL2]       = "PARTIAL2",
	[ASAN_PARTIAL3]       = "PARTIAL3",
	[ASAN_PARTIAL4]       = "PARTIAL4",
	[ASAN_PARTIAL5]       = "PARTIAL5",
	[ASAN_PARTIAL6]       = "PARTIAL6",
	[ASAN_PARTIAL7]       = "PARTIAL7",
	[ASAN_STACK_LEFT_RZ]  = "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ]   = "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED]    = "STACK_FREED",
	[ASAN_STACK_OOSCOPE]  = "STACK_OOSCOPE",
	[ASAN_GLOBAL_RZ]      = "GLOBAL_RZ",
	[ASAN_HEAP_LEFT_RZ]   = "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ]  = "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED]     = "HEAP_FREED",
};
#define CRASH_CONTEXT_BEFORE 5
#define CRASH_CONTEXT_AFTER  5
static size_t
kasan_shadow_crashlog(uptr p, char *buf, size_t len)
{
	int i, j;
	size_t n = 0;
	int before = CRASH_CONTEXT_BEFORE;
	int after = CRASH_CONTEXT_AFTER;

	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
	uptr shadow_p = shadow;
	uptr shadow_page = vm_map_round_page(shadow_p, HW_PAGE_MASK);

	/* rewind to start of context block */
	shadow &= ~((uptr)0xf);
	shadow -= 16 * before;

	n += scnprintf(buf + n, len - n,
	    " Shadow             0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f\n");

	for (i = 0; i < 1 + before + after; i++, shadow += 16) {
		if ((vm_map_round_page(shadow, HW_PAGE_MASK) != shadow_page) && !kasan_is_shadow_mapped(shadow)) {
			/* avoid unmapped shadow when crossing page boundaries */
			continue;
		}

		n += scnprintf(buf + n, len - n, " %16lx:", shadow);

		char *left = " ";
		char *right;

		for (j = 0; j < 16; j++) {
			uint8_t *x = (uint8_t *)(shadow + j);

			right = " ";
			if ((uptr)x == shadow_p) {
				left = "[";
				right = "]";
			} else if ((uptr)(x + 1) == shadow_p) {
				right = "";
			}

			n += scnprintf(buf + n, len - n, "%s%02x%s", left, (unsigned)*x, right);
			left = "";
		}
		n += scnprintf(buf + n, len - n, "\n");
	}

	n += scnprintf(buf + n, len - n, "\n");
	return n;
}
static void NOINLINE
kasan_report_internal(uptr p, uptr width, access_t access, violation_t reason, bool dopanic)
{
	const size_t len = 4096;
	static char buf[len];
	size_t n = 0;

	uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
	uint8_t shadow_type = *shadow_ptr;
	const char *shadow_str = shadow_strings[shadow_type];
	if (!shadow_str) {
		shadow_str = "<invalid>";
	}
	buf[0] = '\0';

	if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
		n += scnprintf(buf + n, len - n, "KASan: free of corrupted/invalid object %#lx\n", p);
	} else if (reason == REASON_MOD_AFTER_FREE) {
		n += scnprintf(buf + n, len - n, "KASan: UaF of quarantined object %#lx\n", p);
	} else {
		n += scnprintf(buf + n, len - n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
		    width, access_str(access), p, shadow_str);
	}
	n += kasan_shadow_crashlog(p, buf + n, len - n);

	if (dopanic) {
		panic("%s", buf);
	} else {
		printf("%s", buf);
	}
}
static void NOINLINE OS_NORETURN
kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason)
{
	kasan_handle_test();
	kasan_report_internal(p, width, access, reason, true);
	__builtin_unreachable(); /* we can't handle this returning anyway */
}
static void
kasan_log_report(uptr p, uptr width, access_t access, violation_t reason)
{
	const size_t len = 256;
	char buf[len];
	size_t l = 0;
	uint32_t nframes = 14;
	uintptr_t frames[nframes];
	uintptr_t *bt = frames;

	kasan_report_internal(p, width, access, reason, false);

	/*
	 * print a backtrace
	 */

	nframes = backtrace_frame(bt, nframes, __builtin_frame_address(0),
	    NULL); /* ignore current frame */

	buf[0] = '\0';
	l += scnprintf(buf + l, len - l, "Backtrace: ");
	for (uint32_t i = 0; i < nframes; i++) {
		l += scnprintf(buf + l, len - l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
	}
	l += scnprintf(buf + l, len - l, "\n");

	printf("%s", buf);
}
#define REPORT_DECLARE(n) \
	void OS_NORETURN __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD, 0); } \
	void OS_NORETURN __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE, 0); } \
	void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_load##n, uptr a, int32_t b); \
	void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_store##n, uptr a, int32_t b);

REPORT_DECLARE(1)
REPORT_DECLARE(2)
REPORT_DECLARE(4)
REPORT_DECLARE(8)
REPORT_DECLARE(16)
void OS_NORETURN
__asan_report_load_n(uptr p, unsigned long sz)
{
	kasan_crash_report(p, sz, TYPE_LOAD, 0);
}

void OS_NORETURN
__asan_report_store_n(uptr p, unsigned long sz)
{
	kasan_crash_report(p, sz, TYPE_STORE, 0);
}
/* unpoison the current stack */
void NOINLINE
kasan_unpoison_curstack(bool whole_stack)
{
	uintptr_t base = ml_stack_base();
	size_t sz = ml_stack_size();
	uintptr_t cur = (uintptr_t)&base;

	if (whole_stack) {
		cur = base;
	}

	if (cur >= base && cur < base + sz) {
		/* unpoison from current stack depth to the top */
		size_t unused = cur - base;
		kasan_unpoison_stack(cur, sz - unused);
	}
}
void NOINLINE
__asan_handle_no_return(void)
{
	kasan_unpoison_curstack(false);

	/*
	 * No need to free any fakestack objects because they must stay alive until
	 * we drop the real stack, at which point we can drop the entire fakestack
	 * anyway.
	 */
}
bool NOINLINE
kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
{
	uint8_t *shadow;
	vm_size_t i;

	if (!kasan_enabled) {
		return false;
	}

	size += kasan_granule_partial(base);
	base = kasan_granule_trunc(base);

	shadow = SHADOW_FOR_ADDRESS(base);
	size_t limit = (size + KASAN_GRANULE - 1) / KASAN_GRANULE;

	/* XXX: to make debugging easier, catch unmapped shadow here */

	for (i = 0; i < limit; i++, size -= KASAN_GRANULE) {
		uint8_t s = shadow[i];
		if (s == 0 || (size < KASAN_GRANULE && s >= size && s < KASAN_GRANULE)) {
			/* valid */
		} else {
			goto fail;
		}
	}

	return false;

fail:
	if (first_invalid) {
		/* XXX: calculate the exact first byte that failed */
		*first_invalid = base + i * 8;
	}
	return true;
}
static void NOINLINE
kasan_init_globals(vm_offset_t base, vm_size_t size)
{
	struct asan_global *glob = (struct asan_global *)base;
	struct asan_global *glob_end = (struct asan_global *)(base + size);
	for (; glob < glob_end; glob++) {
		/* handle one global */
		kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
	}
}
void NOINLINE
kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
{
	unsigned long sectsz;
	void *sect;

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_load_kext(base, bundleid);
#endif

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
		kexts_loaded++;
	}
}
void NOINLINE
kasan_unload_kext(vm_offset_t base, vm_size_t size)
{
	unsigned long sectsz;
	void *sect;

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_unpoison((void *)base, size);
		kexts_loaded--;
	}

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_unload_kext(base);
#endif
}
/*
 * Turn off as much as possible for panic path etc. There's no way to turn it back
 * on.
 */
void NOINLINE
kasan_disable(void)
{
	__asan_option_detect_stack_use_after_return = 0;
	fakestack_enabled = 0;
	kasan_enabled = 0;
	quarantine_enabled = 0;
	enabled_checks = 0;
}
static void NOINLINE
kasan_init_xnu_globals(void)
{
	const char *seg = KASAN_GLOBAL_SEGNAME;
	const char *sect = KASAN_GLOBAL_SECTNAME;
	unsigned long _size;
	vm_offset_t globals;
	vm_size_t size;

	kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;

	if (!header) {
		printf("KASan: failed to find kernel mach header\n");
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}

	globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
	if (!globals) {
		printf("KASan: failed to find segment %s section %s\n", seg, sect);
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}
	size = (vm_size_t)_size;

	printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
	printf("KASan: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));

	kasan_init_globals(globals, size);
}
void NOINLINE
kasan_late_init(void)
{
#if KASAN_DYNAMIC_BLACKLIST
	kasan_init_dybl();
#endif

	kasan_init_fakestack();
	kasan_init_xnu_globals();
}
void NOINLINE
kasan_notify_stolen(vm_offset_t top)
{
	kasan_map_shadow(kernel_vtop, top - kernel_vtop, false);
}
static void NOINLINE
kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
{
#if KASAN_DEBUG
	vm_size_t i;
	uint8_t tmp1, tmp2;

	/* Hit every byte in the shadow map. Don't write due to the zero mappings. */
	for (i = 0; i < sz; i += sizeof(uint64_t)) {
		vm_offset_t addr = base + i;
		uint8_t *x = SHADOW_FOR_ADDRESS(addr);
		tmp1 = *x;
		asm volatile ("" ::: "memory");
		tmp2 = *x;
		asm volatile ("" ::: "memory");
		assert(tmp1 == tmp2);
	}
#else
	(void)base;
	(void)sz;
#endif
}
void NOINLINE
kasan_init(void)
{
	unsigned arg;

	simple_lock_init(&kasan_vm_lock, 0);

	/* Map all of the kernel text and data */
	kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);

	kasan_arch_init();
	/*
	 * handle KASan boot-args
	 */

	if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
		enabled_checks = arg;
	}

	if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
		if (arg & KASAN_ARGS_FAKESTACK) {
			fakestack_enabled = 1;
		}
		if (arg & KASAN_ARGS_REPORTIGNORED) {
			report_ignored = 1;
		}
		if (arg & KASAN_ARGS_NODYCHECKS) {
			enabled_checks &= ~TYPE_DYNAMIC;
		}
		if (arg & KASAN_ARGS_NOPOISON_HEAP) {
			enabled_checks &= ~TYPE_POISON_HEAP;
		}
		if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
			enabled_checks &= ~TYPE_POISON_GLOBAL;
		}
		if (arg & KASAN_ARGS_CHECK_LEAKS) {
			enabled_checks |= TYPE_LEAK;
		}
	}

	if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
		free_yield = arg;
	}

	if (PE_parse_boot_argn("kasan.leak_threshold", &arg, sizeof(arg))) {
		leak_threshold = arg;
	}

	if (PE_parse_boot_argn("kasan.leak_fatal_threshold", &arg, sizeof(arg))) {
		leak_fatal_threshold = arg;
	}

	/* kasan.bl boot-arg handled in kasan_init_dybl() */

	quarantine_enabled = 1;
	kasan_enabled = 1;
}
static void NOINLINE
kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool is_zero)
{
	assert(address < VM_MAX_KERNEL_ADDRESS);

	if (!kasan_enabled) {
		return;
	}

	if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		/* only map kernel addresses */
		return;
	}

	if (!size) {
		/* nothing to map */
		return;
	}

	boolean_t flags;
	kasan_lock(&flags);
	kasan_map_shadow(address, size, is_zero);
	kasan_unlock(flags);
	kasan_debug_touch_mappings(address, size);
}
void
kasan_notify_address(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, false);
}
/*
 * Allocate read-only, all-zeros shadow for memory that can never be poisoned
 */
void
kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, true);
}
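/*
 * Implementation note (based on the is_zero flag above): the "nopoison"
 * variant asks kasan_map_shadow() for shadow that can be backed by a shared
 * all-zeros mapping, since such memory is always valid and its shadow will
 * never be written. Regular notifications get writable shadow so that
 * redzones and free-poisoning can be recorded.
 */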
struct kasan_alloc_header {
	uint16_t magic;
	uint16_t crc;
	uint32_t alloc_size;
	uint32_t user_size;
	struct {
		uint32_t left_rz : 32 - BACKTRACE_BITS;
		uint32_t frames  : BACKTRACE_BITS;
	};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

struct kasan_alloc_footer {
	uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");
#define LIVE_XOR ((uint16_t)0x3a65)
#define FREE_XOR ((uint16_t)0xf233)

static uint16_t
magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
{
	uint16_t magic = addr & 0xFFFF;
	magic ^= (addr >> 16) & 0xFFFF;
	magic ^= (addr >> 32) & 0xFFFF;
	magic ^= (addr >> 48) & 0xFFFF;
	return magic ^ magic_xor;
}
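/*
 * Illustrative summary of the scheme above: the four 16-bit halves of the
 * address are folded together with XOR and then mixed with LIVE_XOR or
 * FREE_XOR, so a header records both which address it belongs to and whether
 * the object is live or quarantined. E.g. for addr 0xffffff8012345678,
 * magic_for_addr(addr, LIVE_XOR) == 0x5678 ^ 0x1234 ^ 0xff80 ^ 0xffff ^ 0x3a65.
 */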
static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)
{
	return (void *)(addr - sizeof(struct kasan_alloc_header));
}
static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
	*size = rightrz;
	return (void *)(addr + h->user_size);
}
/*
 * size: user-requested allocation size
 * ret:  minimum size for the real allocation
 */
vm_size_t
kasan_alloc_resize(vm_size_t size)
{
	vm_size_t tmp;
	if (os_add_overflow(size, 4 * PAGE_SIZE, &tmp)) {
		panic("allocation size overflow (%lu)", size);
	}

	if (size >= 128) {
		/* Add a little extra right redzone to larger objects. Gives us extra
		 * overflow protection, and more space for the backtrace. */
		size += 16;
	}

	/* add left and right redzones */
	size += KASAN_GUARD_PAD;

	/* ensure the final allocation is a multiple of the granule */
	size = kasan_granule_round(size);

	return size;
}
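/*
 * Resulting allocation layout (illustrative, derived from kasan_alloc_resize()
 * and the header/footer helpers above):
 *
 *   | left redzone      | user data | right redzone           |
 *   |       ...  header |           | footer (backtrace)  ... |
 *   ^ real allocation   ^ user pointer
 *
 * The kasan_alloc_header sits at the end of the left redzone, immediately
 * before the user pointer, and the backtrace footer occupies the start of
 * the right redzone.
 */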
extern vm_offset_t vm_kernel_slid_base;
static vm_size_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
	uintptr_t buf[BACKTRACE_MAXFRAMES];
	uintptr_t *bt = buf;

	sz /= sizeof(uint32_t);
	vm_size_t frames = sz;

	if (frames > 0) {
		frames = min((uint32_t)(frames + skip), BACKTRACE_MAXFRAMES);
		frames = backtrace(bt, (uint32_t)frames, NULL);

		/* drop the requested number of top frames */
		while (frames > sz && skip > 0) {
			bt++;
			frames--;
			skip--;
		}

		/* only store the offset from kernel base, and cram that into 32
		 * bits */
		for (vm_size_t i = 0; i < frames; i++) {
			ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
		}
	}
	return frames;
}
/* addr: user address of allocation */
static uint16_t
kasan_alloc_crc(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;

	uint16_t crc_orig = h->crc;
	h->crc = 0;

	uint16_t crc = 0;
	crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
	crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);

	h->crc = crc_orig;

	return crc;
}
static vm_size_t
kasan_alloc_retrieve_bt(vm_address_t addr, uintptr_t frames[static BACKTRACE_MAXFRAMES])
{
	vm_size_t num_frames = 0;
	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(addr);
	uptr max_search = shadow - 4096;
	vm_address_t alloc_base = 0;
	size_t fsize = 0;

	/* walk the shadow backwards to find the allocation base */
	while (shadow >= max_search) {
		if (*(uint8_t *)shadow == ASAN_HEAP_LEFT_RZ) {
			alloc_base = ADDRESS_FOR_SHADOW(shadow) + 8;
			break;
		}
		shadow--;
	}

	if (alloc_base) {
		struct kasan_alloc_header *header = header_for_user_addr(alloc_base);
		if (magic_for_addr(alloc_base, LIVE_XOR) == header->magic) {
			struct kasan_alloc_footer *footer = footer_for_user_addr(alloc_base, &fsize);
			if ((fsize / sizeof(footer->backtrace[0])) >= header->frames) {
				num_frames = header->frames;
				for (size_t i = 0; i < num_frames; i++) {
					frames[i] = footer->backtrace[i] + vm_kernel_slid_base;
				}
			}
		}
	}

	return num_frames;
}
/*
 * addr: base address of full allocation (including redzones)
 * size: total size of allocation (include redzones)
 * req:  user-requested allocation size
 * lrz:  size of the left redzone in bytes
 * ret:  address of usable allocation
 */
vm_address_t NOINLINE
kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	if (!addr) {
		return 0;
	}
	assert(size > 0);
	assert(kasan_granule_partial(addr) == 0);
	assert(kasan_granule_partial(size) == 0);

	vm_size_t rightrz = size - req - leftrz;

	kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
	kasan_rz_clobber(addr, req, leftrz, rightrz);

	addr += leftrz;

	/* stash the allocation sizes in the left redzone */
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	h->magic = magic_for_addr(addr, LIVE_XOR);
	h->left_rz = (uint32_t)leftrz;
	h->alloc_size = (uint32_t)size;
	h->user_size = (uint32_t)req;

	/* ... and a backtrace in the right redzone */
	vm_size_t fsize;
	struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
	h->frames = (uint32_t)kasan_alloc_bt(f->backtrace, fsize, 2);

	/* checksum the whole object, minus the user part */
	h->crc = kasan_alloc_crc(addr);

	return addr;
}
/*
 * addr: user pointer
 * size: returns full original allocation size
 * ret:  original allocation ptr
 */
vm_address_t NOINLINE
kasan_dealloc(vm_offset_t addr, vm_size_t *size)
{
	assert(size && addr);
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	*size = h->alloc_size;
	h->magic = 0; /* clear the magic so the debugger doesn't find a bogus object */
	return addr - h->left_rz;
}
/*
 * return the original user-requested allocation size
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	assert(h->magic == magic_for_addr(addr, LIVE_XOR));
	return h->user_size;
}
/*
 * Verify that `addr' (user pointer) is a valid allocation of `type'
 */
void NOINLINE
kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);

	/* map heap type to an internal access type */
	access_t type = heap_type == KASAN_HEAP_KALLOC    ? TYPE_KFREE :
	    heap_type == KASAN_HEAP_ZALLOC                ? TYPE_ZFREE :
	    heap_type == KASAN_HEAP_FAKESTACK             ? TYPE_FSFREE : 0;

	/* check the magic and crc match */
	if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}
	if (h->crc != kasan_alloc_crc(addr)) {
		kasan_violation(addr, size, type, REASON_MOD_OOB);
	}

	/* check the freed size matches what we recorded at alloc time */
	if (h->user_size != size) {
		kasan_violation(addr, size, type, REASON_INVALID_SIZE);
	}

	vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;

	/* Check that the redzones are valid */
	if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
	    !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}

	/* Check the allocated range is not poisoned */
	kasan_check_range((void *)addr, size, type);
}
struct freelist_entry {
	uint16_t magic;
	uint16_t crc;

	STAILQ_ENTRY(freelist_entry) list;

	union {
		struct {
			vm_size_t size      : 28;
			vm_size_t user_size : 28;
			vm_size_t frames    : BACKTRACE_BITS; /* number of frames in backtrace */
			vm_size_t __unused  : 8 - BACKTRACE_BITS;
		};
		uint64_t bits;
	};

	zone_t zone;

	uint32_t backtrace[];
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");

struct quarantine {
	STAILQ_HEAD(freelist_head, freelist_entry) freelist;
	unsigned long entries;
	unsigned long max_entries;
	vm_size_t size;
	vm_size_t max_size;
};

struct quarantine quarantines[] = {
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};
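/*
 * Quarantine behaviour in brief (as implemented by kasan_free_internal()
 * below): freed objects are poisoned and parked on a per-heap STAILQ instead
 * of being returned to the allocator immediately. Each list is bounded both
 * by entry count (QUARANTINE_ENTRIES) and by total bytes (QUARANTINE_MAXSIZE);
 * once a bound is exceeded the oldest entry is validated (magic + CRC) and
 * only then handed back, which is what gives use-after-free bugs a window in
 * which they are detectable.
 */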
static uint16_t
fle_crc(struct freelist_entry *fle)
{
	return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
}
/*
 * addr, sizep: pointer/size of full allocation including redzone
 */
void NOINLINE
kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
    zone_t *zone, vm_size_t user_size, int locked,
    bool doquarantine)
{
	vm_size_t size = *sizep;
	vm_offset_t addr = *(vm_offset_t *)addrp;

	assert(type >= 0 && type < KASAN_HEAP_TYPES);
	if (type == KASAN_HEAP_KALLOC) {
		/* zero-size kalloc allocations are allowed */
		assert(!zone);
	} else if (type == KASAN_HEAP_ZALLOC) {
		assert(zone && user_size);
	} else if (type == KASAN_HEAP_FAKESTACK) {
		assert(zone && user_size);
	}

	/* clobber the entire freed region */
	kasan_rz_clobber(addr, 0, size, 0);

	if (!doquarantine || !quarantine_enabled) {
		goto free_current;
	}

	/* poison the entire freed region */
	uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
	kasan_poison(addr, 0, size, 0, flags);

	struct freelist_entry *fle, *tofree = NULL;
	struct quarantine *q = &quarantines[type];
	assert(size >= sizeof(struct freelist_entry));

	/* create a new freelist entry */
	fle = (struct freelist_entry *)addr;
	fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
	fle->size = size;
	fle->user_size = user_size;
	fle->frames = 0;
	fle->zone = ZONE_NULL;
	if (zone) {
		fle->zone = *zone;
	}
	if (type != KASAN_HEAP_FAKESTACK) {
		/* don't do expensive things on the fakestack path */
		fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
		fle->crc = fle_crc(fle);
	}

	boolean_t flg;
	if (!locked) {
		kasan_lock(&flg);
	}

	if (q->size + size > q->max_size) {
		/*
		 * Adding this entry would put us over the max quarantine size. Free the
		 * larger of the current object and the quarantine head object.
		 */
		tofree = STAILQ_FIRST(&q->freelist);
		if (fle->size > tofree->size) {
			goto free_current_locked;
		}
	}

	STAILQ_INSERT_TAIL(&q->freelist, fle, list);
	q->entries++;
	q->size += size;

	/* free the oldest entry, if necessary */
	if (tofree || q->entries > q->max_entries) {
		tofree = STAILQ_FIRST(&q->freelist);
		STAILQ_REMOVE_HEAD(&q->freelist, list);

		assert(q->entries > 0 && q->size >= tofree->size);
		q->entries--;
		q->size -= tofree->size;

		if (type != KASAN_HEAP_KALLOC) {
			assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS &&
			    (vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS);
			*zone = tofree->zone;
		}

		size = tofree->size;
		addr = (vm_offset_t)tofree;

		/* check the magic and crc match */
		if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}
		if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}

		/* clobber the quarantine header */
		__nosan_bzero((void *)addr, sizeof(struct freelist_entry));
	} else {
		/* quarantine is not full - don't really free anything */
		addr = 0;
	}

free_current_locked:
	if (!locked) {
		kasan_unlock(flg);
	}

free_current:
	*addrp = (void *)addr;
	if (addr) {
		kasan_unpoison((void *)addr, size);
	}
}
void NOINLINE
kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
    vm_size_t user_size, bool quarantine)
{
	kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);

	if (free_yield) {
		thread_yield_internal(free_yield);
	}
}
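/*
 * Sketch of how an allocator is expected to drive these hooks (illustrative
 * pseudo-code, not a real zalloc/kalloc path; raw_allocate()/raw_free() and
 * the left-redzone size are hypothetical placeholders):
 *
 *   // allocation
 *   vm_size_t real_sz = kasan_alloc_resize(req);
 *   vm_offset_t raw = raw_allocate(real_sz);                  // hypothetical
 *   vm_address_t user = kasan_alloc(raw, real_sz, req, KASAN_GUARD_SIZE);
 *
 *   // free
 *   kasan_check_free(user, req, KASAN_HEAP_KALLOC);
 *   vm_size_t full_sz;
 *   void *p = (void *)kasan_dealloc(user, &full_sz);
 *   kasan_free(&p, &full_sz, KASAN_HEAP_KALLOC, NULL, req, true);
 *   if (p) {                                                  // NULL while quarantined
 *       raw_free(p, full_sz);                                 // hypothetical
 *   }
 *
 * kasan_free() may rewrite the pointer/size to an older quarantined object
 * (or clear the pointer), so the caller must free what comes back rather
 * than what it passed in.
 */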
uptr
__asan_load_cxx_array_cookie(uptr *p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)p);
	if (*shadow == ASAN_ARRAY_COOKIE) {
		return *p;
	} else if (*shadow == ASAN_HEAP_FREED) {
		return 0;
	} else {
		return *p;
	}
}
void
__asan_poison_cxx_array_cookie(uptr p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(p);
	*shadow = ASAN_ARRAY_COOKIE;
}
/*
 * Unpoison the C++ array cookie (if it exists). We don't know exactly where it
 * lives relative to the start of the buffer, but it's always the word immediately
 * before the start of the array data, so for naturally-aligned objects we need to
 * search at most 2 shadow bytes.
 */
void
kasan_unpoison_cxx_array_cookie(void *ptr)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)ptr);
	for (size_t i = 0; i < 2; i++) {
		if (shadow[i] == ASAN_ARRAY_COOKIE) {
			shadow[i] = ASAN_VALID;
			return;
		} else if (shadow[i] != ASAN_VALID) {
			/* must have seen the cookie by now */
			return;
		}
	}
}
#define ACCESS_CHECK_DECLARE(type, sz, access) \
	void __asan_##type##sz(uptr addr) { \
		kasan_check_range((const void *)addr, sz, access); \
	} \
	void OS_NORETURN UNSUPPORTED_API(__asan_exp_##type##sz, uptr a, int32_t b);
ACCESS_CHECK_DECLARE(load,  1, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  2, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  4, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  8, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  16, TYPE_LOAD);
ACCESS_CHECK_DECLARE(store, 1, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 2, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 4, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 8, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 16, TYPE_STORE);
void
__asan_loadN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_LOAD);
}

void
__asan_storeN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_STORE);
}
static void
kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
{
	__nosan_memset((void *)addr, val, sz);
}
#define SET_SHADOW_DECLARE(val) \
	void __asan_set_shadow_##val(uptr addr, size_t sz) { \
		kasan_set_shadow(addr, sz, 0x##val); \
	}

SET_SHADOW_DECLARE(00)
SET_SHADOW_DECLARE(f1)
SET_SHADOW_DECLARE(f2)
SET_SHADOW_DECLARE(f3)
SET_SHADOW_DECLARE(f5)
SET_SHADOW_DECLARE(f8)
/*
 * Call 'cb' for each contiguous range of the shadow map. This could be more
 * efficient by walking the page table directly.
 */
int
kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
{
	uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
	uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
	shadow_base = vm_map_trunc_page(shadow_base, HW_PAGE_MASK);
	shadow_top = vm_map_round_page(shadow_top, HW_PAGE_MASK);

	uintptr_t start = 0, end = 0;

	for (uintptr_t addr = shadow_base; addr < shadow_top; addr += HW_PAGE_SIZE) {
		if (kasan_is_shadow_mapped(addr)) {
			if (start == 0) {
				start = addr;
			}
			end = addr + HW_PAGE_SIZE;
		} else if (start && end) {
			cb(start, end, ctx);
			start = 0;
			end = 0;
		}
	}

	if (start && end) {
		cb(start, end, ctx);
	}

	return 0;
}
/*
 * XXX: implement these
 */

UNUSED_ABI(__asan_alloca_poison, uptr addr, uptr size);
UNUSED_ABI(__asan_allocas_unpoison, uptr top, uptr bottom);
UNUSED_ABI(__sanitizer_ptr_sub, uptr a, uptr b);
UNUSED_ABI(__sanitizer_ptr_cmp, uptr a, uptr b);
UNUSED_ABI(__sanitizer_annotate_contiguous_container, const void *a, const void *b, const void *c, const void *d);
UNUSED_ABI(__asan_poison_stack_memory, uptr addr, size_t size);
UNUSED_ABI(__asan_unpoison_stack_memory, uptr a, uptr b);
/*
 * Miscellaneous unimplemented asan ABI
 */

UNUSED_ABI(__asan_init, void);
UNUSED_ABI(__asan_register_image_globals, uptr a);
UNUSED_ABI(__asan_unregister_image_globals, uptr a);
UNUSED_ABI(__asan_before_dynamic_init, uptr a);
UNUSED_ABI(__asan_after_dynamic_init, void);
UNUSED_ABI(__asan_version_mismatch_check_v8, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_802, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_900, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1000, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1001, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1100, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1200, void);
void OS_NORETURN UNSUPPORTED_API(__asan_init_v5, void);
void OS_NORETURN UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
void OS_NORETURN UNSUPPORTED_API(__asan_unregister_globals, uptr a, uptr b);
void OS_NORETURN UNSUPPORTED_API(__asan_register_elf_globals, uptr a, uptr b, uptr c);
void OS_NORETURN UNSUPPORTED_API(__asan_unregister_elf_globals, uptr a, uptr b, uptr c);

void OS_NORETURN UNSUPPORTED_API(__asan_exp_loadN, uptr addr, size_t sz, int32_t e);
void OS_NORETURN UNSUPPORTED_API(__asan_exp_storeN, uptr addr, size_t sz, int32_t e);
void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_load_n, uptr addr, unsigned long b, int32_t c);
void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_store_n, uptr addr, unsigned long b, int32_t c);
static int
sysctl_kasan_test(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg2, struct sysctl_req *req)
{
	int mask = 0;
	int ch, err;

	err = sysctl_io_number(req, 0, sizeof(int), &mask, &ch);

	if (!err && mask) {
		kasan_test(mask, arg2);
	}

	return err;
}
static int
sysctl_fakestack_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, int __unused arg2, struct sysctl_req *req)
{
	int ch, err, val;

	err = sysctl_io_number(req, fakestack_enabled, sizeof(fakestack_enabled), &val, &ch);
	if (err == 0 && ch) {
		fakestack_enabled = !!val;
		__asan_option_detect_stack_use_after_return = !!val;
	}

	return err;
}
, OID_AUTO
, kasan
, CTLFLAG_RW
| CTLFLAG_LOCKED
, 0, "");
1520 SYSCTL_COMPAT_INT(_kern_kasan
, OID_AUTO
, available
, CTLFLAG_RD
, NULL
, KASAN
, "");
1521 SYSCTL_UINT(_kern_kasan
, OID_AUTO
, enabled
, CTLFLAG_RD
, &kasan_enabled
, 0, "");
1522 SYSCTL_UINT(_kern_kasan
, OID_AUTO
, checks
, CTLFLAG_RW
, &enabled_checks
, 0, "");
1523 SYSCTL_UINT(_kern_kasan
, OID_AUTO
, quarantine
, CTLFLAG_RW
, &quarantine_enabled
, 0, "");
1524 SYSCTL_UINT(_kern_kasan
, OID_AUTO
, report_ignored
, CTLFLAG_RW
, &report_ignored
, 0, "");
1525 SYSCTL_UINT(_kern_kasan
, OID_AUTO
, free_yield_ms
, CTLFLAG_RW
, &free_yield
, 0, "");
1526 SYSCTL_UINT(_kern_kasan
, OID_AUTO
, leak_threshold
, CTLFLAG_RW
, &leak_threshold
, 0, "");
1527 SYSCTL_UINT(_kern_kasan
, OID_AUTO
, leak_fatal_threshold
, CTLFLAG_RW
, &leak_fatal_threshold
, 0, "");
1528 SYSCTL_UINT(_kern_kasan
, OID_AUTO
, memused
, CTLFLAG_RD
, &shadow_pages_used
, 0, "");
1529 SYSCTL_UINT(_kern_kasan
, OID_AUTO
, memtotal
, CTLFLAG_RD
, &shadow_pages_total
, 0, "");
1530 SYSCTL_UINT(_kern_kasan
, OID_AUTO
, kexts
, CTLFLAG_RD
, &kexts_loaded
, 0, "");
1531 SYSCTL_COMPAT_UINT(_kern_kasan
, OID_AUTO
, debug
, CTLFLAG_RD
, NULL
, KASAN_DEBUG
, "");
1532 SYSCTL_COMPAT_UINT(_kern_kasan
, OID_AUTO
, zalloc
, CTLFLAG_RD
, NULL
, KASAN_ZALLOC
, "");
1533 SYSCTL_COMPAT_UINT(_kern_kasan
, OID_AUTO
, kalloc
, CTLFLAG_RD
, NULL
, KASAN_KALLOC
, "");
1534 SYSCTL_COMPAT_UINT(_kern_kasan
, OID_AUTO
, dynamicbl
, CTLFLAG_RD
, NULL
, KASAN_DYNAMIC_BLACKLIST
, "");
1536 SYSCTL_PROC(_kern_kasan
, OID_AUTO
, fakestack
,
1537 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1538 0, 0, sysctl_fakestack_enable
, "I", "");
1540 SYSCTL_PROC(_kern_kasan
, OID_AUTO
, test
,
1541 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1542 0, 0, sysctl_kasan_test
, "I", "");
1544 SYSCTL_PROC(_kern_kasan
, OID_AUTO
, fail
,
1545 CTLTYPE_INT
| CTLFLAG_RW
| CTLFLAG_LOCKED
,
1546 0, 1, sysctl_kasan_test
, "I", "");