/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/backtrace.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <kern/thread.h>
#include <machine/atomic.h>

#include <kasan_internal.h>
#include <memintrinsics.h>
const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_SHIFT;

static unsigned kexts_loaded;
unsigned shadow_pages_total;
unsigned shadow_pages_used;

vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;

static unsigned kasan_enabled;
static unsigned quarantine_enabled;
static unsigned enabled_checks = TYPE_ALL & ~TYPE_LEAK; /* bitmask of enabled checks */
static unsigned report_ignored;           /* issue non-fatal report for disabled/blacklisted checks */
static unsigned free_yield = 0;           /* ms yield after each free */
static unsigned leak_threshold = 3;       /* threshold for uninitialized memory leak detection */
static unsigned leak_fatal_threshold = 0; /* threshold for treating leaks as fatal errors (0 means never) */
static void kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason);
static void kasan_log_report(uptr p, uptr width, access_t access, violation_t reason);

/* imported osfmk functions */
extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);
/*
 * unused: expected to be called, but (currently) does nothing
 */
#define UNUSED_ABI(func, ...) \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Wunused-parameter\"") \
	void func(__VA_ARGS__); \
	void func(__VA_ARGS__) {}; \
	_Pragma("clang diagnostic pop")
static const size_t BACKTRACE_BITS = 4;
static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;

static vm_size_t kasan_alloc_retrieve_bt(vm_address_t addr, uintptr_t frames[static BACKTRACE_MAXFRAMES]);

decl_simple_lock_data(, kasan_vm_lock);
static thread_t kasan_lock_holder;
/*
 * kasan is called from the interrupt path, so we need to disable interrupts to
 * ensure atomicity when manipulating the global objects.
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&kasan_vm_lock, LCK_GRP_NULL);
	kasan_lock_holder = current_thread();
}

void
kasan_unlock(boolean_t b)
{
	kasan_lock_holder = THREAD_NULL;
	simple_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}

/* Return true if 'thread' holds the kasan lock. Only safe if 'thread' ==
 * current thread. */
bool
kasan_lock_held(thread_t thread)
{
	return thread && thread == kasan_lock_holder;
}
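
/*
 * Usage sketch (illustrative, not part of the original source): callers that
 * touch the shadow map or quarantine state bracket the critical section with
 * kasan_lock()/kasan_unlock(), threading the saved interrupt state through:
 *
 *	boolean_t flags;
 *	kasan_lock(&flags);
 *	... manipulate shadow map / quarantine ...
 *	kasan_unlock(flags);
 */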
static bool
kasan_check_enabled(access_t access)
{
	return kasan_enabled && (enabled_checks & access) && !kasan_is_blacklisted(access);
}

static bool
kasan_poison_active(uint8_t flags)
{
	switch (flags) {
	case ASAN_GLOBAL_RZ:
		return kasan_check_enabled(TYPE_POISON_GLOBAL);
	case ASAN_HEAP_RZ:
	case ASAN_HEAP_LEFT_RZ:
	case ASAN_HEAP_RIGHT_RZ:
	case ASAN_HEAP_FREED:
		return kasan_check_enabled(TYPE_POISON_HEAP);
	default:
		return true;
	}
}
/*
 * poison redzones in the shadow map
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz, uint8_t flags)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
	uint8_t partial = size & 0x07;
	vm_size_t total = leftrz + size + rightrz;
	vm_size_t i = 0;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert((total & 0x07) == 0);

	if (!kasan_enabled || !kasan_poison_active(flags)) {
		return;
	}

	/* convert byte counts to 8-byte shadow granules */
	leftrz /= 8;
	size /= 8;
	total /= 8;

	uint8_t l_flags = flags;
	uint8_t r_flags = flags;

	if (flags == ASAN_STACK_RZ) {
		l_flags = ASAN_STACK_LEFT_RZ;
		r_flags = ASAN_STACK_RIGHT_RZ;
	} else if (flags == ASAN_HEAP_RZ) {
		l_flags = ASAN_HEAP_LEFT_RZ;
		r_flags = ASAN_HEAP_RIGHT_RZ;
	}

	/*
	 * poison the redzones and unpoison the valid bytes
	 */
	for (; i < leftrz; i++) {
		shadow[i] = l_flags;
	}
	for (; i < leftrz + size; i++) {
		shadow[i] = ASAN_VALID; /* XXX: should not be necessary */
	}
	if (partial && (i < total)) {
		shadow[i] = partial;
		i++;
	}
	for (; i < total; i++) {
		shadow[i] = r_flags;
	}
}
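
/*
 * Worked example (illustrative): kasan_poison(base, 20, 16, 28, ASAN_HEAP_RZ)
 * covers 16 + 20 + 28 = 64 bytes, i.e. 8 shadow bytes. With one shadow byte
 * per 8-byte granule, the shadow for [base, base+64) becomes:
 *
 *	HEAP_LEFT_RZ HEAP_LEFT_RZ VALID VALID 0x04 HEAP_RIGHT_RZ HEAP_RIGHT_RZ HEAP_RIGHT_RZ
 *
 * The 0x04 partial byte (20 & 0x07) records that only the first 4 bytes of
 * that granule are addressable; the rest of the granule belongs to the right
 * redzone.
 */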
void
kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
{
	/* base must be 8-byte aligned */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((size & 0x07) == 0);
	kasan_poison(base, 0, 0, size, flags);
}
void NOINLINE
kasan_unpoison(void *base, vm_size_t size)
{
	kasan_poison((vm_offset_t)base, size, 0, 0, 0);
}

void NOINLINE
kasan_unpoison_stack(vm_offset_t base, vm_size_t size)
{
	/* align base and size to 8 bytes */
	vm_offset_t align = base & 0x7;
	base -= align;
	size += align;
	size = (size + 7) & ~0x7;

	kasan_unpoison((void *)base, size);
}
/*
 * write junk into the redzones
 */
static void NOINLINE
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
	vm_size_t i;
	const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
	const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
	uint8_t *buf = (uint8_t *)base;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert(((size + leftrz + rightrz) & 0x07) == 0);

	for (i = 0; i < leftrz; i++) {
		buf[i] = deadbeef[i % 4];
	}

	for (i = 0; i < rightrz; i++) {
		buf[i + size + leftrz] = c0ffee[i % 4];
	}
}
/*
 * Report a violation that may be disabled and/or blacklisted. This can only be
 * called for dynamic checks (i.e. where the fault is recoverable). Use
 * kasan_crash_report() for static (unrecoverable) violations.
 *
 * access: what we were trying to do when the violation occurred
 * reason: what failed about the access
 */
static void
kasan_violation(uintptr_t addr, size_t size, access_t access, violation_t reason)
{
	assert(__builtin_popcount(access) == 1);
	if (!kasan_check_enabled(access)) {
		if (report_ignored) {
			kasan_log_report(addr, size, access, reason);
		}
		return;
	}
	kasan_crash_report(addr, size, access, reason);
}
void NOINLINE
kasan_check_range(const void *x, size_t sz, access_t access)
{
	uintptr_t invalid;
	uintptr_t ptr = (uintptr_t)x;
	if (kasan_range_poisoned(ptr, sz, &invalid)) {
		size_t remaining = sz - (invalid - ptr);
		kasan_violation(invalid, remaining, access, 0);
	}
}
/*
 * Return true if [base, base+sz) is unpoisoned or has given shadow value.
 */
static bool
kasan_check_shadow(vm_address_t base, vm_size_t sz, uint8_t shadow)
{
	/* skip ahead to the next granule boundary */
	sz -= 8 - (base % 8);
	base += 8 - (base % 8);

	vm_address_t end = base + sz;

	while (base < end) {
		uint8_t *sh = SHADOW_FOR_ADDRESS(base);
		if (*sh && *sh != shadow) {
			return false;
		}
		base += 8;
	}
	return true;
}
static void
kasan_report_leak(vm_address_t base, vm_size_t sz, vm_offset_t offset, vm_size_t leak_sz)
{
	if (leak_fatal_threshold > leak_threshold && leak_sz >= leak_fatal_threshold) {
		kasan_violation(base + offset, leak_sz, TYPE_LEAK, REASON_UNINITIALIZED);
	}

	char string_rep[BACKTRACE_MAXFRAMES * 20] = {};
	vm_offset_t stack_base = dtrace_get_kernel_stack(current_thread());
	bool is_stack = (base >= stack_base && base < (stack_base + kernel_stack_size));

	if (!is_stack) {
		/* heap allocation: include the allocation backtrace, if we can find it */
		uintptr_t alloc_bt[BACKTRACE_MAXFRAMES] = {};
		vm_size_t num_frames = 0;
		size_t l = 0;
		num_frames = kasan_alloc_retrieve_bt(base, alloc_bt);
		for (vm_size_t i = 0; i < num_frames; i++) {
			l += scnprintf(string_rep + l, sizeof(string_rep) - l, " %lx", alloc_bt[i]);
		}
	}

	DTRACE_KASAN5(leak_detected,
	    vm_address_t, base,
	    vm_size_t, sz,
	    vm_offset_t, offset,
	    vm_size_t, leak_sz,
	    char *, string_rep);
}
/*
 * Check for possible uninitialized memory contained in [base, base+sz).
 */
void
kasan_check_uninitialized(vm_address_t base, vm_size_t sz)
{
	if (!(enabled_checks & TYPE_LEAK) || sz < leak_threshold) {
		return;
	}

	vm_address_t cur = base;
	vm_address_t end = base + sz;
	vm_size_t count = 0;
	vm_size_t max_count = 0;
	vm_address_t leak_offset = 0;
	uint8_t byte = 0;

	/* find the longest run of the uninitialized fill pattern */
	while (cur < end) {
		byte = *(uint8_t *)cur;
		count = (byte == KASAN_UNINITIALIZED_HEAP) ? (count + 1) : 0;
		if (count > max_count) {
			max_count = count;
			leak_offset = cur - (count - 1) - base;
		}
		cur += 1;
	}

	if (max_count >= leak_threshold) {
		kasan_report_leak(base, sz, leak_offset, max_count);
	}
}
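
/*
 * Worked example (illustrative): with leak_threshold = 3, a freed 8-byte
 * buffer containing { 0xbe, KASAN_UNINITIALIZED_HEAP x4, 0x00, ... } has a
 * longest uninitialized-pattern run of 4 starting at offset 1, so
 * kasan_report_leak(base, 8, 1, 4) fires. Runs shorter than 3 bytes are
 * ignored, and a run is only treated as fatal when leak_fatal_threshold is
 * set above leak_threshold and the run length reaches it.
 */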
/*
 * KASAN violation reporting
 */

static const char *
access_str(access_t type)
{
	if (type & TYPE_READ) {
		return "load from";
	} else if (type & TYPE_WRITE) {
		return "store to";
	} else if (type & TYPE_FREE) {
		return "free of";
	} else if (type & TYPE_LEAK) {
		return "leak from";
	} else {
		return "access of";
	}
}
static const char *shadow_strings[] = {
	[ASAN_VALID]          = "VALID",
	[ASAN_PARTIAL1]       = "PARTIAL1",
	[ASAN_PARTIAL2]       = "PARTIAL2",
	[ASAN_PARTIAL3]       = "PARTIAL3",
	[ASAN_PARTIAL4]       = "PARTIAL4",
	[ASAN_PARTIAL5]       = "PARTIAL5",
	[ASAN_PARTIAL6]       = "PARTIAL6",
	[ASAN_PARTIAL7]       = "PARTIAL7",
	[ASAN_STACK_LEFT_RZ]  = "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ]   = "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED]    = "STACK_FREED",
	[ASAN_STACK_OOSCOPE]  = "STACK_OOSCOPE",
	[ASAN_GLOBAL_RZ]      = "GLOBAL_RZ",
	[ASAN_HEAP_LEFT_RZ]   = "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ]  = "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED]     = "HEAP_FREED",
};
#define CRASH_CONTEXT_BEFORE 5
#define CRASH_CONTEXT_AFTER  5

static size_t
kasan_shadow_crashlog(uptr p, char *buf, size_t len)
{
	int i, j;
	size_t n = 0;
	int before = CRASH_CONTEXT_BEFORE;
	int after = CRASH_CONTEXT_AFTER;

	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
	uptr shadow_p = shadow;
	uptr shadow_page = vm_map_round_page(shadow_p, HW_PAGE_MASK);

	/* rewind to start of context block */
	shadow &= ~((uptr)0xf);
	shadow -= 16 * before;

	n += scnprintf(buf+n, len-n,
	    " Shadow             0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f\n");

	for (i = 0; i < 1 + before + after; i++, shadow += 16) {
		if ((vm_map_round_page(shadow, HW_PAGE_MASK) != shadow_page) && !kasan_is_shadow_mapped(shadow)) {
			/* avoid unmapped shadow when crossing page boundaries */
			continue;
		}

		n += scnprintf(buf+n, len-n, " %16lx:", shadow);

		const char *left = " ";
		const char *right;

		for (j = 0; j < 16; j++) {
			uint8_t *x = (uint8_t *)(shadow + j);

			right = " ";
			if ((uptr)x == shadow_p) {
				/* bracket the faulting shadow byte */
				left = "[";
				right = "]";
			} else if ((uptr)(x + 1) == shadow_p) {
				right = "";
			}

			n += scnprintf(buf+n, len-n, "%s%02x%s", left, (unsigned)*x, right);
			left = "";
		}
		n += scnprintf(buf+n, len-n, "\n");
	}

	n += scnprintf(buf+n, len-n, "\n");
	return n;
}
static void
kasan_report_internal(uptr p, uptr width, access_t access, violation_t reason, bool dopanic)
{
	const size_t len = 4096;
	static char buf[len];
	size_t n = 0;

	uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
	uint8_t shadow_type = *shadow_ptr;
	const char *shadow_str = shadow_strings[shadow_type];
	if (!shadow_str) {
		shadow_str = "<invalid>";
	}

	if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
		n += scnprintf(buf+n, len-n, "KASan: free of corrupted/invalid object %#lx\n", p);
	} else if (reason == REASON_MOD_AFTER_FREE) {
		n += scnprintf(buf+n, len-n, "KASan: UaF of quarantined object %#lx\n", p);
	} else {
		n += scnprintf(buf+n, len-n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
		    width, access_str(access), p, shadow_str);
	}
	n += kasan_shadow_crashlog(p, buf+n, len-n);

	if (dopanic) {
		panic("%s", buf);
	} else {
		printf("%s", buf);
	}
}
static void NOINLINE OS_NORETURN
kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason)
{
	kasan_report_internal(p, width, access, reason, true);
	__builtin_unreachable(); /* we can't handle this returning anyway */
}
static void
kasan_log_report(uptr p, uptr width, access_t access, violation_t reason)
{
	const size_t len = 256;
	char buf[len];
	size_t l = 0;
	uint32_t nframes = 14;
	uintptr_t frames[nframes];
	uintptr_t *bt = frames;

	kasan_report_internal(p, width, access, reason, false);

	/* print a backtrace */
	nframes = backtrace_frame(bt, nframes, __builtin_frame_address(0),
	    NULL); /* ignore current frame */

	buf[0] = '\0';
	l += scnprintf(buf+l, len-l, "Backtrace: ");
	for (uint32_t i = 0; i < nframes; i++) {
		l += scnprintf(buf+l, len-l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
	}
	l += scnprintf(buf+l, len-l, "\n");

	printf("%s", buf);
}
#define REPORT_DECLARE(n) \
	void OS_NORETURN __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD, 0); } \
	void OS_NORETURN __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE, 0); } \
	void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_load##n, uptr a, int32_t b); \
	void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_store##n, uptr a, int32_t b);

void OS_NORETURN __asan_report_load_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_LOAD, 0); }
void OS_NORETURN __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE, 0); }
/* unpoison the current stack */
void NOINLINE
kasan_unpoison_curstack(bool whole_stack)
{
	uintptr_t base = ml_stack_base();
	size_t sz = ml_stack_size();
	uintptr_t cur = (uintptr_t)&base;

	if (whole_stack) {
		cur = base;
	}

	if (cur >= base && cur < base + sz) {
		/* unpoison from current stack depth to the top */
		size_t unused = cur - base;
		kasan_unpoison_stack(cur, sz - unused);
	}
}

void NOINLINE
__asan_handle_no_return(void)
{
	kasan_unpoison_curstack(false);

	/*
	 * No need to free any fakestack objects because they must stay alive until
	 * we drop the real stack, at which point we can drop the entire fakestack
	 * mapping.
	 */
}
bool NOINLINE
kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
{
	uint8_t *shadow;
	vm_size_t i;

	if (!kasan_enabled) {
		return false;
	}

	size += base & 0x07;
	base &= ~(vm_offset_t)0x07;

	shadow = SHADOW_FOR_ADDRESS(base);
	vm_size_t limit = (size + 7) / 8;

	/* XXX: to make debugging easier, catch unmapped shadow here */

	for (i = 0; i < limit; i++, size -= 8) {
		uint8_t s = shadow[i];
		if (s == 0 || (size < 8 && s >= size && s <= 7)) {
			/* valid, or partial granule that still covers the range */
		} else {
			goto fail;
		}
	}

	return false;

fail:
	if (first_invalid) {
		/* XXX: calculate the exact first byte that failed */
		*first_invalid = base + i * 8;
	}
	return true;
}
void NOINLINE
kasan_init_globals(vm_offset_t base, vm_size_t size)
{
	struct asan_global *glob = (struct asan_global *)base;
	struct asan_global *glob_end = (struct asan_global *)(base + size);
	for (; glob < glob_end; glob++) {
		/* handle one global */
		kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
	}
}
void NOINLINE
kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
{
	unsigned long sectsz;
	void *sect;

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_load_kext(base, bundleid);
#endif

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
		kexts_loaded++;
	}
}
void NOINLINE
kasan_unload_kext(vm_offset_t base, vm_size_t size)
{
	unsigned long sectsz;
	void *sect;

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_unpoison((void *)base, size);
		kexts_loaded--;
	}

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_unload_kext(base);
#endif
}
/*
 * Turn off as much as possible for the panic path etc. There's no way to turn
 * it back on.
 */
void NOINLINE
kasan_disable(void)
{
	__asan_option_detect_stack_use_after_return = 0;
	fakestack_enabled = 0;
	quarantine_enabled = 0;
}
static void NOINLINE
kasan_init_xnu_globals(void)
{
	const char *seg = KASAN_GLOBAL_SEGNAME;
	const char *sect = KASAN_GLOBAL_SECTNAME;
	unsigned long _size;
	vm_offset_t globals;
	vm_size_t size;

	kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;

	if (!header) {
		printf("KASan: failed to find kernel mach header\n");
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}

	globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
	if (!globals) {
		printf("KASan: failed to find segment %s section %s\n", seg, sect);
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}
	size = (vm_size_t)_size;

	printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
	printf("KASan: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));

	kasan_init_globals(globals, size);
}
void NOINLINE
kasan_late_init(void)
{
#if KASAN_DYNAMIC_BLACKLIST
	kasan_init_dybl();
#endif

	kasan_init_fakestack();
	kasan_init_xnu_globals();
}

void NOINLINE
kasan_notify_stolen(vm_offset_t top)
{
	kasan_map_shadow(kernel_vtop, top - kernel_vtop, false);
}

static void NOINLINE
kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
{
	vm_size_t i;
	uint8_t tmp1, tmp2;

	/* Hit every byte in the shadow map. Don't write due to the zero mappings. */
	for (i = 0; i < sz; i += sizeof(uint64_t)) {
		vm_offset_t addr = base + i;
		uint8_t *x = SHADOW_FOR_ADDRESS(addr);
		tmp1 = *x;
		asm volatile("" ::: "memory");
		tmp2 = *x;
		asm volatile("" ::: "memory");
		assert(tmp1 == tmp2);
	}
}
void NOINLINE
kasan_init(void)
{
	unsigned arg;

	simple_lock_init(&kasan_vm_lock, 0);

	/* Map all of the kernel text and data */
	kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);

	/*
	 * handle KASan boot-args
	 */

	if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
		enabled_checks = arg;
	}

	if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
		if (arg & KASAN_ARGS_FAKESTACK) {
			fakestack_enabled = 1;
		}
		if (arg & KASAN_ARGS_REPORTIGNORED) {
			report_ignored = 1;
		}
		if (arg & KASAN_ARGS_NODYCHECKS) {
			enabled_checks &= ~TYPE_DYNAMIC;
		}
		if (arg & KASAN_ARGS_NOPOISON_HEAP) {
			enabled_checks &= ~TYPE_POISON_HEAP;
		}
		if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
			enabled_checks &= ~TYPE_POISON_GLOBAL;
		}
		if (arg & KASAN_ARGS_CHECK_LEAKS) {
			enabled_checks |= TYPE_LEAK;
		}
	}

	if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
		free_yield = arg;
	}

	if (PE_parse_boot_argn("kasan.leak_threshold", &arg, sizeof(arg))) {
		leak_threshold = arg;
	}

	if (PE_parse_boot_argn("kasan.leak_fatal_threshold", &arg, sizeof(arg))) {
		leak_fatal_threshold = arg;
	}

	/* kasan.bl boot-arg handled in kasan_init_dybl() */

	quarantine_enabled = 1;
	kasan_enabled = 1;
}
static void NOINLINE
kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool is_zero)
{
	assert(address < VM_MAX_KERNEL_ADDRESS);

	if (!kasan_enabled) {
		return;
	}

	if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		/* only map kernel addresses */
		return;
	}

	boolean_t flags;
	kasan_lock(&flags);
	kasan_map_shadow(address, size, is_zero);
	kasan_unlock(flags);
	kasan_debug_touch_mappings(address, size);
}

void
kasan_notify_address(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, false);
}

/*
 * Allocate read-only, all-zeros shadow for memory that can never be poisoned
 */
void
kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, true);
}
struct kasan_alloc_header {
	uint16_t magic;
	uint16_t crc;
	uint32_t alloc_size;
	uint32_t user_size;
	struct {
		uint32_t left_rz : 32 - BACKTRACE_BITS;
		uint32_t frames  : BACKTRACE_BITS;
	};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

struct kasan_alloc_footer {
	uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");

#define LIVE_XOR ((uint16_t)0x3a65)
#define FREE_XOR ((uint16_t)0xf233)
static uint16_t
magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
{
	uint16_t magic = addr & 0xFFFF;
	magic ^= (addr >> 16) & 0xFFFF;
	magic ^= (addr >> 32) & 0xFFFF;
	magic ^= (addr >> 48) & 0xFFFF;
	magic ^= magic_xor;
	return magic;
}
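
/*
 * Worked example (illustrative): for addr = 0xffffff8012345678, the four
 * 16-bit halves are 0x5678, 0x1234, 0xff80 and 0xffff, which XOR to 0x4433.
 * XORing in LIVE_XOR (0x3a65) gives a live-object magic of 0x7e56, while
 * FREE_XOR (0xf233) gives 0xb600, so a header can be checked against the
 * state it is supposed to be in.
 */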
static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)
{
	return (void *)(addr - sizeof(struct kasan_alloc_header));
}

static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
	*size = rightrz;
	return (void *)(addr + h->user_size);
}
/*
 * size: user-requested allocation size
 * ret: minimum size for the real allocation
 */
vm_size_t
kasan_alloc_resize(vm_size_t size)
{
	vm_size_t tmp;
	if (os_add_overflow(size, 4 * PAGE_SIZE, &tmp)) {
		panic("allocation size overflow (%lu)", size);
	}

	/* add left and right redzones */
	size += KASAN_GUARD_PAD;

	/* ensure the final allocation is an 8-byte multiple */
	size += 8 - (size % 8);

	return size;
}
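
/*
 * Worked example (illustrative, assuming KASAN_GUARD_PAD is 32 bytes): a
 * 20-byte request becomes 20 + 32 = 52 bytes once the redzones are added,
 * then 52 + (8 - 52 % 8) = 56 bytes, so the real allocation is an 8-byte
 * multiple with room for the header/footer metadata inside the redzones.
 */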
extern vm_offset_t vm_kernel_slid_base;

static vm_size_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
	uintptr_t buf[BACKTRACE_MAXFRAMES];
	uintptr_t *bt = buf;

	sz /= sizeof(uint32_t);
	vm_size_t frames = sz;

	if (frames > 0) {
		frames = min(frames + skip, BACKTRACE_MAXFRAMES);
		frames = backtrace(bt, frames, NULL);

		/* drop the requested number of innermost frames */
		while (frames > sz && skip > 0) {
			bt++;
			frames--;
			skip--;
		}

		/* only store the offset from kernel base, and cram that into 32
		 * bits */
		for (vm_size_t i = 0; i < frames; i++) {
			ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
		}
	}
	return frames;
}
/* addr: user address of allocation */
static uint16_t
kasan_alloc_crc(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;

	uint16_t crc_orig = h->crc;
	h->crc = 0;

	uint16_t crc = 0;
	crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
	crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);

	h->crc = crc_orig;

	return crc;
}
static vm_size_t
kasan_alloc_retrieve_bt(vm_address_t addr, uintptr_t frames[static BACKTRACE_MAXFRAMES])
{
	vm_size_t num_frames = 0;
	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(addr);
	uptr max_search = shadow - 4096;
	vm_address_t alloc_base = 0;
	vm_size_t fsize = 0;

	/* walk the shadow backwards to find the allocation base */
	while (shadow >= max_search) {
		if (*(uint8_t *)shadow == ASAN_HEAP_LEFT_RZ) {
			alloc_base = ADDRESS_FOR_SHADOW(shadow) + 8;
			break;
		}
		shadow--;
	}

	if (alloc_base) {
		struct kasan_alloc_header *header = header_for_user_addr(alloc_base);
		if (magic_for_addr(alloc_base, LIVE_XOR) == header->magic) {
			struct kasan_alloc_footer *footer = footer_for_user_addr(alloc_base, &fsize);
			if ((fsize / sizeof(footer->backtrace[0])) >= header->frames) {
				num_frames = header->frames;
				for (size_t i = 0; i < num_frames; i++) {
					frames[i] = footer->backtrace[i] + vm_kernel_slid_base;
				}
			}
		}
	}

	return num_frames;
}
/*
 * addr: base address of full allocation (including redzones)
 * size: total size of allocation (including redzones)
 * req: user-requested allocation size
 * lrz: size of the left redzone in bytes
 * ret: address of usable allocation
 */
vm_address_t
kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	assert((addr % 8) == 0);
	assert((size % 8) == 0);

	vm_size_t rightrz = size - req - leftrz;

	kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
	kasan_rz_clobber(addr, req, leftrz, rightrz);

	addr += leftrz;

	if (enabled_checks & TYPE_LEAK) {
		__nosan_memset((void *)addr, KASAN_UNINITIALIZED_HEAP, req);
	}

	/* stash the allocation sizes in the left redzone */
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	h->magic = magic_for_addr(addr, LIVE_XOR);
	h->left_rz = leftrz;
	h->alloc_size = size;
	h->user_size = req;

	/* ... and a backtrace in the right redzone */
	vm_size_t fsize;
	struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
	h->frames = kasan_alloc_bt(f->backtrace, fsize, 2);

	/* checksum the whole object, minus the user part */
	h->crc = kasan_alloc_crc(addr);

	return addr;
}
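
/*
 * Layout sketch (illustrative) of an instrumented heap object as produced by
 * kasan_alloc(); positions follow from header_for_user_addr() and
 * footer_for_user_addr() above:
 *
 *	base                              addr = base + leftrz
 *	|                                 |
 *	v                                 v
 *	+------------------+ - - header - +----------------+------------------+
 *	|   left redzone   (header at end)|   user data    |  right redzone   |
 *	|   HEAP_LEFT_RZ shadow           |  VALID shadow  |  footer + pad    |
 *	+------------------+ - - - - - - -+----------------+------------------+
 *
 * The header sits in the tail of the left redzone, immediately before the
 * user pointer, and the footer (allocation backtrace) occupies the start of
 * the right redzone.
 */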
/*
 * addr: user pointer
 * size: returns full original allocation size
 * ret: original allocation ptr
 */
vm_address_t
kasan_dealloc(vm_offset_t addr, vm_size_t *size)
{
	assert(size && addr);
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	*size = h->alloc_size;
	return addr - h->left_rz;
}
/*
 * return the original user-requested allocation size
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	assert(h->magic == magic_for_addr(addr, LIVE_XOR));
	return h->user_size;
}
/*
 * Verify that `addr' (user pointer) is a valid allocation of `type'
 */
void NOINLINE
kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);

	/* map heap type to an internal access type */
	access_t type = heap_type == KASAN_HEAP_KALLOC    ? TYPE_KFREE :
	    heap_type == KASAN_HEAP_ZALLOC    ? TYPE_ZFREE :
	    heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0;

	/* check the magic and crc match */
	if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}
	if (h->crc != kasan_alloc_crc(addr)) {
		kasan_violation(addr, size, type, REASON_MOD_OOB);
	}

	/* check the freed size matches what we recorded at alloc time */
	if (h->user_size != size) {
		kasan_violation(addr, size, type, REASON_INVALID_SIZE);
	}

	vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;

	/* Check that the redzones are valid */
	if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
	    !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}

	/* Check the allocated range is not poisoned */
	kasan_check_range((void *)addr, size, type);
}
struct freelist_entry {
	uint16_t magic;
	uint16_t crc;
	STAILQ_ENTRY(freelist_entry) list;
	union {
		struct {
			vm_size_t size      : 28;
			vm_size_t user_size : 28;
			vm_size_t frames    : BACKTRACE_BITS; /* number of frames in backtrace */
			vm_size_t __unused  : 8 - BACKTRACE_BITS;
		};
		uint64_t bits;
	};
	zone_t zone;
	uint32_t backtrace[];
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");

struct quarantine {
	STAILQ_HEAD(freelist_head, freelist_entry) freelist;
	unsigned long entries;
	unsigned long max_entries;
	vm_size_t size;
	vm_size_t max_size;
};

struct quarantine quarantines[] = {
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
};
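
/*
 * Behaviour sketch (illustrative): each heap type gets its own quarantine. On
 * free, the object is poisoned, overlaid with a struct freelist_entry and
 * appended to the tail of the matching quarantine; the real release back to
 * the allocator is deferred until either the entry count exceeds
 * QUARANTINE_ENTRIES or the byte total exceeds QUARANTINE_MAXSIZE, at which
 * point the oldest entry is evicted, re-checked for tampering via its magic
 * and CRC, and handed back to the caller to actually free.
 */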
static uint16_t
fle_crc(struct freelist_entry *fle)
{
	return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
}
/*
 * addr, sizep: pointer/size of full allocation including redzone
 */
void NOINLINE
kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
    zone_t *zone, vm_size_t user_size, int locked,
    bool doquarantine)
{
	vm_size_t size = *sizep;
	vm_offset_t addr = *(vm_offset_t *)addrp;

	assert(type >= 0 && type < KASAN_HEAP_TYPES);
	if (type == KASAN_HEAP_KALLOC) {
		/* zero-size kalloc allocations are allowed */
	} else if (type == KASAN_HEAP_ZALLOC) {
		assert(zone && user_size);
	} else if (type == KASAN_HEAP_FAKESTACK) {
		assert(zone && user_size);
	}

	/* clobber the entire freed region */
	kasan_rz_clobber(addr, 0, size, 0);

	if (!doquarantine || !quarantine_enabled) {
		goto free_current;
	}

	/* poison the entire freed region */
	uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
	kasan_poison(addr, 0, size, 0, flags);

	struct freelist_entry *fle, *tofree = NULL;
	struct quarantine *q = &quarantines[type];
	assert(size >= sizeof(struct freelist_entry));

	/* create a new freelist entry */
	fle = (struct freelist_entry *)addr;
	fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
	fle->size = size;
	fle->user_size = user_size;
	fle->zone = ZONE_NULL;
	if (zone) {
		fle->zone = *zone;
	}
	if (type != KASAN_HEAP_FAKESTACK) {
		/* don't do expensive things on the fakestack path */
		fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
		fle->crc = fle_crc(fle);
	}

	boolean_t flg;
	if (!locked) {
		kasan_lock(&flg);
	}

	if (q->size + size > q->max_size) {
		/*
		 * Adding this entry would put us over the max quarantine size. Free the
		 * larger of the current object and the quarantine head object.
		 */
		tofree = STAILQ_FIRST(&q->freelist);
		if (fle->size > tofree->size) {
			goto free_current_locked;
		}
	}

	STAILQ_INSERT_TAIL(&q->freelist, fle, list);
	q->entries++;
	q->size += size;

	/* free the oldest entry, if necessary */
	if (tofree || q->entries > q->max_entries) {
		tofree = STAILQ_FIRST(&q->freelist);
		STAILQ_REMOVE_HEAD(&q->freelist, list);

		assert(q->entries > 0 && q->size >= tofree->size);
		q->entries--;
		q->size -= tofree->size;

		if (type != KASAN_HEAP_KALLOC) {
			assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS &&
			    (vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS);
			*zone = tofree->zone;
		}

		size = tofree->size;
		addr = (vm_offset_t)tofree;

		/* check the magic and crc match */
		if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}
		if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}

		/* clobber the quarantine header */
		__nosan_bzero((void *)addr, sizeof(struct freelist_entry));
	} else {
		/* quarantine is not full - don't really free anything */
		addr = 0;
	}

free_current_locked:
	if (!locked) {
		kasan_unlock(flg);
	}

free_current:
	*addrp = (void *)addr;
	if (addr) {
		kasan_unpoison((void *)addr, size);
		*sizep = size;
	}
}
void NOINLINE
kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
    vm_size_t user_size, bool quarantine)
{
	kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);

	if (free_yield) {
		thread_yield_internal(free_yield);
	}
}
uptr
__asan_load_cxx_array_cookie(uptr *p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)p);
	if (*shadow == ASAN_ARRAY_COOKIE) {
		return *p;
	} else if (*shadow == ASAN_HEAP_FREED) {
		return 0;
	} else {
		return *p;
	}
}

void
__asan_poison_cxx_array_cookie(uptr p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(p);
	*shadow = ASAN_ARRAY_COOKIE;
}

/*
 * Unpoison the C++ array cookie (if it exists). We don't know exactly where it
 * lives relative to the start of the buffer, but it's always the word immediately
 * before the start of the array data, so for naturally-aligned objects we need to
 * search at most 2 shadow bytes.
 */
void
kasan_unpoison_cxx_array_cookie(void *ptr)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)ptr);
	for (size_t i = 0; i < 2; i++) {
		if (shadow[i] == ASAN_ARRAY_COOKIE) {
			shadow[i] = ASAN_VALID;
			return;
		} else if (shadow[i] != ASAN_VALID) {
			/* must have seen the cookie by now */
			return;
		}
	}
}
#define ACCESS_CHECK_DECLARE(type, sz, access) \
	void __asan_##type##sz(uptr addr) { \
		kasan_check_range((const void *)addr, sz, access); \
	} \
	void OS_NORETURN UNSUPPORTED_API(__asan_exp_##type##sz, uptr a, int32_t b);

ACCESS_CHECK_DECLARE(load,  1, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  2, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  4, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  8, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  16, TYPE_LOAD);
ACCESS_CHECK_DECLARE(store, 1, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 2, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 4, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 8, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 16, TYPE_STORE);
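
/*
 * For reference (illustrative expansion): ACCESS_CHECK_DECLARE(load, 8,
 * TYPE_LOAD) defines the instrumentation entry point the compiler emits for
 * every 8-byte load, roughly:
 *
 *	void __asan_load8(uptr addr) {
 *		kasan_check_range((const void *)addr, 8, TYPE_LOAD);
 *	}
 *
 * plus an UNSUPPORTED_API stub for the corresponding __asan_exp_load8 entry
 * point.
 */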
void
__asan_loadN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_LOAD);
}

void
__asan_storeN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_STORE);
}
static void
kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
{
	__nosan_memset((void *)addr, val, sz);
}

#define SET_SHADOW_DECLARE(val) \
	void __asan_set_shadow_##val(uptr addr, size_t sz) { \
		kasan_set_shadow(addr, sz, 0x##val); \
	}

SET_SHADOW_DECLARE(00)
SET_SHADOW_DECLARE(f1)
SET_SHADOW_DECLARE(f2)
SET_SHADOW_DECLARE(f3)
SET_SHADOW_DECLARE(f5)
SET_SHADOW_DECLARE(f8)
/*
 * Call 'cb' for each contiguous range of the shadow map. This could be more
 * efficient by walking the page table directly.
 */
void
kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
{
	uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
	uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
	shadow_base = vm_map_trunc_page(shadow_base, HW_PAGE_MASK);
	shadow_top = vm_map_round_page(shadow_top, HW_PAGE_MASK);

	uintptr_t start = 0, end = 0;

	for (uintptr_t addr = shadow_base; addr < shadow_top; addr += HW_PAGE_SIZE) {
		if (kasan_is_shadow_mapped(addr)) {
			if (start == 0) {
				start = addr;
			}
			end = addr + HW_PAGE_SIZE;
		} else if (start && end) {
			cb(start, end, ctx);
			start = 0;
			end = 0;
		}
	}

	/* flush the final range, if any */
	if (start && end) {
		cb(start, end, ctx);
	}
}
/*
 * XXX: implement these
 */

UNUSED_ABI(__asan_alloca_poison, uptr addr, uptr size);
UNUSED_ABI(__asan_allocas_unpoison, uptr top, uptr bottom);
UNUSED_ABI(__sanitizer_ptr_sub, uptr a, uptr b);
UNUSED_ABI(__sanitizer_ptr_cmp, uptr a, uptr b);
UNUSED_ABI(__sanitizer_annotate_contiguous_container, const void *a, const void *b, const void *c, const void *d);
UNUSED_ABI(__asan_poison_stack_memory, uptr addr, size_t size);
UNUSED_ABI(__asan_unpoison_stack_memory, uptr a, uptr b);

/*
 * Miscellaneous unimplemented asan ABI
 */

UNUSED_ABI(__asan_init, void);
UNUSED_ABI(__asan_register_image_globals, uptr a);
UNUSED_ABI(__asan_unregister_image_globals, uptr a);
UNUSED_ABI(__asan_before_dynamic_init, uptr a);
UNUSED_ABI(__asan_after_dynamic_init, void);
UNUSED_ABI(__asan_version_mismatch_check_v8, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_802, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_900, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1000, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1001, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1100, void);

void OS_NORETURN UNSUPPORTED_API(__asan_init_v5, void);
void OS_NORETURN UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
void OS_NORETURN UNSUPPORTED_API(__asan_unregister_globals, uptr a, uptr b);
void OS_NORETURN UNSUPPORTED_API(__asan_register_elf_globals, uptr a, uptr b, uptr c);
void OS_NORETURN UNSUPPORTED_API(__asan_unregister_elf_globals, uptr a, uptr b, uptr c);

void OS_NORETURN UNSUPPORTED_API(__asan_exp_loadN, uptr addr, size_t sz, int32_t e);
void OS_NORETURN UNSUPPORTED_API(__asan_exp_storeN, uptr addr, size_t sz, int32_t e);
void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_load_n, uptr addr, unsigned long b, int32_t c);
void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_store_n, uptr addr, unsigned long b, int32_t c);
static int
sysctl_kasan_test(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg2, struct sysctl_req *req)
{
	int mask = 0;
	int ch;
	int err;

	err = sysctl_io_number(req, 0, sizeof(int), &mask, &ch);
	if (!err && mask) {
		kasan_test(mask, arg2);
	}

	return err;
}

static int
sysctl_fakestack_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, int __unused arg2, struct sysctl_req *req)
{
	int ch, err, val;

	err = sysctl_io_number(req, fakestack_enabled, sizeof(fakestack_enabled), &val, &ch);
	if (err == 0 && ch) {
		fakestack_enabled = !!val;
		__asan_option_detect_stack_use_after_return = !!val;
	}

	return err;
}
SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");

SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, checks, CTLFLAG_RW, &enabled_checks, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, report_ignored, CTLFLAG_RW, &report_ignored, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, free_yield_ms, CTLFLAG_RW, &free_yield, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, leak_threshold, CTLFLAG_RW, &leak_threshold, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, leak_fatal_threshold, CTLFLAG_RW, &leak_fatal_threshold, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fakestack,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_fakestack_enable, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, test,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kasan_test, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fail,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 1, sysctl_kasan_test, "I", "");