/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/backtrace.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <kern/thread.h>
#include <machine/atomic.h>

#include <kasan_internal.h>
#include <memintrinsics.h>
const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_SHIFT;
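/*
 * Note: assuming the usual 1:8 ASan shadow mapping, each shadow byte tracks an
 * 8-byte granule of kernel memory, so the shadow byte for an address lives at
 * roughly (addr >> 3) + KASAN_SHIFT; addresses 8 bytes apart map to adjacent
 * shadow bytes.
 */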
static unsigned kexts_loaded;
unsigned shadow_pages_total;
unsigned shadow_pages_used;

vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;

static unsigned kasan_enabled;
static unsigned quarantine_enabled;
static unsigned enabled_checks = TYPE_ALL; /* bitmask of enabled checks */
static unsigned report_ignored;            /* issue non-fatal report for disabled/blacklisted checks */
static unsigned free_yield = 0;            /* ms yield after each free */
static void kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason);
static void kasan_log_report(uptr p, uptr width, access_t access, violation_t reason);
/* imported osfmk functions */
extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);
/*
 * unused: expected to be called, but (currently) does nothing
 */
#define UNUSED_ABI(func, ...) \
    _Pragma("clang diagnostic push") \
    _Pragma("clang diagnostic ignored \"-Wunused-parameter\"") \
    void func(__VA_ARGS__); \
    void func(__VA_ARGS__) {}; \
    _Pragma("clang diagnostic pop")
static const size_t BACKTRACE_BITS      = 4;
static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;
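/*
 * With BACKTRACE_BITS == 4, the saved frame count must fit in a 4-bit field,
 * so at most (1 << 4) - 1 = 15 frames are recorded per allocation or free.
 */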
decl_simple_lock_data(, kasan_vm_lock);
static thread_t kasan_lock_holder;
/*
 * kasan is called from the interrupt path, so we need to disable interrupts to
 * ensure atomicity manipulating the global objects
 */
void
kasan_lock(boolean_t *b)
{
    *b = ml_set_interrupts_enabled(false);
    simple_lock(&kasan_vm_lock, LCK_GRP_NULL);
    kasan_lock_holder = current_thread();
}

void
kasan_unlock(boolean_t b)
{
    kasan_lock_holder = THREAD_NULL;
    simple_unlock(&kasan_vm_lock);
    ml_set_interrupts_enabled(b);
}
/*
 * Return true if 'thread' holds the kasan lock. Only safe if 'thread' ==
 * current thread.
 */
bool
kasan_lock_held(thread_t thread)
{
    return thread && thread == kasan_lock_holder;
}
static bool
kasan_check_enabled(access_t access)
{
    return kasan_enabled && (enabled_checks & access) && !kasan_is_blacklisted(access);
}
static bool
kasan_poison_active(uint8_t flags)
{
    switch (flags) {
    case ASAN_GLOBAL_RZ:
        return kasan_check_enabled(TYPE_POISON_GLOBAL);
    case ASAN_HEAP_RZ:
    case ASAN_HEAP_LEFT_RZ:
    case ASAN_HEAP_RIGHT_RZ:
    case ASAN_HEAP_FREED:
        return kasan_check_enabled(TYPE_POISON_HEAP);
    default:
        return true;
    }
}
/*
 * poison redzones in the shadow map
 */
void
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz, uint8_t flags)
{
    uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
    uint8_t partial = size & 0x07;
    vm_size_t total = leftrz + size + rightrz;
    vm_size_t i = 0;

    /* base must be 8-byte aligned */
    /* any left redzone must be a multiple of 8 */
    /* total region must cover 8-byte multiple */
    assert((base & 0x07) == 0);
    assert((leftrz & 0x07) == 0);
    assert((total & 0x07) == 0);

    if (!kasan_enabled || !kasan_poison_active(flags)) {
        return;
    }

    /* convert byte sizes to shadow (8-byte granule) units */
    leftrz /= 8;
    size /= 8;
    total /= 8;

    uint8_t l_flags = flags;
    uint8_t r_flags = flags;

    if (flags == ASAN_STACK_RZ) {
        l_flags = ASAN_STACK_LEFT_RZ;
        r_flags = ASAN_STACK_RIGHT_RZ;
    } else if (flags == ASAN_HEAP_RZ) {
        l_flags = ASAN_HEAP_LEFT_RZ;
        r_flags = ASAN_HEAP_RIGHT_RZ;
    }

    /*
     * poison the redzones and unpoison the valid bytes
     */
    for (; i < leftrz; i++) {
        shadow[i] = l_flags;
    }
    for (; i < leftrz + size; i++) {
        shadow[i] = ASAN_VALID; /* XXX: should not be necessary */
    }
    if (partial && (i < total)) {
        shadow[i] = partial;
        i++;
    }
    for (; i < total; i++) {
        shadow[i] = r_flags;
    }
}
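/*
 * Worked example (illustrative): poisoning a 13-byte object with a 16-byte
 * left redzone and a 19-byte right redzone (48 bytes total) writes six shadow
 * bytes: two of l_flags, one ASAN_VALID (the first 8 valid bytes), one partial
 * byte of value 5 (only 5 of the next 8 bytes are valid), and two of r_flags.
 */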
void
kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
{
    /* base must be 8-byte aligned */
    /* total region must cover 8-byte multiple */
    assert((base & 0x07) == 0);
    assert((size & 0x07) == 0);
    kasan_poison(base, 0, 0, size, flags);
}
void
kasan_unpoison(void *base, vm_size_t size)
{
    kasan_poison((vm_offset_t)base, size, 0, 0, 0);
}
void
kasan_unpoison_stack(vm_offset_t base, vm_size_t size)
{
    /* align base and size to 8 bytes */
    vm_offset_t align = base & 0x7;
    base &= ~(vm_offset_t)0x7;
    size += align;
    size = (size + 7) & ~0x7;

    kasan_unpoison((void *)base, size);
}
/*
 * write junk into the redzones
 */
static void
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
    vm_size_t i;
    const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
    const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
    uint8_t *buf = (uint8_t *)base;

    /* base must be 8-byte aligned */
    /* any left redzone must be a multiple of 8 */
    /* total region must cover 8-byte multiple */
    assert((base & 0x07) == 0);
    assert((leftrz & 0x07) == 0);
    assert(((size + leftrz + rightrz) & 0x07) == 0);

    for (i = 0; i < leftrz; i++) {
        buf[i] = deadbeef[i % 4];
    }

    for (i = 0; i < rightrz; i++) {
        buf[i + size + leftrz] = c0ffee[i % 4];
    }
}
/*
 * Report a violation that may be disabled and/or blacklisted. This can only be
 * called for dynamic checks (i.e. where the fault is recoverable). Use
 * kasan_crash_report() for static (unrecoverable) violations.
 *
 * access: what we were trying to do when the violation occurred
 * reason: what failed about the access
 */
static void
kasan_violation(uintptr_t addr, size_t size, access_t access, violation_t reason)
{
    assert(__builtin_popcount(access) == 1);
    if (!kasan_check_enabled(access)) {
        if (report_ignored) {
            kasan_log_report(addr, size, access, reason);
        }
        return;
    }
    kasan_crash_report(addr, size, access, reason);
}
void
kasan_check_range(const void *x, size_t sz, access_t access)
{
    vm_offset_t invalid;
    uintptr_t ptr = (uintptr_t)x;
    if (kasan_range_poisoned(ptr, sz, &invalid)) {
        size_t remaining = sz - (invalid - ptr);
        kasan_violation(invalid, remaining, access, 0);
    }
}
/*
 * Return true if [base, base+sz) is unpoisoned or has given shadow value.
 */
static bool
kasan_check_shadow(vm_address_t base, vm_size_t sz, uint8_t shadow)
{
    sz -= 8 - (base % 8);
    base += 8 - (base % 8);

    vm_address_t end = base + sz;

    while (base < end) {
        uint8_t *sh = SHADOW_FOR_ADDRESS(base);
        if (*sh && *sh != shadow) {
            return false;
        }
        base += 8;
    }
    return true;
}
/*
 * KASAN violation reporting
 */

static const char *
access_str(access_t type)
{
    if (type & TYPE_READ) {
        return "load from";
    } else if (type & TYPE_WRITE) {
        return "store to";
    } else if (type & TYPE_FREE) {
        return "free of";
    } else {
        return "access of";
    }
}
static const char *shadow_strings[] = {
    [ASAN_VALID]          = "VALID",
    [ASAN_PARTIAL1]       = "PARTIAL1",
    [ASAN_PARTIAL2]       = "PARTIAL2",
    [ASAN_PARTIAL3]       = "PARTIAL3",
    [ASAN_PARTIAL4]       = "PARTIAL4",
    [ASAN_PARTIAL5]       = "PARTIAL5",
    [ASAN_PARTIAL6]       = "PARTIAL6",
    [ASAN_PARTIAL7]       = "PARTIAL7",
    [ASAN_STACK_LEFT_RZ]  = "STACK_LEFT_RZ",
    [ASAN_STACK_MID_RZ]   = "STACK_MID_RZ",
    [ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
    [ASAN_STACK_FREED]    = "STACK_FREED",
    [ASAN_STACK_OOSCOPE]  = "STACK_OOSCOPE",
    [ASAN_GLOBAL_RZ]      = "GLOBAL_RZ",
    [ASAN_HEAP_LEFT_RZ]   = "HEAP_LEFT_RZ",
    [ASAN_HEAP_RIGHT_RZ]  = "HEAP_RIGHT_RZ",
    [ASAN_HEAP_FREED]     = "HEAP_FREED",
};
#define CRASH_CONTEXT_BEFORE 5
#define CRASH_CONTEXT_AFTER  5
static size_t
kasan_shadow_crashlog(uptr p, char *buf, size_t len)
{
    int i, j;
    size_t n = 0;
    int before = CRASH_CONTEXT_BEFORE;
    int after = CRASH_CONTEXT_AFTER;

    uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
    uptr shadow_p = shadow;
    uptr shadow_page = vm_map_round_page(shadow_p, HW_PAGE_MASK);

    /* rewind to start of context block */
    shadow &= ~((uptr)0xf);
    shadow -= 16 * before;

    n += snprintf(buf+n, len-n,
            " Shadow 0 1 2 3 4 5 6 7 8 9 a b c d e f\n");

    for (i = 0; i < 1 + before + after; i++, shadow += 16) {
        if ((vm_map_round_page(shadow, HW_PAGE_MASK) != shadow_page) && !kasan_is_shadow_mapped(shadow)) {
            /* avoid unmapped shadow when crossing page boundaries */
            continue;
        }

        n += snprintf(buf+n, len-n, " %16lx:", shadow);

        char *left = " ";
        char *right;

        for (j = 0; j < 16; j++) {
            uint8_t *x = (uint8_t *)(shadow + j);

            right = " ";
            if ((uptr)x == shadow_p) {
                /* bracket the faulting shadow byte */
                left = "[";
                right = "]";
            } else if ((uptr)(x + 1) == shadow_p) {
                /* the next byte is the faulting one - drop the spacer */
                right = "";
            }

            n += snprintf(buf+n, len-n, "%s%02x%s", left, (unsigned)*x, right);
            left = "";
        }
        n += snprintf(buf+n, len-n, "\n");
    }

    n += snprintf(buf+n, len-n, "\n");
    return n;
}
static void
kasan_report_internal(uptr p, uptr width, access_t access, violation_t reason, bool dopanic)
{
    const size_t len = 4096;
    static char buf[len];
    size_t n = 0;

    buf[0] = '\0';

    uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
    uint8_t shadow_type = *shadow_ptr;
    const char *shadow_str = shadow_strings[shadow_type];
    if (!shadow_str) {
        shadow_str = "<invalid>";
    }

    if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
        n += snprintf(buf+n, len-n, "KASan: free of corrupted/invalid object %#lx\n", p);
    } else if (reason == REASON_MOD_AFTER_FREE) {
        n += snprintf(buf+n, len-n, "KASan: UaF of quarantined object %#lx\n", p);
    } else {
        n += snprintf(buf+n, len-n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
                width, access_str(access), p, shadow_str);
    }
    n += kasan_shadow_crashlog(p, buf+n, len-n);

    if (dopanic) {
        panic("%s", buf);
    } else {
        printf("%s", buf);
    }
}
static void NOINLINE OS_NORETURN
kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason)
{
    kasan_report_internal(p, width, access, reason, true);
    __builtin_unreachable(); /* we can't handle this returning anyway */
}
static void
kasan_log_report(uptr p, uptr width, access_t access, violation_t reason)
{
    const size_t len = 256;
    char buf[len];
    size_t l = 0;
    uint32_t nframes = 14;
    uintptr_t frames[nframes];
    uintptr_t *bt = frames;

    kasan_report_internal(p, width, access, reason, false);

    /*
     * print a backtrace
     */

    nframes = backtrace_frame(bt, nframes, __builtin_frame_address(0)); /* ignore current frame */

    buf[0] = '\0';
    l += snprintf(buf+l, len-l, "Backtrace: ");
    for (uint32_t i = 0; i < nframes; i++) {
        l += snprintf(buf+l, len-l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
    }
    l += snprintf(buf+l, len-l, "\n");

    printf("%s", buf);
}
#define REPORT_DECLARE(n) \
    void OS_NORETURN __asan_report_load##n(uptr p)  { kasan_crash_report(p, n, TYPE_LOAD, 0); } \
    void OS_NORETURN __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE, 0); } \
    void UNSUPPORTED_API(__asan_report_exp_load##n, uptr a, int32_t b); \
    void UNSUPPORTED_API(__asan_report_exp_store##n, uptr a, int32_t b);
void OS_NORETURN __asan_report_load_n(uptr p, unsigned long sz)  { kasan_crash_report(p, sz, TYPE_LOAD, 0); }
void OS_NORETURN __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE, 0); }
/* unpoison the current stack */
void
kasan_unpoison_curstack(bool whole_stack)
{
    uintptr_t base = ml_stack_base();
    size_t sz = ml_stack_size();
    uintptr_t cur = (uintptr_t)&base;

    if (whole_stack) {
        cur = base;
    }

    if (cur >= base && cur < base + sz) {
        /* unpoison from current stack depth to the top */
        size_t unused = cur - base;
        kasan_unpoison_stack(cur, sz - unused);
    }
}
void
__asan_handle_no_return(void)
{
    kasan_unpoison_curstack(false);

    /*
     * No need to free any fakestack objects because they must stay alive until
     * we drop the real stack, at which point we can drop the entire fakestack.
     */
}
bool
kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
{
    uint8_t *shadow;
    vm_size_t i;

    if (!kasan_enabled) {
        return false;
    }

    size += base & 0x07;
    base &= ~(vm_offset_t)0x07;

    shadow = SHADOW_FOR_ADDRESS(base);
    vm_size_t limit = (size + 7) / 8;

    /* XXX: to make debugging easier, catch unmapped shadow here */

    for (i = 0; i < limit; i++, size -= 8) {
        uint8_t s = shadow[i];
        if (s == 0 || (size < 8 && s >= size && s <= 7)) {
            /* valid */
        } else {
            goto fail;
        }
    }

    return false;

fail:
    if (first_invalid) {
        /* XXX: calculate the exact first byte that failed */
        *first_invalid = base + i*8;
    }
    return true;
}
void
kasan_init_globals(vm_offset_t base, vm_size_t size)
{
    struct asan_global *glob = (struct asan_global *)base;
    struct asan_global *glob_end = (struct asan_global *)(base + size);
    for (; glob < glob_end; glob++) {
        /* handle one global */
        kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
    }
}
void
kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
{
    unsigned long sectsz;
    void *sect;

#if KASAN_DYNAMIC_BLACKLIST
    kasan_dybl_load_kext(base, bundleid);
#endif

    /* find the kasan globals segment/section */
    sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
    if (sect) {
        kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
        kexts_loaded++;
    }
}
void
kasan_unload_kext(vm_offset_t base, vm_size_t size)
{
    unsigned long sectsz;
    void *sect;

    /* find the kasan globals segment/section */
    sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
    if (sect) {
        kasan_unpoison((void *)base, size);
        kexts_loaded--;
    }

#if KASAN_DYNAMIC_BLACKLIST
    kasan_dybl_unload_kext(base);
#endif
}
/*
 * Turn off as much as possible for panic path etc. There's no way to turn it
 * back on.
 */
void
kasan_disable(void)
{
    __asan_option_detect_stack_use_after_return = 0;
    fakestack_enabled = 0;
    kasan_enabled = 0;
    quarantine_enabled = 0;
}
static void
kasan_init_xnu_globals(void)
{
    const char *seg = KASAN_GLOBAL_SEGNAME;
    const char *sect = KASAN_GLOBAL_SECTNAME;
    unsigned long _size;
    vm_offset_t globals;
    vm_size_t size;

    kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;

    if (!header) {
        printf("KASan: failed to find kernel mach header\n");
        printf("KASan: redzones for globals not poisoned\n");
        return;
    }

    globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
    if (!globals) {
        printf("KASan: failed to find segment %s section %s\n", seg, sect);
        printf("KASan: redzones for globals not poisoned\n");
        return;
    }
    size = (vm_size_t)_size;

    printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
    printf("KASan: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));

    kasan_init_globals(globals, size);
}
void
kasan_late_init(void)
{
#if KASAN_DYNAMIC_BLACKLIST
    kasan_init_dybl();
#endif

    kasan_init_fakestack();
    kasan_init_xnu_globals();
}
void
kasan_notify_stolen(vm_offset_t top)
{
    kasan_map_shadow(kernel_vtop, top - kernel_vtop, false);
}
static void
kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
{
#if KASAN_DEBUG
    vm_size_t i;
    uint8_t tmp1, tmp2;

    /* Hit every byte in the shadow map. Don't write due to the zero mappings. */
    for (i = 0; i < sz; i += sizeof(uint64_t)) {
        vm_offset_t addr = base + i;
        uint8_t *x = SHADOW_FOR_ADDRESS(addr);
        tmp1 = *x;
        asm volatile("" ::: "memory");
        tmp2 = *x;
        asm volatile("" ::: "memory");
        assert(tmp1 == tmp2);
    }
#else
    (void)base;
    (void)sz;
#endif
}
void
kasan_init(void)
{
    unsigned arg;

    simple_lock_init(&kasan_vm_lock, 0);

    /* Map all of the kernel text and data */
    kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);
    /*
     * handle KASan boot-args
     */

    if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
        enabled_checks = arg;
    }

    if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
        if (arg & KASAN_ARGS_FAKESTACK) {
            fakestack_enabled = 1;
        }
        if (arg & KASAN_ARGS_REPORTIGNORED) {
            report_ignored = 1;
        }
        if (arg & KASAN_ARGS_NODYCHECKS) {
            enabled_checks &= ~TYPE_DYNAMIC;
        }
        if (arg & KASAN_ARGS_NOPOISON_HEAP) {
            enabled_checks &= ~TYPE_POISON_HEAP;
        }
        if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
            enabled_checks &= ~TYPE_POISON_GLOBAL;
        }
    }

    if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
        free_yield = arg;
    }

    /* kasan.bl boot-arg handled in kasan_init_dybl() */

    quarantine_enabled = 1;
    kasan_enabled = 1;
}
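/*
 * Example (illustrative): booting with "kasan.checks=<mask>" replaces the
 * default TYPE_ALL check mask, and "kasan.free_yield_ms=10" makes each
 * instrumented free yield for roughly 10 ms, throttling how fast the
 * quarantine cycles.
 */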
static void
kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool is_zero)
{
    assert(address < VM_MAX_KERNEL_ADDRESS);

    if (!kasan_enabled) {
        return;
    }

    if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
        /* only map kernel addresses */
        return;
    }

    if (!size) {
        /* nothing to map */
        return;
    }

    boolean_t flags;
    kasan_lock(&flags);
    kasan_map_shadow(address, size, is_zero);
    kasan_unlock(flags);
    kasan_debug_touch_mappings(address, size);
}

void
kasan_notify_address(vm_offset_t address, vm_size_t size)
{
    kasan_notify_address_internal(address, size, false);
}

/*
 * Allocate read-only, all-zeros shadow for memory that can never be poisoned
 */
void
kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
{
    kasan_notify_address_internal(address, size, true);
}
struct kasan_alloc_header {
    uint16_t magic;
    uint16_t crc;
    uint32_t alloc_size;
    uint32_t user_size;
    uint32_t left_rz : 32 - BACKTRACE_BITS;
    uint32_t frames  : BACKTRACE_BITS;
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

struct kasan_alloc_footer {
    uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");
#define LIVE_XOR ((uint16_t)0x3a65)
#define FREE_XOR ((uint16_t)0xf233)

static uint16_t
magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
{
    uint16_t magic = addr & 0xFFFF;
    magic ^= (addr >> 16) & 0xFFFF;
    magic ^= (addr >> 32) & 0xFFFF;
    magic ^= (addr >> 48) & 0xFFFF;
    magic ^= magic_xor;
    return magic;
}
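/*
 * Example: for addr 0xffffff8012345678 the 16-bit halves 0x5678, 0x1234,
 * 0xff80 and 0xffff XOR together to 0x4433; XORing that with LIVE_XOR or
 * FREE_XOR yields a cheap per-address tag that distinguishes a live header
 * from a quarantined freelist entry.
 */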
static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)
{
    return (void *)(addr - sizeof(struct kasan_alloc_header));
}

static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
{
    struct kasan_alloc_header *h = header_for_user_addr(addr);
    vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
    *size = rightrz;
    return (void *)(addr + h->user_size);
}
/*
 * size: user-requested allocation size
 * ret: minimum size for the real allocation
 */
vm_size_t
kasan_alloc_resize(vm_size_t size)
{
    vm_size_t tmp;
    if (os_add_overflow(size, 4 * PAGE_SIZE, &tmp)) {
        panic("allocation size overflow (%lu)", size);
    }

    /* add left and right redzones */
    size += KASAN_GUARD_PAD;

    /* ensure the final allocation is an 8-byte multiple */
    size += 8 - (size % 8);

    return size;
}
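/*
 * Example (illustrative): a 100-byte request first grows by KASAN_GUARD_PAD to
 * make room for the left and right redzones, then rounds up so the underlying
 * allocator always sees an 8-byte-multiple size strictly larger than what the
 * caller asked for.
 */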
extern vm_offset_t vm_kernel_slid_base;
static vm_size_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
    uintptr_t buf[BACKTRACE_MAXFRAMES];
    uintptr_t *bt = buf;

    sz /= sizeof(uint32_t);
    vm_size_t frames = sz;

    if (frames > 0) {
        frames = min(frames + skip, BACKTRACE_MAXFRAMES);
        frames = backtrace(bt, frames);

        /* drop the frames we were asked to skip */
        while (frames > sz && skip > 0) {
            bt++;
            frames--;
            skip--;
        }

        /* only store the offset from kernel base, and cram that into 32
         * bits */
        for (vm_size_t i = 0; i < frames; i++) {
            ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
        }
    }
    return frames;
}
/* addr: user address of allocation */
static uint16_t
kasan_alloc_crc(vm_offset_t addr)
{
    struct kasan_alloc_header *h = header_for_user_addr(addr);
    vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;

    uint16_t crc_orig = h->crc;
    h->crc = 0;

    uint16_t crc = 0;
    crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
    crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);

    h->crc = crc_orig;
    return crc;
}
/*
 * addr: base address of full allocation (including redzones)
 * size: total size of allocation (including redzones)
 * req: user-requested allocation size
 * lrz: size of the left redzone in bytes
 * ret: address of usable allocation
 */
vm_address_t
kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
    assert((addr % 8) == 0);
    assert((size % 8) == 0);

    vm_size_t rightrz = size - req - leftrz;

    kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
    kasan_rz_clobber(addr, req, leftrz, rightrz);

    addr += leftrz;

    /* stash the allocation sizes in the left redzone */
    struct kasan_alloc_header *h = header_for_user_addr(addr);
    h->magic = magic_for_addr(addr, LIVE_XOR);
    h->left_rz = leftrz;
    h->alloc_size = size;
    h->user_size = req;

    /* ... and a backtrace in the right redzone */
    vm_size_t fsize;
    struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
    h->frames = kasan_alloc_bt(f->backtrace, fsize, 2);

    /* checksum the whole object, minus the user part */
    h->crc = kasan_alloc_crc(addr);

    return addr;
}
/*
 * size: returns full original allocation size
 * ret: original allocation ptr
 */
vm_address_t
kasan_dealloc(vm_offset_t addr, vm_size_t *size)
{
    assert(size && addr);
    struct kasan_alloc_header *h = header_for_user_addr(addr);
    *size = h->alloc_size;
    return addr - h->left_rz;
}
/*
 * return the original user-requested allocation size
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
    struct kasan_alloc_header *h = header_for_user_addr(addr);
    assert(h->magic == magic_for_addr(addr, LIVE_XOR));
    return h->user_size;
}
/*
 * Verify that `addr' (user pointer) is a valid allocation of `type'
 */
void
kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
{
    struct kasan_alloc_header *h = header_for_user_addr(addr);

    /* map heap type to an internal access type */
    access_t type = heap_type == KASAN_HEAP_KALLOC    ? TYPE_KFREE :
                    heap_type == KASAN_HEAP_ZALLOC    ? TYPE_ZFREE :
                    heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0;

    /* check the magic and crc match */
    if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
        kasan_violation(addr, size, type, REASON_BAD_METADATA);
    }
    if (h->crc != kasan_alloc_crc(addr)) {
        kasan_violation(addr, size, type, REASON_MOD_OOB);
    }

    /* check the freed size matches what we recorded at alloc time */
    if (h->user_size != size) {
        kasan_violation(addr, size, type, REASON_INVALID_SIZE);
    }

    vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;

    /* Check that the redzones are valid */
    if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
        !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
        kasan_violation(addr, size, type, REASON_BAD_METADATA);
    }

    /* Check the allocated range is not poisoned */
    kasan_check_range((void *)addr, size, type);
}
struct freelist_entry {
    uint16_t magic;
    uint16_t crc;

    STAILQ_ENTRY(freelist_entry) list;

    union {
        struct {
            vm_size_t size      : 28;
            vm_size_t user_size : 28;
            vm_size_t frames    : BACKTRACE_BITS; /* number of frames in backtrace */
            vm_size_t __unused  : 8 - BACKTRACE_BITS;
        };
        uint64_t bits;
    };

    zone_t zone;

    uint32_t backtrace[];
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");

struct quarantine {
    STAILQ_HEAD(freelist_head, freelist_entry) freelist;
    unsigned long entries;
    unsigned long max_entries;
    vm_size_t size;
    vm_size_t max_size;
};

struct quarantine quarantines[] = {
    { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
    { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
    { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};
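/*
 * Sketch of the scheme (descriptive): freed objects are not returned to the
 * allocator immediately. Each freed region is overwritten with a
 * freelist_entry header (magic plus a CRC over that header) and parked on the
 * per-heap FIFO above. Only when the quarantine exceeds max_entries or
 * max_size is the oldest entry validated and actually freed, which widens the
 * window in which writes to freed memory can be detected.
 */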
static uint16_t
fle_crc(struct freelist_entry *fle)
{
    return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
}
/*
 * addr, sizep: pointer/size of full allocation including redzone
 */
void
kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
        zone_t *zone, vm_size_t user_size, int locked,
        bool doquarantine)
{
    vm_size_t size = *sizep;
    vm_offset_t addr = *(vm_offset_t *)addrp;

    assert(type >= 0 && type < KASAN_HEAP_TYPES);
    if (type == KASAN_HEAP_KALLOC) {
        /* zero-size kalloc allocations are allowed */
    } else if (type == KASAN_HEAP_ZALLOC) {
        assert(zone && user_size);
    } else if (type == KASAN_HEAP_FAKESTACK) {
        assert(zone && user_size);
    }

    /* clobber the entire freed region */
    kasan_rz_clobber(addr, 0, size, 0);

    if (!doquarantine || !quarantine_enabled) {
        goto free_current;
    }

    /* poison the entire freed region */
    uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
    kasan_poison(addr, 0, size, 0, flags);

    struct freelist_entry *fle, *tofree = NULL;
    struct quarantine *q = &quarantines[type];
    assert(size >= sizeof(struct freelist_entry));

    /* create a new freelist entry */
    fle = (struct freelist_entry *)addr;
    fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
    fle->size = size;
    fle->user_size = user_size;
    fle->frames = 0;
    fle->zone = ZONE_NULL;
    if (zone) {
        fle->zone = *zone;
    }
    if (type != KASAN_HEAP_FAKESTACK) {
        /* don't do expensive things on the fakestack path */
        fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
        fle->crc = fle_crc(fle);
    }

    boolean_t flg;
    if (!locked) {
        kasan_lock(&flg);
    }

    if (q->size + size > q->max_size) {
        /*
         * Adding this entry would put us over the max quarantine size. Free the
         * larger of the current object and the quarantine head object.
         */
        tofree = STAILQ_FIRST(&q->freelist);
        if (fle->size > tofree->size) {
            goto free_current_locked;
        }
    }

    STAILQ_INSERT_TAIL(&q->freelist, fle, list);
    q->entries++;
    q->size += size;

    /* free the oldest entry, if necessary */
    if (tofree || q->entries > q->max_entries) {
        tofree = STAILQ_FIRST(&q->freelist);
        STAILQ_REMOVE_HEAD(&q->freelist, list);

        assert(q->entries > 0 && q->size >= tofree->size);
        q->entries--;
        q->size -= tofree->size;

        if (type != KASAN_HEAP_KALLOC) {
            assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS &&
                    (vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS);
            *zone = tofree->zone;
        }

        size = tofree->size;
        addr = (vm_offset_t)tofree;

        /* check the magic and crc match */
        if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
            kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
        }
        if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
            kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
        }

        /* clobber the quarantine header */
        __nosan_bzero((void *)addr, sizeof(struct freelist_entry));
    } else {
        /* quarantine is not full - don't really free anything */
        addr = 0;
    }

free_current_locked:
    if (!locked) {
        kasan_unlock(flg);
    }

free_current:
    *addrp = (void *)addr;
    if (addr) {
        kasan_unpoison((void *)addr, size);
        *sizep = size;
    }
}
void
kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
        vm_size_t user_size, bool quarantine)
{
    kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);

    if (free_yield) {
        thread_yield_internal(free_yield);
    }
}
uptr
__asan_load_cxx_array_cookie(uptr *p)
{
    uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)p);
    if (*shadow == ASAN_ARRAY_COOKIE) {
        return *p;
    } else if (*shadow == ASAN_HEAP_FREED) {
        return 0;
    } else {
        return *p;
    }
}

void
__asan_poison_cxx_array_cookie(uptr p)
{
    uint8_t *shadow = SHADOW_FOR_ADDRESS(p);
    *shadow = ASAN_ARRAY_COOKIE;
}
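/*
 * Background (descriptive): operator new[] stores the element count (the
 * "array cookie") just before the returned array so delete[] can run the right
 * number of destructors. ASan instrumentation poisons that slot with
 * ASAN_ARRAY_COOKIE so stray accesses to it are flagged, while the two hooks
 * above let the runtime itself read and poison the cookie legitimately.
 */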
#define ACCESS_CHECK_DECLARE(type, sz, access) \
    void __asan_##type##sz(uptr addr) { \
        kasan_check_range((const void *)addr, sz, access); \
    } \
    void UNSUPPORTED_API(__asan_exp_##type##sz, uptr a, int32_t b);

ACCESS_CHECK_DECLARE(load,  1,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  2,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  4,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  8,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  16, TYPE_LOAD);
ACCESS_CHECK_DECLARE(store, 1,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 2,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 4,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 8,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 16, TYPE_STORE);
void
__asan_loadN(uptr addr, size_t sz)
{
    kasan_check_range((const void *)addr, sz, TYPE_LOAD);
}

void
__asan_storeN(uptr addr, size_t sz)
{
    kasan_check_range((const void *)addr, sz, TYPE_STORE);
}
static void
kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
{
    __nosan_memset((void *)addr, val, sz);
}

#define SET_SHADOW_DECLARE(val) \
    void __asan_set_shadow_##val(uptr addr, size_t sz) { \
        kasan_set_shadow(addr, sz, 0x##val); \
    }

SET_SHADOW_DECLARE(00)
SET_SHADOW_DECLARE(f1)
SET_SHADOW_DECLARE(f2)
SET_SHADOW_DECLARE(f3)
SET_SHADOW_DECLARE(f5)
SET_SHADOW_DECLARE(f8)
/*
 * Call 'cb' for each contiguous range of the shadow map. This could be more
 * efficient by walking the page table directly.
 */
void
kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
{
    uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
    uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
    shadow_base = vm_map_trunc_page(shadow_base, HW_PAGE_MASK);
    shadow_top = vm_map_round_page(shadow_top, HW_PAGE_MASK);

    uintptr_t start = 0, end = 0;

    for (uintptr_t addr = shadow_base; addr < shadow_top; addr += HW_PAGE_SIZE) {
        if (kasan_is_shadow_mapped(addr)) {
            if (start == 0) {
                start = addr;
            }
            end = addr + HW_PAGE_SIZE;
        } else if (start && end) {
            cb(start, end, ctx);
            start = 0;
            end = 0;
        }
    }

    if (start && end) {
        cb(start, end, ctx);
    }
}
/*
 * XXX: implement these
 */

UNUSED_ABI(__asan_alloca_poison, uptr addr, uptr size);
UNUSED_ABI(__asan_allocas_unpoison, uptr top, uptr bottom);
UNUSED_ABI(__sanitizer_ptr_sub, uptr a, uptr b);
UNUSED_ABI(__sanitizer_ptr_cmp, uptr a, uptr b);
UNUSED_ABI(__sanitizer_annotate_contiguous_container, const void *a, const void *b, const void *c, const void *d);
UNUSED_ABI(__asan_poison_stack_memory, uptr addr, size_t size);
UNUSED_ABI(__asan_unpoison_stack_memory, uptr a, uptr b);
/*
 * Miscellaneous unimplemented asan ABI
 */

UNUSED_ABI(__asan_init, void);
UNUSED_ABI(__asan_register_image_globals, uptr a);
UNUSED_ABI(__asan_unregister_image_globals, uptr a);
UNUSED_ABI(__asan_before_dynamic_init, uptr a);
UNUSED_ABI(__asan_after_dynamic_init, void);
UNUSED_ABI(__asan_version_mismatch_check_v8, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_802, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_900, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1000, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1001, void);

void UNSUPPORTED_API(__asan_init_v5, void);
void UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
void UNSUPPORTED_API(__asan_unregister_globals, uptr a, uptr b);
void UNSUPPORTED_API(__asan_register_elf_globals, uptr a, uptr b, uptr c);
void UNSUPPORTED_API(__asan_unregister_elf_globals, uptr a, uptr b, uptr c);

void UNSUPPORTED_API(__asan_exp_loadN, uptr addr, size_t sz, int32_t e);
void UNSUPPORTED_API(__asan_exp_storeN, uptr addr, size_t sz, int32_t e);
void UNSUPPORTED_API(__asan_report_exp_load_n, uptr addr, unsigned long b, int32_t c);
void UNSUPPORTED_API(__asan_report_exp_store_n, uptr addr, unsigned long b, int32_t c);
static int
sysctl_kasan_test(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg2, struct sysctl_req *req)
{
    int mask = 0;
    int ch, err;
    err = sysctl_io_number(req, 0, sizeof(int), &mask, &ch);

    if (!err && mask) {
        kasan_test(mask, arg2);
    }

    return err;
}
static int
sysctl_fakestack_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, int __unused arg2, struct sysctl_req *req)
{
    int ch, err, val;

    err = sysctl_io_number(req, fakestack_enabled, sizeof(fakestack_enabled), &val, &ch);
    if (err == 0 && ch) {
        fakestack_enabled = !!val;
        __asan_option_detect_stack_use_after_return = !!val;
    }

    return err;
}
SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");

SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, checks, CTLFLAG_RW, &enabled_checks, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, report_ignored, CTLFLAG_RW, &report_ignored, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, free_yield_ms, CTLFLAG_RW, &free_yield, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fakestack,
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
        0, 0, sysctl_fakestack_enable, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, test,
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
        0, 0, sysctl_kasan_test, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fail,
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
        0, 1, sysctl_kasan_test, "I", "");