/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/backtrace.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <kern/thread.h>
#include <machine/atomic.h>

#include <kasan_internal.h>
#include <memintrinsics.h>
const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_SHIFT;
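/*
 * Illustrative sketch (assumption, not taken from this file): KASan uses the
 * standard 1:8 shadow encoding, so SHADOW_FOR_ADDRESS() in kasan_internal.h
 * maps each 8-byte granule of kernel address space to one shadow byte at
 * roughly:
 *
 *   uint8_t *shadow = (uint8_t *)((addr >> 3) + KASAN_SHIFT);
 *
 * where *shadow == 0 means all 8 bytes are addressable, 1..7 means only the
 * first *shadow bytes are addressable, and larger values are poison flags
 * (redzones, freed memory, out-of-scope stack) named by the ASAN_* constants
 * used throughout this file.
 */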
static unsigned kexts_loaded;
unsigned shadow_pages_total;
unsigned shadow_pages_used;

vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;

static unsigned kasan_enabled;
static unsigned quarantine_enabled;
static unsigned enabled_checks = TYPE_ALL; /* bitmask of enabled checks */
static unsigned report_ignored;            /* issue non-fatal report for disabled/blacklisted checks */
static unsigned free_yield = 0;            /* ms yield after each free */

static void kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason);
static void kasan_log_report(uptr p, uptr width, access_t access, violation_t reason);

/* imported osfmk functions */
extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);

/*
 * unused: expected to be called, but (currently) does nothing
 */
#define UNUSED_ABI(func, ...) \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Wunused-parameter\"") \
	void func(__VA_ARGS__); \
	void func(__VA_ARGS__) {}; \
	_Pragma("clang diagnostic pop")

static const size_t BACKTRACE_BITS      = 4;
static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;

decl_simple_lock_data(, kasan_vm_lock);
static thread_t kasan_lock_holder;
/*
 * kasan is called from the interrupt path, so we need to disable interrupts to
 * ensure atomicity manipulating the global objects
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&kasan_vm_lock);
	kasan_lock_holder = current_thread();
}

void
kasan_unlock(boolean_t b)
{
	kasan_lock_holder = THREAD_NULL;
	simple_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}
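/*
 * Sketch of the expected locking pattern (hypothetical caller, for
 * illustration only): the previous interrupt state is returned through the
 * out-parameter of kasan_lock() and must be handed back to kasan_unlock().
 *
 *   boolean_t flags;
 *   kasan_lock(&flags);
 *   ...touch shadow map / quarantine state...
 *   kasan_unlock(flags);
 */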
/* Return true if 'thread' holds the kasan lock. Only safe if 'thread' == current thread */
bool
kasan_lock_held(thread_t thread)
{
	return thread && thread == kasan_lock_holder;
}

static inline bool
kasan_check_enabled(access_t access)
{
	return kasan_enabled && (enabled_checks & access) && !kasan_is_blacklisted(access);
}

static inline bool
kasan_poison_active(uint8_t flags)
{
	switch (flags) {
	case ASAN_GLOBAL_RZ:
		return kasan_check_enabled(TYPE_POISON_GLOBAL);
	case ASAN_HEAP_RZ:
	case ASAN_HEAP_LEFT_RZ:
	case ASAN_HEAP_RIGHT_RZ:
	case ASAN_HEAP_FREED:
		return kasan_check_enabled(TYPE_POISON_HEAP);
	default:
		return true;
	}
}
/*
 * poison redzones in the shadow map
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz, uint8_t flags)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
	uint8_t partial = size & 0x07;
	vm_size_t total = leftrz + size + rightrz;
	vm_size_t i = 0;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert((total & 0x07) == 0);

	if (!kasan_enabled || !kasan_poison_active(flags)) {
		return;
	}

	/* convert byte counts to 8-byte shadow granules */
	leftrz /= 8;
	size /= 8;
	total /= 8;

	uint8_t l_flags = flags;
	uint8_t r_flags = flags;

	if (flags == ASAN_STACK_RZ) {
		l_flags = ASAN_STACK_LEFT_RZ;
		r_flags = ASAN_STACK_RIGHT_RZ;
	} else if (flags == ASAN_HEAP_RZ) {
		l_flags = ASAN_HEAP_LEFT_RZ;
		r_flags = ASAN_HEAP_RIGHT_RZ;
	}

	/*
	 * poison the redzones and unpoison the valid bytes
	 */
	for (; i < leftrz; i++) {
		shadow[i] = l_flags;
	}
	for (; i < leftrz + size; i++) {
		shadow[i] = ASAN_VALID; /* XXX: should not be necessary */
	}
	if (partial && (i < total)) {
		shadow[i] = partial;
		i++;
	}
	for (; i < total; i++) {
		shadow[i] = r_flags;
	}
}
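/*
 * Worked example (illustrative values only): kasan_poison(base, 13, 16, 19,
 * ASAN_HEAP_RZ) covers a 48-byte region and, assuming the 1:8 shadow encoding
 * sketched above, writes six shadow bytes:
 *
 *   shadow[0..1] = ASAN_HEAP_LEFT_RZ   (16-byte left redzone)
 *   shadow[2]    = ASAN_VALID          (first 8 valid bytes)
 *   shadow[3]    = 5                   (partial granule: 13 & 0x07)
 *   shadow[4..5] = ASAN_HEAP_RIGHT_RZ  (right redzone pads out to 48 bytes)
 */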
void
kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
{
	/* base must be 8-byte aligned */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((size & 0x07) == 0);
	kasan_poison(base, 0, 0, size, flags);
}

void
kasan_unpoison(void *base, vm_size_t size)
{
	kasan_poison((vm_offset_t)base, size, 0, 0, 0);
}

void
kasan_unpoison_stack(vm_offset_t base, vm_size_t size)
{
	/* align base and size to 8 bytes */
	vm_offset_t align = base & 0x7;
	base -= align;
	size += align;
	size = (size + 7) & ~0x7;

	kasan_unpoison((void *)base, size);
}

/*
 * write junk into the redzones
 */
static void
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
	vm_size_t i;
	const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
	const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
	uint8_t *buf = (uint8_t *)base;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert(((size + leftrz + rightrz) & 0x07) == 0);

	for (i = 0; i < leftrz; i++) {
		buf[i] = deadbeef[i % 4];
	}

	for (i = 0; i < rightrz; i++) {
		buf[i + size + leftrz] = c0ffee[i % 4];
	}
}
/*
 * Report a violation that may be disabled and/or blacklisted. This can only be
 * called for dynamic checks (i.e. where the fault is recoverable). Use
 * kasan_crash_report() for static (unrecoverable) violations.
 *
 * access: what we were trying to do when the violation occurred
 * reason: what failed about the access
 */
static void
kasan_violation(uintptr_t addr, size_t size, access_t access, violation_t reason)
{
	assert(__builtin_popcount(access) == 1);
	if (!kasan_check_enabled(access)) {
		if (report_ignored) {
			kasan_log_report(addr, size, access, reason);
		}
		return;
	}
	kasan_crash_report(addr, size, access, reason);
}

void
kasan_check_range(const void *x, size_t sz, access_t access)
{
	vm_offset_t invalid;
	uintptr_t ptr = (uintptr_t)x;
	if (kasan_range_poisoned(ptr, sz, &invalid)) {
		size_t remaining = sz - (invalid - ptr);
		kasan_violation(invalid, remaining, access, 0);
	}
}

/*
 * Return true if [base, base+sz) is unpoisoned or has given shadow value.
 */
static bool
kasan_check_shadow(vm_address_t base, vm_size_t sz, uint8_t shadow)
{
	sz -= 8 - (base % 8);
	base += 8 - (base % 8);

	vm_address_t end = base + sz;

	while (base < end) {
		uint8_t *sh = SHADOW_FOR_ADDRESS(base);
		if (*sh && *sh != shadow) {
			return false;
		}
		base += 8;
	}
	return true;
}

/*
 * KASAN violation reporting
 */

static const char *
access_str(access_t type)
{
	if (type & TYPE_READ) {
		return "load from";
	} else if (type & TYPE_WRITE) {
		return "store to";
	} else if (type & TYPE_FREE) {
		return "free of";
	} else {
		return "access of";
	}
}
static const char *shadow_strings[] = {
	[ASAN_VALID]          = "VALID",
	[ASAN_PARTIAL1]       = "PARTIAL1",
	[ASAN_PARTIAL2]       = "PARTIAL2",
	[ASAN_PARTIAL3]       = "PARTIAL3",
	[ASAN_PARTIAL4]       = "PARTIAL4",
	[ASAN_PARTIAL5]       = "PARTIAL5",
	[ASAN_PARTIAL6]       = "PARTIAL6",
	[ASAN_PARTIAL7]       = "PARTIAL7",
	[ASAN_STACK_LEFT_RZ]  = "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ]   = "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED]    = "STACK_FREED",
	[ASAN_STACK_OOSCOPE]  = "STACK_OOSCOPE",
	[ASAN_GLOBAL_RZ]      = "GLOBAL_RZ",
	[ASAN_HEAP_LEFT_RZ]   = "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ]  = "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED]     = "HEAP_FREED",
};

#define CRASH_CONTEXT_BEFORE 5
#define CRASH_CONTEXT_AFTER  5
static size_t
kasan_shadow_crashlog(uptr p, char *buf, size_t len)
{
	int i, j;
	size_t n = 0;
	int before = CRASH_CONTEXT_BEFORE;
	int after = CRASH_CONTEXT_AFTER;

	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
	uptr shadow_p = shadow;
	uptr shadow_page = vm_map_round_page(shadow_p, PAGE_MASK);

	/* rewind to start of context block */
	shadow &= ~((uptr)0xf);
	shadow -= 16 * before;

	n += snprintf(buf+n, len-n,
			" Shadow             0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f\n");

	for (i = 0; i < 1 + before + after; i++, shadow += 16) {
		if ((vm_map_round_page(shadow, PAGE_MASK) != shadow_page) && !kasan_is_shadow_mapped(shadow)) {
			/* avoid unmapped shadow when crossing page boundaries */
			continue;
		}

		n += snprintf(buf+n, len-n, " %16lx:", shadow);

		const char *left = " ";
		const char *right;

		for (j = 0; j < 16; j++) {
			uint8_t *x = (uint8_t *)(shadow + j);

			right = " ";
			if ((uptr)x == shadow_p) {
				/* bracket the shadow byte for the faulting address */
				left = "[";
				right = "]";
			} else if ((uptr)(x + 1) == shadow_p) {
				right = "";
			}

			n += snprintf(buf+n, len-n, "%s%02x%s", left, (unsigned)*x, right);
			left = "";
		}
		n += snprintf(buf+n, len-n, "\n");
	}

	n += snprintf(buf+n, len-n, "\n");
	return n;
}
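/*
 * The context block built above looks roughly like this (layout only; the
 * addresses and shadow values are made up for illustration):
 *
 *  Shadow             0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f
 *  ffffff8043a1b2c0: 00 00 00 00 05 fa fa[fa]fa fa 00 00 00 00 00 00
 *
 * The bracketed byte is the shadow byte for the faulting address.
 */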
static void
kasan_report_internal(uptr p, uptr width, access_t access, violation_t reason, bool dopanic)
{
	const size_t len = 4096;
	static char buf[len];
	size_t n = 0;

	uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
	uint8_t shadow_type = *shadow_ptr;
	const char *shadow_str = shadow_strings[shadow_type];
	if (!shadow_str) {
		shadow_str = "<invalid>";
	}
	buf[0] = '\0';

	if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
		n += snprintf(buf+n, len-n, "KASan: free of corrupted/invalid object %#lx\n", p);
	} else if (reason == REASON_MOD_AFTER_FREE) {
		n += snprintf(buf+n, len-n, "KASan: UaF of quarantined object %#lx\n", p);
	} else {
		n += snprintf(buf+n, len-n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
				width, access_str(access), p, shadow_str);
	}
	n += kasan_shadow_crashlog(p, buf+n, len-n);

	if (dopanic) {
		panic("%s", buf);
	} else {
		printf("%s", buf);
	}
}

static void NOINLINE OS_NORETURN
kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason)
{
	kasan_report_internal(p, width, access, reason, true);
	__builtin_unreachable(); /* we can't handle this returning anyway */
}

static void
kasan_log_report(uptr p, uptr width, access_t access, violation_t reason)
{
	const size_t len = 256;
	char buf[len];
	size_t l = 0;

	uint32_t nframes = 14;
	uintptr_t frames[nframes];
	uintptr_t *bt = frames;

	kasan_report_internal(p, width, access, reason, false);

	/*
	 * print a backtrace
	 */

	nframes = backtrace_frame(bt, nframes, __builtin_frame_address(0)); /* ignore current frame */

	buf[0] = '\0';
	l += snprintf(buf+l, len-l, "Backtrace: ");
	for (uint32_t i = 0; i < nframes; i++) {
		l += snprintf(buf+l, len-l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
	}
	l += snprintf(buf+l, len-l, "\n");

	printf("%s", buf);
}
#define REPORT_DECLARE(n) \
	void OS_NORETURN __asan_report_load##n(uptr p)  { kasan_crash_report(p, n, TYPE_LOAD, 0); } \
	void OS_NORETURN __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE, 0); } \
	void UNSUPPORTED_API(__asan_report_exp_load##n, uptr a, int32_t b); \
	void UNSUPPORTED_API(__asan_report_exp_store##n, uptr a, int32_t b);

void OS_NORETURN __asan_report_load_n(uptr p, unsigned long sz)  { kasan_crash_report(p, sz, TYPE_LOAD, 0); }
void OS_NORETURN __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE, 0); }
/* unpoison the current stack */
void
kasan_unpoison_curstack(bool whole_stack)
{
	uintptr_t base = ml_stack_base();
	size_t sz = ml_stack_size();
	uintptr_t cur = (uintptr_t)&base;

	if (whole_stack) {
		cur = base;
	}

	if (cur >= base && cur < base + sz) {
		/* unpoison from current stack depth to the top */
		size_t unused = cur - base;
		kasan_unpoison_stack(cur, sz - unused);
	}
}

void
__asan_handle_no_return(void)
{
	kasan_unpoison_curstack(false);
	kasan_unpoison_fakestack(current_thread());
}
bool
kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
{
	uint8_t *shadow;
	vm_size_t i;

	if (!kasan_enabled) {
		return false;
	}

	size += base & 0x07;
	base &= ~(vm_offset_t)0x07;

	shadow = SHADOW_FOR_ADDRESS(base);
	vm_size_t limit = (size + 7) / 8;

	/* XXX: to make debugging easier, catch unmapped shadow here */

	for (i = 0; i < limit; i++, size -= 8) {
		uint8_t s = shadow[i];
		if (s == 0 || (size < 8 && s >= size && s <= 7)) {
			/* valid */
		} else {
			goto fail;
		}
	}

	return false;

 fail:
	if (first_invalid) {
		/* XXX: calculate the exact first byte that failed */
		*first_invalid = base + i*8;
	}
	return true;
}
void
kasan_init_globals(vm_offset_t base, vm_size_t size)
{
	struct asan_global *glob = (struct asan_global *)base;
	struct asan_global *glob_end = (struct asan_global *)(base + size);
	for (; glob < glob_end; glob++) {
		/* handle one global */
		kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
	}
}
void
kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
{
	unsigned long sectsz;
	void *sect;

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_load_kext(base, bundleid);
#endif

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
		kexts_loaded++;
	}
}

void
kasan_unload_kext(vm_offset_t base, vm_size_t size)
{
	unsigned long sectsz;
	void *sect;

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_unpoison((void *)base, size);
		kexts_loaded--;
	}

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_unload_kext(base);
#endif
}
/*
 * Turn off as much as possible for panic path etc. There's no way to turn it back
 * on.
 */
void
kasan_disable(void)
{
	__asan_option_detect_stack_use_after_return = 0;
	fakestack_enabled = 0;
	kasan_enabled = 0;
	quarantine_enabled = 0;
}
static void
kasan_init_xnu_globals(void)
{
	const char *seg = KASAN_GLOBAL_SEGNAME;
	const char *sect = KASAN_GLOBAL_SECTNAME;
	unsigned long _size;
	vm_offset_t globals;
	vm_size_t size;

	kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;

	if (!header) {
		printf("KASan: failed to find kernel mach header\n");
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}

	globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
	if (!globals) {
		printf("KASan: failed to find segment %s section %s\n", seg, sect);
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}
	size = (vm_size_t)_size;

	printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
	printf("KASan: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));

	kasan_init_globals(globals, size);
}
void
kasan_late_init(void)
{
#if KASAN_DYNAMIC_BLACKLIST
	kasan_init_dybl();
#endif

	kasan_init_fakestack();
	kasan_init_xnu_globals();
}

void
kasan_notify_stolen(vm_offset_t top)
{
	kasan_map_shadow(kernel_vtop, top - kernel_vtop, false);
}
static void
kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
{
	vm_size_t i;
	uint8_t tmp1, tmp2;

	/* Hit every byte in the shadow map. Don't write due to the zero mappings. */
	for (i = 0; i < sz; i += sizeof(uint64_t)) {
		vm_offset_t addr = base + i;
		uint8_t *x = SHADOW_FOR_ADDRESS(addr);
		tmp1 = *x;
		asm volatile("" ::: "memory");
		tmp2 = *x;
		asm volatile("" ::: "memory");
		assert(tmp1 == tmp2);
	}
}
void
kasan_init(void)
{
	unsigned arg;

	simple_lock_init(&kasan_vm_lock, 0);

	/* Map all of the kernel text and data */
	kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);

	/*
	 * handle KASan boot-args
	 */

	if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
		enabled_checks = arg;
	}

	if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
		if (arg & KASAN_ARGS_FAKESTACK) {
			fakestack_enabled = 1;
		}
		if (arg & KASAN_ARGS_REPORTIGNORED) {
			report_ignored = 1;
		}
		if (arg & KASAN_ARGS_NODYCHECKS) {
			enabled_checks &= ~TYPE_DYNAMIC;
		}
		if (arg & KASAN_ARGS_NOPOISON_HEAP) {
			enabled_checks &= ~TYPE_POISON_HEAP;
		}
		if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
			enabled_checks &= ~TYPE_POISON_GLOBAL;
		}
	}

	if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
		free_yield = arg;
	}

	/* kasan.bl boot-arg handled in kasan_init_dybl() */

	quarantine_enabled = 1;
	kasan_enabled = 1;
}
static void
kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool is_zero)
{
	assert(address < VM_MAX_KERNEL_ADDRESS);

	if (!kasan_enabled) {
		return;
	}

	if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		/* only map kernel addresses */
		return;
	}

	kasan_map_shadow(address, size, is_zero);
	kasan_debug_touch_mappings(address, size);
}

void
kasan_notify_address(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, false);
}

/*
 * Allocate read-only, all-zeros shadow for memory that can never be poisoned
 */
void
kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, true);
}
struct kasan_alloc_header {
	uint16_t magic;
	uint16_t crc;
	uint32_t alloc_size;
	uint32_t user_size;
	struct {
		uint32_t left_rz : 32 - BACKTRACE_BITS;
		uint32_t frames  : BACKTRACE_BITS;
	};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

struct kasan_alloc_footer {
	uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");
#define LIVE_XOR ((uint16_t)0x3a65)
#define FREE_XOR ((uint16_t)0xf233)

static uint16_t
magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
{
	uint16_t magic = addr & 0xFFFF;
	magic ^= (addr >> 16) & 0xFFFF;
	magic ^= (addr >> 32) & 0xFFFF;
	magic ^= (addr >> 48) & 0xFFFF;
	magic ^= magic_xor;
	return magic;
}
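/*
 * How the metadata is protected (summary of the code below): the magic folds
 * all four 16-bit slices of the object address together and XORs in a
 * live/free tag, so a header that is overwritten, relocated, or checked in
 * the wrong life-cycle state fails the comparison. For example:
 *
 *   h->magic = magic_for_addr(addr, LIVE_XOR);                 // on allocation
 *   assert(h->magic == magic_for_addr(addr, LIVE_XOR));        // on use/free
 *   fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);   // in quarantine
 */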
static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)
{
	return (void *)(addr - sizeof(struct kasan_alloc_header));
}

static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
	*size = rightrz;
	return (void *)(addr + h->user_size);
}
/*
 * size: user-requested allocation size
 * ret: minimum size for the real allocation
 */
vm_size_t
kasan_alloc_resize(vm_size_t size)
{
	vm_size_t tmp;
	if (os_add_overflow(size, 4 * PAGE_SIZE, &tmp)) {
		panic("allocation size overflow (%lu)", size);
	}

	/* add left and right redzones */
	size += KASAN_GUARD_PAD;

	/* ensure the final allocation is an 8-byte multiple */
	size += 8 - (size % 8);

	return size;
}
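/*
 * Resulting allocation layout, as implied by the header/footer helpers above
 * (field widths are illustrative, not to scale):
 *
 *   | left redzone ... | kasan_alloc_header | user data | footer / right redzone |
 *                                           ^
 *                                           user pointer returned by kasan_alloc()
 *
 * The header occupies the tail of the left redzone, immediately before the
 * user pointer; the backtrace footer lives in the right redzone.
 */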
extern vm_offset_t vm_kernel_slid_base;

static vm_size_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
	uintptr_t buf[BACKTRACE_MAXFRAMES];
	uintptr_t *bt = buf;

	sz /= sizeof(uint32_t);
	vm_size_t frames = sz;

	if (frames > 0) {
		frames = min(frames + skip, BACKTRACE_MAXFRAMES);
		frames = backtrace(bt, frames);

		while (frames > sz && skip > 0) {
			bt++;
			frames--;
			skip--;
		}
	}

	/* only store the offset from kernel base, and cram that into 32 bits */
	for (vm_size_t i = 0; i < frames; i++) {
		ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
	}

	return frames;
}
/* addr: user address of allocation */
static uint16_t
kasan_alloc_crc(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;

	uint16_t crc_orig = h->crc;
	h->crc = 0;

	uint16_t crc = 0;
	crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
	crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);

	h->crc = crc_orig;
	return crc;
}
/*
 * addr: base address of full allocation (including redzones)
 * size: total size of allocation (including redzones)
 * req: user-requested allocation size
 * lrz: size of the left redzone in bytes
 * ret: address of usable allocation
 */
vm_address_t
kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	assert((addr % 8) == 0);
	assert((size % 8) == 0);

	vm_size_t rightrz = size - req - leftrz;

	kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
	kasan_rz_clobber(addr, req, leftrz, rightrz);

	addr += leftrz;

	/* stash the allocation sizes in the left redzone */
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	h->magic = magic_for_addr(addr, LIVE_XOR);
	h->left_rz = leftrz;
	h->alloc_size = size;
	h->user_size = req;

	/* ... and a backtrace in the right redzone */
	vm_size_t fsize;
	struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
	h->frames = kasan_alloc_bt(f->backtrace, fsize, 2);

	/* checksum the whole object, minus the user part */
	h->crc = kasan_alloc_crc(addr);

	return addr;
}
/*
 * size: returns full original allocation size
 * ret: original allocation ptr
 */
vm_address_t
kasan_dealloc(vm_offset_t addr, vm_size_t *size)
{
	assert(size && addr);
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	*size = h->alloc_size;
	return addr - h->left_rz;
}

/*
 * return the original user-requested allocation size
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	assert(h->magic == magic_for_addr(addr, LIVE_XOR));
	return h->user_size;
}
976 kasan_check_free(vm_offset_t addr
, vm_size_t size
, unsigned heap_type
)
978 struct kasan_alloc_header
*h
= header_for_user_addr(addr
);
980 /* map heap type to an internal access type */
981 access_t type
= heap_type
== KASAN_HEAP_KALLOC
? TYPE_KFREE
:
982 heap_type
== KASAN_HEAP_ZALLOC
? TYPE_ZFREE
:
983 heap_type
== KASAN_HEAP_FAKESTACK
? TYPE_FSFREE
: 0;
985 /* check the magic and crc match */
986 if (h
->magic
!= magic_for_addr(addr
, LIVE_XOR
)) {
987 kasan_violation(addr
, size
, type
, REASON_BAD_METADATA
);
989 if (h
->crc
!= kasan_alloc_crc(addr
)) {
990 kasan_violation(addr
, size
, type
, REASON_MOD_OOB
);
993 /* check the freed size matches what we recorded at alloc time */
994 if (h
->user_size
!= size
) {
995 kasan_violation(addr
, size
, type
, REASON_INVALID_SIZE
);
998 vm_size_t rightrz_sz
= h
->alloc_size
- h
->left_rz
- h
->user_size
;
1000 /* Check that the redzones are valid */
1001 if (!kasan_check_shadow(addr
- h
->left_rz
, h
->left_rz
, ASAN_HEAP_LEFT_RZ
) ||
1002 !kasan_check_shadow(addr
+ h
->user_size
, rightrz_sz
, ASAN_HEAP_RIGHT_RZ
)) {
1003 kasan_violation(addr
, size
, type
, REASON_BAD_METADATA
);
1006 /* Check the allocated range is not poisoned */
1007 kasan_check_range((void *)addr
, size
, type
);
struct freelist_entry {
	uint16_t magic;
	uint16_t crc;
	STAILQ_ENTRY(freelist_entry) list;
	union {
		struct {
			vm_size_t size      : 28;
			vm_size_t user_size : 28;
			vm_size_t frames    : BACKTRACE_BITS; /* number of frames in backtrace */
			vm_size_t __unused  : 8 - BACKTRACE_BITS;
		};
		uint64_t bits;
	};
	zone_t zone;
	uint32_t backtrace[];
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");

struct quarantine {
	STAILQ_HEAD(freelist_head, freelist_entry) freelist;
	unsigned long entries;
	unsigned long max_entries;
	vm_size_t size;
	vm_size_t max_size;
};

struct quarantine quarantines[] = {
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};
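/*
 * Each heap type (zalloc, kalloc, fakestack) gets its own quarantine, bounded
 * both by entry count (QUARANTINE_ENTRIES) and by total byte size
 * (QUARANTINE_MAXSIZE); kasan_free_internal() below evicts from the freelist
 * head whenever either limit would be exceeded.
 */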
static uint16_t
fle_crc(struct freelist_entry *fle)
{
	return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
}
/*
 * addr, sizep: pointer/size of full allocation including redzone
 */
void NOINLINE
kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
                    zone_t *zone, vm_size_t user_size, int locked,
                    bool doquarantine)
{
	vm_size_t size = *sizep;
	vm_offset_t addr = *(vm_offset_t *)addrp;

	assert(type >= 0 && type < KASAN_HEAP_TYPES);
	if (type == KASAN_HEAP_KALLOC) {
		/* zero-size kalloc allocations are allowed */
	} else if (type == KASAN_HEAP_ZALLOC) {
		assert(zone && user_size);
	} else if (type == KASAN_HEAP_FAKESTACK) {
		assert(zone && user_size);
	}

	/* clobber the entire freed region */
	kasan_rz_clobber(addr, 0, size, 0);

	if (!doquarantine || !quarantine_enabled) {
		goto free_current;
	}

	/* poison the entire freed region */
	uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
	kasan_poison(addr, 0, size, 0, flags);

	struct freelist_entry *fle, *tofree = NULL;
	struct quarantine *q = &quarantines[type];
	assert(size >= sizeof(struct freelist_entry));

	/* create a new freelist entry */
	fle = (struct freelist_entry *)addr;
	fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
	fle->size = size;
	fle->user_size = user_size;
	fle->zone = ZONE_NULL;
	if (zone) {
		fle->zone = *zone;
	}
	if (type != KASAN_HEAP_FAKESTACK) {
		/* don't do expensive things on the fakestack path */
		fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
		fle->crc = fle_crc(fle);
	}

	boolean_t flg;
	if (!locked) {
		kasan_lock(&flg);
	}

	if (q->size + size > q->max_size) {
		/*
		 * Adding this entry would put us over the max quarantine size. Free the
		 * larger of the current object and the quarantine head object.
		 */
		tofree = STAILQ_FIRST(&q->freelist);
		if (fle->size > tofree->size) {
			goto free_current_locked;
		}
	}

	STAILQ_INSERT_TAIL(&q->freelist, fle, list);
	q->entries++;
	q->size += size;

	/* free the oldest entry, if necessary */
	if (tofree || q->entries > q->max_entries) {
		tofree = STAILQ_FIRST(&q->freelist);
		STAILQ_REMOVE_HEAD(&q->freelist, list);

		assert(q->entries > 0 && q->size >= tofree->size);
		q->entries--;
		q->size -= tofree->size;

		if (type != KASAN_HEAP_KALLOC) {
			assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS &&
			       (vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS);
			*zone = tofree->zone;
		}

		size = tofree->size;
		addr = (vm_offset_t)tofree;

		/* check the magic and crc match */
		if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}
		if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}

		/* clobber the quarantine header */
		__nosan_bzero((void *)addr, sizeof(struct freelist_entry));
	} else {
		/* quarantine is not full - don't really free anything */
		addr = 0;
	}

 free_current_locked:
	if (!locked) {
		kasan_unlock(flg);
	}

 free_current:
	*addrp = (void *)addr;
	if (addr) {
		kasan_unpoison((void *)addr, size);
		*sizep = size;
	}
}
void
kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
           vm_size_t user_size, bool quarantine)
{
	kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);

	if (free_yield) {
		thread_yield_internal(free_yield);
	}
}
uptr
__asan_load_cxx_array_cookie(uptr *p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)p);
	if (*shadow == ASAN_ARRAY_COOKIE) {
		return *p;
	} else if (*shadow == ASAN_HEAP_FREED) {
		return 0;
	} else {
		return *p;
	}
}

void
__asan_poison_cxx_array_cookie(uptr p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(p);
	*shadow = ASAN_ARRAY_COOKIE;
}
#define ACCESS_CHECK_DECLARE(type, sz, access) \
	void __asan_##type##sz(uptr addr) { \
		kasan_check_range((const void *)addr, sz, access); \
	} \
	void UNSUPPORTED_API(__asan_exp_##type##sz, uptr a, int32_t b);

ACCESS_CHECK_DECLARE(load,  1,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  2,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  4,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  8,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  16, TYPE_LOAD);
ACCESS_CHECK_DECLARE(store, 1,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 2,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 4,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 8,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 16, TYPE_STORE);

void
__asan_loadN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_LOAD);
}

void
__asan_storeN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_STORE);
}
static void
kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
{
	__nosan_memset((void *)addr, val, sz);
}

#define SET_SHADOW_DECLARE(val) \
	void __asan_set_shadow_##val(uptr addr, size_t sz) { \
		kasan_set_shadow(addr, sz, 0x##val); \
	}

SET_SHADOW_DECLARE(00)
SET_SHADOW_DECLARE(f1)
SET_SHADOW_DECLARE(f2)
SET_SHADOW_DECLARE(f3)
SET_SHADOW_DECLARE(f5)
SET_SHADOW_DECLARE(f8)
/*
 * Call 'cb' for each contiguous range of the shadow map. This could be more
 * efficient by walking the page table directly.
 */
int
kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
{
	uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
	uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
	shadow_base = vm_map_trunc_page(shadow_base, PAGE_MASK);
	shadow_top = vm_map_round_page(shadow_top, PAGE_MASK);

	uintptr_t start = 0, end = 0;

	for (uintptr_t addr = shadow_base; addr < shadow_top; addr += PAGE_SIZE) {
		if (kasan_is_shadow_mapped(addr)) {
			if (start == 0) {
				start = addr;
			}
			end = addr + PAGE_SIZE;
		} else if (start && end) {
			cb(start, end, ctx);
			start = 0;
			end = 0;
		}
	}

	if (start && end) {
		cb(start, end, ctx);
	}

	return 0;
}
/*
 * XXX: implement these
 */

UNUSED_ABI(__asan_alloca_poison, uptr addr, uptr size);
UNUSED_ABI(__asan_allocas_unpoison, uptr top, uptr bottom);
UNUSED_ABI(__sanitizer_ptr_sub, uptr a, uptr b);
UNUSED_ABI(__sanitizer_ptr_cmp, uptr a, uptr b);
UNUSED_ABI(__sanitizer_annotate_contiguous_container, const void *a, const void *b, const void *c, const void *d);
UNUSED_ABI(__asan_poison_stack_memory, uptr addr, size_t size);
UNUSED_ABI(__asan_unpoison_stack_memory, uptr a, uptr b);

/*
 * Miscellaneous unimplemented asan ABI
 */

UNUSED_ABI(__asan_init, void);
UNUSED_ABI(__asan_register_image_globals, uptr a);
UNUSED_ABI(__asan_unregister_image_globals, uptr a);
UNUSED_ABI(__asan_before_dynamic_init, uptr a);
UNUSED_ABI(__asan_after_dynamic_init, void);
UNUSED_ABI(__asan_version_mismatch_check_v8, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_802, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_900, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);

void UNSUPPORTED_API(__asan_init_v5, void);
void UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
void UNSUPPORTED_API(__asan_unregister_globals, uptr a, uptr b);
void UNSUPPORTED_API(__asan_register_elf_globals, uptr a, uptr b, uptr c);
void UNSUPPORTED_API(__asan_unregister_elf_globals, uptr a, uptr b, uptr c);

void UNSUPPORTED_API(__asan_exp_loadN, uptr addr, size_t sz, int32_t e);
void UNSUPPORTED_API(__asan_exp_storeN, uptr addr, size_t sz, int32_t e);
void UNSUPPORTED_API(__asan_report_exp_load_n, uptr addr, unsigned long b, int32_t c);
void UNSUPPORTED_API(__asan_report_exp_store_n, uptr addr, unsigned long b, int32_t c);
static int
sysctl_kasan_test(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg2, struct sysctl_req *req)
{
	int mask = 0;
	int ch, err;

	err = sysctl_io_number(req, 0, sizeof(int), &mask, &ch);

	if (!err && mask) {
		kasan_test(mask, arg2);
	}

	return err;
}

static int
sysctl_fakestack_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, int __unused arg2, struct sysctl_req *req)
{
	int ch, err, val;

	err = sysctl_io_number(req, fakestack_enabled, sizeof(fakestack_enabled), &val, &ch);
	if (err == 0 && ch) {
		fakestack_enabled = !!val;
		__asan_option_detect_stack_use_after_return = !!val;
	}

	return err;
}
SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");

SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, checks, CTLFLAG_RW, &enabled_checks, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, report_ignored, CTLFLAG_RW, &report_ignored, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, free_yield_ms, CTLFLAG_RW, &free_yield, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fakestack,
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
            0, 0, sysctl_fakestack_enable, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, test,
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
            0, 0, sysctl_kasan_test, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fail,
            CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
            0, 1, sysctl_kasan_test, "I", "");
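/*
 * Usage sketch from userspace, assuming a KASan-enabled kernel (values and
 * exact output vary):
 *
 *   sysctl kern.kasan.available           # non-zero on KASAN builds
 *   sysctl kern.kasan.checks              # read/adjust the enabled-check bitmask
 *   sysctl -w kern.kasan.fakestack=1      # toggle use-after-return detection
 *   sysctl -w kern.kasan.test=<mask>      # run the self tests
 */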