/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/backtrace.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <kern/thread.h>
#include <machine/atomic.h>

#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>
const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_SHIFT;
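
/*
 * Illustrative sketch (not part of the original file): classic ASan maps
 * each 8-byte granule of address space to one shadow byte at a fixed
 * offset, roughly SHADOW_FOR_ADDRESS(a) == (a >> 3) + KASAN_SHIFT. The
 * real macro lives in the arch headers; the helper below only
 * demonstrates the arithmetic and is not compiled.
 */
#if 0
static inline uintptr_t
example_shadow_for_address(uintptr_t addr, uintptr_t shift)
{
	/* one shadow byte describes the validity of 8 bytes of memory */
	return (addr >> 3) + shift;
}
#endif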
static long kexts_loaded;

long shadow_pages_total;
long shadow_pages_used;

vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;

static bool kasan_initialized;
static int kasan_enabled;
static int quarantine_enabled = 1;
static void kasan_crash_report(uptr p, uptr width, unsigned access_type);

extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);
#define ABI_UNSUPPORTED do { panic("KASan: unsupported ABI: %s\n", __func__); } while (0)
#define BACKTRACE_MAXFRAMES 16
decl_simple_lock_data(, kasan_vm_lock);
_Atomic int unsafe_count = 0;
void
kasan_unsafe_start(void)
{
	if (__c11_atomic_fetch_add(&unsafe_count, 1, memory_order_relaxed) == 128) {
		panic("kasan_unsafe_start overflow");
	}
}
void
kasan_unsafe_end(void)
{
	if (__c11_atomic_fetch_sub(&unsafe_count, 1, memory_order_relaxed) == 0) {
		panic("kasan_unsafe_end underflow");
	}
}
bool
kasan_in_unsafe(void)
{
	return atomic_load_explicit(&unsafe_count, memory_order_relaxed) != 0;
}
/*
 * KASan is called from the interrupt path, so we need to disable interrupts
 * to ensure atomicity when manipulating the global objects.
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&kasan_vm_lock);
}
void
kasan_unlock(boolean_t b)
{
	simple_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}
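
/*
 * Illustrative only: how callers are expected to pair kasan_lock() and
 * kasan_unlock() around shadow-map updates. `update_shadow_state' is a
 * hypothetical placeholder, not a function in this file.
 */
#if 0
static void
example_locked_update(void)
{
	boolean_t flag;
	kasan_lock(&flag);      /* disables interrupts, takes kasan_vm_lock */
	update_shadow_state();  /* hypothetical critical-section work */
	kasan_unlock(flag);     /* drops the lock, restores interrupt state */
}
#endif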
/*
 * poison redzones in the shadow map
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz, uint8_t flags)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
	uint8_t partial = size & 0x07;
	vm_size_t total = leftrz + size + rightrz;
	vm_size_t i = 0;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert((total & 0x07) == 0);

	if (!kasan_enabled || !kasan_initialized) {
		return;
	}

	leftrz /= 8;
	size /= 8;
	total /= 8;

	uint8_t l_flags = flags;
	uint8_t r_flags = flags;

	if (flags == ASAN_STACK_RZ) {
		l_flags = ASAN_STACK_LEFT_RZ;
		r_flags = ASAN_STACK_RIGHT_RZ;
	} else if (flags == ASAN_HEAP_RZ) {
		l_flags = ASAN_HEAP_LEFT_RZ;
		r_flags = ASAN_HEAP_RIGHT_RZ;
	}

	/*
	 * poison the redzones and unpoison the valid bytes
	 */
	for (; i < leftrz; i++) {
		shadow[i] = l_flags;
	}
	for (; i < leftrz + size; i++) {
		shadow[i] = ASAN_VALID; /* not strictly necessary */
	}
	if (partial && (i < total)) {
		shadow[i] = partial;
		i++;
	}
	for (; i < total; i++) {
		shadow[i] = r_flags;
	}

	asm volatile("" ::: "memory"); /* compiler barrier XXX: is this needed? */
}
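
/*
 * Worked example (illustrative, not original code): poisoning a 20-byte
 * object with a 16-byte left redzone and a 20-byte right redzone covers
 * 56 bytes, i.e. 7 shadow bytes. kasan_poison(base, 20, 16, 20,
 * ASAN_HEAP_RZ) would produce shadow along these lines:
 */
#if 0
static const uint8_t example_shadow[7] = {
	ASAN_HEAP_LEFT_RZ,  ASAN_HEAP_LEFT_RZ,  /* 16-byte left redzone */
	ASAN_VALID,         ASAN_VALID,         /* 16 fully valid bytes */
	0x04,                                   /* partial: 20 & 0x07 == 4 valid bytes */
	ASAN_HEAP_RIGHT_RZ, ASAN_HEAP_RIGHT_RZ, /* remainder of right redzone */
};
#endif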
void
kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
{
	/* base must be 8-byte aligned */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((size & 0x07) == 0);
	kasan_poison(base, 0, 0, size, flags);
}
void NOINLINE
kasan_unpoison(void *base, vm_size_t size)
{
	kasan_poison((vm_offset_t)base, size, 0, 0, 0);
}
void NOINLINE
kasan_unpoison_stack(vm_offset_t base, vm_size_t size)
{
	assert(base);
	assert(size);
	kasan_unpoison((void *)base, size);
}
/*
 * write junk into the redzones
 */
static void NOINLINE
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
#if KASAN_DEBUG
	vm_size_t i;
	const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
	const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
	uint8_t *buf = (uint8_t *)base;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert(((size + leftrz + rightrz) & 0x07) == 0);

	for (i = 0; i < leftrz; i++) {
		buf[i] = deadbeef[i % 4];
	}

	for (i = 0; i < rightrz; i++) {
		buf[i + size + leftrz] = c0ffee[i % 4];
	}
#else
	(void)base;
	(void)size;
	(void)leftrz;
	(void)rightrz;
#endif
}
void NOINLINE
kasan_check_range(const void *x, size_t sz, unsigned access_type)
{
	vm_offset_t invalid;

	if (kasan_in_unsafe()) {
		return;
	}

	if (kasan_range_poisoned((vm_offset_t)x, sz, &invalid)) {
		if (kasan_is_blacklisted(access_type)) {
			return;
		}
		kasan_crash_report(invalid, sz, access_type);
		/* NOTREACHED */
	}
}
/*
 * Check that [base, base+sz) has shadow value `shadow'
 * If not, report a KASan-violation on `addr'
 */
static void
kasan_assert_shadow(vm_address_t base, vm_size_t sz, vm_address_t addr, uint8_t shadow)
{
	sz -= 8 - (base % 8);
	base += 8 - (base % 8);

	vm_address_t end = base + sz;

	while (base < end) {
		uint8_t *sh = SHADOW_FOR_ADDRESS(base);
		if (*sh != shadow) {
			__asan_report_load1(addr);
		}
		base += 8;
	}
}
/*
 *
 * KASAN violation reporting
 *
 */

static const char *
access_type_str(unsigned type)
{
	if (type & TYPE_LOAD_ALL) {
		return "load";
	} else if (type & TYPE_STORE_ALL) {
		return "store";
	} else if (type & TYPE_FREE) {
		return "free";
	} else {
		return "access";
	}
}
static const char *shadow_strings[] = {
	[ASAN_VALID]          = "VALID",
	[ASAN_PARTIAL1]       = "PARTIAL1",
	[ASAN_PARTIAL2]       = "PARTIAL2",
	[ASAN_PARTIAL3]       = "PARTIAL3",
	[ASAN_PARTIAL4]       = "PARTIAL4",
	[ASAN_PARTIAL5]       = "PARTIAL5",
	[ASAN_PARTIAL6]       = "PARTIAL6",
	[ASAN_PARTIAL7]       = "PARTIAL7",
	[ASAN_STACK_RZ]       = "<invalid>",
	[ASAN_STACK_LEFT_RZ]  = "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ]   = "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED]    = "STACK_FREED",
	[ASAN_GLOBAL_RZ]      = "GLOBAL_RZ",
	[ASAN_HEAP_RZ]        = "<invalid>",
	[ASAN_HEAP_LEFT_RZ]   = "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ]  = "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED]     = "HEAP_FREED",
	[0xff]                = "<invalid>",
};
#define CRASH_CONTEXT_BEFORE 5
#define CRASH_CONTEXT_AFTER  5
static size_t
kasan_shadow_crashlog(uptr p, char *buf, size_t len)
{
	int i, j;
	size_t l = 0;
	int before = CRASH_CONTEXT_BEFORE;
	int after = CRASH_CONTEXT_AFTER;

	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
	uptr shadow_p = shadow;

	/* rewind to start of context block */
	shadow &= ~((uptr)0xf);
	shadow -= 16 * before;

	for (i = 0; i < 1 + before + after; i++, shadow += 16) {
		if (vm_map_round_page(shadow, PAGE_MASK) != vm_map_round_page(shadow_p, PAGE_MASK)) {
			/* don't cross a page boundary, in case the shadow is unmapped */
			/* XXX: ideally we check instead of ignore */
			continue;
		}

		l += snprintf(buf+l, len-l, " %#16lx: ", shadow);

		for (j = 0; j < 16; j++) {
			uint8_t *x = (uint8_t *)(shadow + j);
			l += snprintf(buf+l, len-l, "%02x ", (unsigned)*x);
		}
		l += snprintf(buf+l, len-l, "\n");
	}

	l += snprintf(buf+l, len-l, "\n");
	return l;
}
static void NOINLINE
kasan_crash_report(uptr p, uptr width, unsigned access_type)
{
	const size_t len = 4096;
	static char buf[len];
	size_t l = 0;

	uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
	uint8_t shadow_type = *shadow_ptr;
	const char *shadow_str = shadow_strings[shadow_type];

	buf[0] = '\0';
	l += snprintf(buf+l, len-l,
			"KASan: invalid %lu-byte %s @ %#lx [%s]\n"
			"Shadow %#02x @ %#lx\n\n",
			width, access_type_str(access_type), p, shadow_str,
			(unsigned)shadow_type, (unsigned long)shadow_ptr);

	l += kasan_shadow_crashlog(p, buf+l, len-l);

	panic("%s", buf);
}
#define REPORT_DECLARE(n) \
	void __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD); } \
	void __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE); } \
	void __asan_report_exp_load##n(uptr, int32_t); \
	void __asan_report_exp_store##n(uptr, int32_t); \
	void __asan_report_exp_load##n(uptr __unused p, int32_t __unused e) { ABI_UNSUPPORTED; } \
	void __asan_report_exp_store##n(uptr __unused p, int32_t __unused e) { ABI_UNSUPPORTED; }

REPORT_DECLARE(1)
REPORT_DECLARE(2)
REPORT_DECLARE(4)
REPORT_DECLARE(8)
REPORT_DECLARE(16)
void __asan_report_load_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_LOAD); }
void __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE); }
/* unpoison the current stack */
/* XXX: as an optimization, we could unpoison only up to the current stack depth */
void NOINLINE
kasan_unpoison_curstack(void)
{
	kasan_unpoison_stack(ml_stack_base(), ml_stack_size());
}

void NOINLINE
__asan_handle_no_return(void)
{
	kasan_unpoison_curstack();
	kasan_unpoison_fakestack(current_thread());
}
bool NOINLINE
kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
{
	uint8_t *shadow;
	vm_size_t i;

	if (!kasan_initialized || !kasan_enabled) {
		return false;
	}

	size += base & 0x07;
	base &= ~(vm_offset_t)0x07;

	shadow = SHADOW_FOR_ADDRESS(base);
	vm_size_t limit = (size + 7) / 8;

	/* XXX: to make debugging easier, catch unmapped shadow here */

	for (i = 0; i < limit; i++, size -= 8) {
		assert(size > 0);
		uint8_t s = shadow[i];
		if (s == 0 || (size < 8 && s >= size && s <= 7)) {
			/* valid */
		} else {
			goto fail;
		}
	}

	return false;

 fail:
	if (first_invalid) {
		/* XXX: calculate the exact first byte that failed */
		*first_invalid = base + i*8;
	}
	return true;
}
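
/*
 * Worked example (illustrative, not original text): a shadow byte s in
 * 1..7 means "only the first s bytes of this granule are valid". So a
 * check of the final 4 bytes of a range (size == 4) passes against
 * s == 6 (6 >= 4) but fails against s == 2, in which case the range is
 * reported as poisoned.
 */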
void NOINLINE
kasan_init_globals(vm_offset_t base, vm_size_t size)
{
	struct asan_global *glob = (struct asan_global *)base;
	struct asan_global *glob_end = (struct asan_global *)(base + size);
	for (; glob < glob_end; glob++) {
		/* handle one global */
		kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
	}
}
void NOINLINE
kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
{
	unsigned long sectsz;
	void *sect;

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
		kexts_loaded++;
	}

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_load_kext(base, bundleid);
#endif
}
void NOINLINE
kasan_unload_kext(vm_offset_t base, vm_size_t size)
{
	unsigned long sectsz;
	void *sect;

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_unpoison((void *)base, size);
		kexts_loaded--;
	}

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_unload_kext(base);
#endif
}
int __asan_option_detect_stack_use_after_return = 0;
static void NOINLINE
kasan_init_xnu_globals(void)
{
	const char *seg = KASAN_GLOBAL_SEGNAME;
	const char *sect = KASAN_GLOBAL_SECTNAME;
	unsigned long _size;
	vm_offset_t globals;
	vm_size_t size;

	kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;

	if (!header) {
		printf("KASAN: failed to find kernel mach header\n");
		printf("KASAN: redzones for globals not poisoned\n");
		return;
	}

	globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
	if (!globals) {
		printf("KASAN: failed to find segment %s section %s\n", seg, sect);
		printf("KASAN: redzones for globals not poisoned\n");
		return;
	}
	size = (vm_size_t)_size;

	printf("KASAN: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
	printf("KASAN: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));

	kasan_init_globals(globals, size);
}
void NOINLINE
kasan_late_init(void)
{
	kasan_init_fakestack();
	kasan_init_xnu_globals();

#if KASAN_DYNAMIC_BLACKLIST
	kasan_init_dybl();
#endif
}
void NOINLINE
kasan_notify_stolen(vm_offset_t top)
{
	kasan_map_shadow(kernel_vtop, top - kernel_vtop, false);
}
static void NOINLINE
kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
{
#if KASAN_DEBUG
	vm_size_t i;
	uint8_t tmp1, tmp2;

	/* Hit every byte in the shadow map. Don't write due to the zero mappings. */
	for (i = 0; i < sz; i += sizeof(uint64_t)) {
		vm_offset_t addr = base + i;
		uint8_t *x = SHADOW_FOR_ADDRESS(addr);
		tmp1 = *x;
		asm volatile("" ::: "memory");
		tmp2 = *x;
		asm volatile("" ::: "memory");
		assert(tmp1 == tmp2);
	}
#else
	(void)base;
	(void)sz;
#endif
}
void NOINLINE
kasan_init(void)
{
	simple_lock_init(&kasan_vm_lock, 0);

	/* Map all of the kernel text and data */
	kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);

	kasan_arch_init();

	kasan_initialized = 1;
	kasan_enabled = 1;
}
static void NOINLINE
kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool is_zero)
{
	assert(address < VM_MAX_KERNEL_ADDRESS);

	if (!kasan_initialized || !kasan_enabled) {
		return;
	}

	if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		/* only map kernel addresses */
		return;
	}

	if (!size) {
		/* nothing to map */
		return;
	}

	boolean_t flags;
	kasan_lock(&flags);
	kasan_map_shadow(address, size, is_zero);
	kasan_unlock(flags);
	kasan_debug_touch_mappings(address, size);
}
void
kasan_notify_address(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, false);
}
/*
 * Allocate read-only, all-zeros shadow for memory that can never be poisoned
 */
void
kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, true);
}
struct kasan_alloc_header {
	uint32_t magic;
	uint32_t alloc_size;
	uint32_t user_size;
	struct {
		uint32_t left_rz : 28;
		uint32_t frames  : 4;
	};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

struct kasan_alloc_footer {
	uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");
#define MAGIC_XOR ((uint32_t)0xA110C8ED)
static uint32_t
magic_for_addr(vm_offset_t addr)
{
	return (uint32_t)addr ^ MAGIC_XOR;
}
static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)
{
	return (void *)(addr - sizeof(struct kasan_alloc_header));
}
static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
	*size = rightrz;
	return (void *)(addr + h->user_size);
}
/*
 * size: user-requested allocation size
 * ret: minimum size for the real allocation
 */
vm_size_t
kasan_alloc_resize(vm_size_t size)
{
	vm_size_t tmp;
	if (os_add_overflow(size, 4 * PAGE_SIZE, &tmp)) {
		panic("allocation size overflow (%lu)", size);
	}

	/* add left and right redzones */
	size += KASAN_GUARD_PAD;

	/* ensure the final allocation is an 8-byte multiple */
	size += 8 - (size % 8);

	return size;
}
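
/*
 * Worked example (illustrative; assumes KASAN_GUARD_PAD is 32, i.e. a
 * 16-byte guard on each side): a 20-byte request becomes 20 + 32 = 52,
 * then rounds to 56. Note the rounding above always adds at least one
 * byte: an already-aligned 48 becomes 56.
 */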
extern vm_offset_t vm_kernel_slid_base;
static uint32_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
	uintptr_t buf[BACKTRACE_MAXFRAMES];
	uintptr_t *bt = buf;

	sz /= sizeof(uint32_t);
	vm_size_t frames = sz;

	if (frames > 0) {
		frames = min(frames + skip, BACKTRACE_MAXFRAMES);
		frames = backtrace(bt, frames);

		while (frames > sz && skip > 0) {
			bt++;
			frames--;
			skip--;
		}

		/* only store the offset from kernel base, and cram that into 32
		 * bits */
		for (vm_size_t i = 0; i < frames; i++) {
			ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
		}
	}
	return frames;
}
/*
 * addr: base address of full allocation (including redzones)
 * size: total size of allocation (include redzones)
 * req: user-requested allocation size
 * lrz: size of the left redzone in bytes
 * ret: address of usable allocation
 */
vm_address_t NOINLINE
kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	if (!addr) {
		return 0;
	}
	assert(size > 0);
	assert((addr % 8) == 0);
	assert((size % 8) == 0);

	vm_size_t rightrz = size - req - leftrz;

	kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
	kasan_rz_clobber(addr, req, leftrz, rightrz);

	addr += leftrz;

	/* stash the allocation sizes in the left redzone */
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	h->magic = magic_for_addr(addr);
	h->left_rz = leftrz;
	h->alloc_size = size;
	h->user_size = req;

	/* ... and a backtrace in the right redzone */
	vm_size_t fsize;
	struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
	h->frames = kasan_alloc_bt(f->backtrace, fsize, 2);

	return addr;
}
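
/*
 * Resulting layout (illustrative), relative to the returned pointer:
 *
 *   [ left redzone | user data ... | right redzone ]
 *                  ^ returned addr
 *
 * The header (magic and sizes) sits in the tail of the left redzone at
 * addr - sizeof(struct kasan_alloc_header), and the backtrace footer at
 * addr + user_size, so both live in poisoned memory yet remain
 * recoverable at free time.
 */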
/*
 * addr: user pointer
 * size: returns full original allocation size
 * ret: original allocation ptr
 */
vm_address_t NOINLINE
kasan_dealloc(vm_offset_t addr, vm_size_t *size)
{
	assert(size && addr);
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	if (h->magic != magic_for_addr(addr)) {
		/* no point blacklisting here - this is fatal */
		kasan_crash_report(addr, *size, TYPE_FREE);
	}
	*size = h->alloc_size;
	return addr - h->left_rz;
}
/*
 * return the original user-requested allocation size
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	assert(h->magic == magic_for_addr(addr));
	return h->user_size;
}
/*
 * Verify that `addr' (user pointer) is a valid allocation of `type'
 */
void
kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);

	/* map heap type to an internal access type */
	unsigned type = 0;
	if (heap_type == KASAN_HEAP_KALLOC) {
		type = TYPE_KFREE;
	} else if (heap_type == KASAN_HEAP_ZALLOC) {
		type = TYPE_ZFREE;
	} else if (heap_type == KASAN_HEAP_FAKESTACK) {
		type = TYPE_FSFREE;
	}

	/* check the magic matches */
	if (h->magic != magic_for_addr(addr)) {
		if (kasan_is_blacklisted(type)) {
			return;
		}
		kasan_crash_report(addr, size, type);
	}

	/* check the freed size matches what we recorded at alloc time */
	if (h->user_size != size) {
		if (kasan_is_blacklisted(type)) {
			return;
		}
		kasan_crash_report(addr, size, type);
	}

	vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;

	/* Check that the redzones are valid */
	kasan_assert_shadow(addr - h->left_rz, h->left_rz, addr, ASAN_HEAP_LEFT_RZ);
	kasan_assert_shadow(addr + h->user_size, rightrz_sz, addr, ASAN_HEAP_RIGHT_RZ);

	/* Check the allocated range is not poisoned */
	kasan_check_range((void *)addr, size, type);
}
struct freelist_entry {
	uint32_t magic;

	STAILQ_ENTRY(freelist_entry) list;
	struct {
		vm_size_t size      : 28;
		vm_size_t user_size : 28;
		vm_size_t frames    : 4; /* number of frames in backtrace */
		vm_size_t __unused  : 4;
	};
	zone_t zone;

	uint32_t backtrace[];
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");
#define FREELIST_MAGIC_XOR ((uint32_t)0xF23333D)
static uint32_t
freelist_magic(vm_offset_t addr)
{
	return (uint32_t)addr ^ FREELIST_MAGIC_XOR;
}
struct quarantine {
	STAILQ_HEAD(freelist_head, freelist_entry) freelist;
	unsigned long entries;
	unsigned long max_entries;
	vm_size_t size;
	vm_size_t max_size;
};
struct quarantine quarantines[] = {
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};
/*
 * addr, sizep: pointer/size of full allocation including redzone
 */
void NOINLINE
kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
		zone_t *zone, vm_size_t user_size, int locked,
		bool doquarantine)
{
	vm_size_t size = *sizep;
	vm_offset_t addr = *(vm_offset_t *)addrp;

	assert(type >= 0 && type < KASAN_HEAP_TYPES);
	if (type == KASAN_HEAP_KALLOC) {
		/* zero-size kalloc allocations are allowed */
		assert(!zone);
	} else if (type == KASAN_HEAP_ZALLOC) {
		assert(zone && user_size);
	} else if (type == KASAN_HEAP_FAKESTACK) {
		assert(zone && user_size);
	}

	/* clobber the entire freed region */
	kasan_rz_clobber(addr, 0, size, 0);

	if (!doquarantine || !quarantine_enabled) {
		goto free_current;
	}

	/* poison the entire freed region */
	uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
	kasan_poison(addr, 0, size, 0, flags);

	struct freelist_entry *fle, *tofree = NULL;
	struct quarantine *q = &quarantines[type];
	assert(size >= sizeof(struct freelist_entry));

	/* create a new freelist entry */
	fle = (struct freelist_entry *)addr;
	fle->magic = freelist_magic((vm_offset_t)fle);
	fle->size = size;
	fle->user_size = user_size;
	fle->frames = 0;
	fle->zone = ZONE_NULL;
	if (zone) {
		fle->zone = *zone;
	}
	if (type != KASAN_HEAP_FAKESTACK) {
		fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
	}

	boolean_t flag;
	if (!locked) {
		kasan_lock(&flag);
	}

	if (q->size + size > q->max_size) {
		/*
		 * Adding this entry would put us over the max quarantine size. Free the
		 * larger of the current object and the quarantine head object.
		 */
		tofree = STAILQ_FIRST(&q->freelist);
		if (fle->size > tofree->size) {
			goto free_current_locked;
		}
	}

	STAILQ_INSERT_TAIL(&q->freelist, fle, list);
	q->entries++;
	q->size += size;

	/* free the oldest entry, if necessary */
	if (tofree || q->entries > q->max_entries) {
		tofree = STAILQ_FIRST(&q->freelist);
		STAILQ_REMOVE_HEAD(&q->freelist, list);

		assert(q->entries > 0 && q->size >= tofree->size);
		q->entries--;
		q->size -= tofree->size;

		if (type != KASAN_HEAP_KALLOC) {
			assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS &&
					(vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS);
			*zone = tofree->zone;
		}

		size = tofree->size;
		addr = (vm_offset_t)tofree;
		if (tofree->magic != freelist_magic(addr)) {
			kasan_crash_report(addr, size, TYPE_FREE);
		}

		/* clobber the quarantine header */
		kasan_rz_clobber(addr, 0, sizeof(struct freelist_entry), 0);
	} else {
		/* quarantine is not full - don't really free anything */
		addr = 0;
	}

 free_current_locked:
	if (!locked) {
		kasan_unlock(flag);
	}

 free_current:
	*addrp = (void *)addr;
	if (addr) {
		kasan_unpoison((void *)addr, size);
	}
}
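
/*
 * Illustrative only: the quarantine keeps freed blocks poisoned in a
 * FIFO so that stale pointers keep faulting. A sketch of the flow an
 * allocator might see, assuming `addr'/`size' describe the full
 * allocation including redzones (not compiled):
 */
#if 0
static void
example_zfree_path(zone_t zone, void *addr, vm_size_t size, vm_size_t user_size)
{
	/* may swap in the oldest quarantined block (or NULL) for `addr' */
	kasan_free(&addr, &size, KASAN_HEAP_ZALLOC, &zone, user_size, true);
	if (addr) {
		zfree(zone, addr);  /* really free whatever was evicted */
	}
}
#endif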
void NOINLINE
kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
		vm_size_t user_size, bool quarantine)
{
	kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);
}
uptr
__asan_load_cxx_array_cookie(uptr *p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)p);
	if (*shadow == ASAN_ARRAY_COOKIE) {
		return *p;
	} else if (*shadow == ASAN_HEAP_FREED) {
		return 0;
	} else {
		return *p;
	}
}
void
__asan_poison_cxx_array_cookie(uptr p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(p);
	*shadow = ASAN_ARRAY_COOKIE;
}
#define ACCESS_CHECK_DECLARE(type, sz, access_type) \
	void __asan_##type##sz(uptr addr) { \
		kasan_check_range((const void *)addr, sz, access_type); \
	} \
	void __asan_exp_##type##sz(uptr, int32_t); \
	void __asan_exp_##type##sz(uptr __unused addr, int32_t __unused e) { ABI_UNSUPPORTED; }
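
/*
 * Expansion sketch (for reference, not original text): each
 * instantiation below defines the fixed-size instrumented entry points,
 * e.g. ACCESS_CHECK_DECLARE(load, 4, TYPE_LOAD) yields
 *
 *   void __asan_load4(uptr addr) {
 *       kasan_check_range((const void *)addr, 4, TYPE_LOAD);
 *   }
 *
 * plus an __asan_exp_load4 stub that panics via ABI_UNSUPPORTED.
 */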
ACCESS_CHECK_DECLARE(load,  1,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  2,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  4,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  8,  TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  16, TYPE_LOAD);
ACCESS_CHECK_DECLARE(store, 1,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 2,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 4,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 8,  TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 16, TYPE_STORE);
void NOINLINE
__asan_loadN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_LOAD);
}

void NOINLINE
__asan_storeN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_STORE);
}
void __asan_exp_loadN(uptr, size_t, int32_t);
void __asan_exp_storeN(uptr, size_t, int32_t);
void __asan_exp_loadN(uptr __unused addr, size_t __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
void __asan_exp_storeN(uptr __unused addr, size_t __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
void __asan_report_exp_load_n(uptr, unsigned long, int32_t);
void __asan_report_exp_store_n(uptr, unsigned long, int32_t);
void __asan_report_exp_load_n(uptr __unused p, unsigned long __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
void __asan_report_exp_store_n(uptr __unused p, unsigned long __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
static void
kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
{
	__nosan_memset((void *)addr, val, sz);
}
#define SET_SHADOW_DECLARE(val) \
	void __asan_set_shadow_##val(uptr addr, size_t sz) { \
		kasan_set_shadow(addr, sz, 0x##val); \
	}
SET_SHADOW_DECLARE(00)
SET_SHADOW_DECLARE(f1)
SET_SHADOW_DECLARE(f2)
SET_SHADOW_DECLARE(f3)
SET_SHADOW_DECLARE(f5)
SET_SHADOW_DECLARE(f8)
/*
 * XXX: implement these
 */

void __asan_alloca_poison(uptr addr, uptr size)
{
	(void)addr;
	(void)size;
}

void __asan_allocas_unpoison(uptr top, uptr bottom)
{
	(void)top;
	(void)bottom;
}
void
__sanitizer_ptr_sub(uptr a, uptr b)
{
	(void)a;
	(void)b;
}

void
__sanitizer_ptr_cmp(uptr a, uptr b)
{
	(void)a;
	(void)b;
}

void
__asan_poison_stack_memory(uptr addr, size_t size)
{
	(void)addr;
	(void)size;
}

void
__asan_unpoison_stack_memory(uptr addr, size_t size)
{
	(void)addr;
	(void)size;
}

void
__sanitizer_annotate_contiguous_container(const void *beg,
		const void *end,
		const void *old_mid,
		const void *new_mid)
{
	(void)beg;
	(void)end;
	(void)old_mid;
	(void)new_mid;
}
#define VERSION_DECLARE(v) \
	void __asan_version_mismatch_check_##v(void); \
	void __asan_version_mismatch_check_##v(void) {}

VERSION_DECLARE(apple_802)
VERSION_DECLARE(apple_900)
void
__asan_register_globals(uptr __unused a, uptr __unused b)
{
	ABI_UNSUPPORTED;
}

void
__asan_unregister_globals(uptr __unused a, uptr __unused b)
{
	ABI_UNSUPPORTED;
}

void
__asan_register_image_globals(uptr __unused ptr)
{
}

void
__asan_unregister_image_globals(uptr __unused ptr)
{
}

void
__asan_init_v5(void)
{
}

void
__asan_before_dynamic_init(uptr __unused arg)
{
}

void
__asan_after_dynamic_init(void)
{
}
static int
sysctl_kasan_test(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg2, struct sysctl_req *req)
{
	int mask = 0;
	int ch;
	int err;
	err = sysctl_io_number(req, 0, sizeof(int), &mask, &ch);

	if (!err && mask) {
		kasan_test(mask, arg2);
	}

	return err;
}
SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");

SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
SYSCTL_INT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
SYSCTL_INT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
SYSCTL_LONG(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, "");
SYSCTL_LONG(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, "");
SYSCTL_LONG(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, "");

SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, fakestack, CTLFLAG_RD, NULL, FAKESTACK, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, memintrinsics, CTLFLAG_RD, NULL, MEMINTRINSICS, "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, test,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_kasan_test, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fail,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 1, sysctl_kasan_test, "I", "");