/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/backtrace.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <kern/thread.h>
#include <machine/atomic.h>

#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

#if !KASAN_DEBUG
# undef NOINLINE
# define NOINLINE
#endif

const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_SHIFT;

static long kexts_loaded;

long shadow_pages_total;
long shadow_pages_used;

vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;

static bool kasan_initialized;
static int kasan_enabled;
static int quarantine_enabled = 1;

static void kasan_crash_report(uptr p, uptr width, unsigned access_type);
extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);

#define ABI_UNSUPPORTED do { panic("KASan: unsupported ABI: %s\n", __func__); } while (0)

#define BACKTRACE_MAXFRAMES 16

decl_simple_lock_data(, kasan_vm_lock);

_Atomic int unsafe_count = 0;

void
kasan_unsafe_start(void)
{
	if (__c11_atomic_fetch_add(&unsafe_count, 1, memory_order_relaxed) == 128) {
		panic("kasan_unsafe_start overflow");
	}
}

void
kasan_unsafe_end(void)
{
	if (__c11_atomic_fetch_sub(&unsafe_count, 1, memory_order_relaxed) == 0) {
		panic("kasan_unsafe_end underflow");
	}
}

static bool
kasan_in_unsafe(void)
{
	return atomic_load_explicit(&unsafe_count, memory_order_relaxed) != 0;
}

/*
 * KASan is called from the interrupt path, so we need to disable interrupts
 * to ensure atomicity when manipulating the global objects.
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&kasan_vm_lock);
}

void
kasan_unlock(boolean_t b)
{
	simple_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}
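
/*
 * Illustrative usage sketch (not part of the original source): callers pair
 * kasan_lock()/kasan_unlock() around shadow-map and quarantine updates, as
 * kasan_notify_address_internal() and kasan_free_internal() do below:
 *
 *	boolean_t flags;
 *	kasan_lock(&flags);
 *	... touch shadow map / quarantine ...
 *	kasan_unlock(flags);
 */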

/*
 * poison redzones in the shadow map
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz, uint8_t flags)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
	uint8_t partial = size & 0x07;
	vm_size_t total = leftrz + size + rightrz;
	vm_size_t i = 0;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert((total & 0x07) == 0);

	if (!kasan_enabled || !kasan_initialized) {
		return;
	}

	leftrz /= 8;
	size /= 8;
	total /= 8;

	uint8_t l_flags = flags;
	uint8_t r_flags = flags;

	if (flags == ASAN_STACK_RZ) {
		l_flags = ASAN_STACK_LEFT_RZ;
		r_flags = ASAN_STACK_RIGHT_RZ;
	} else if (flags == ASAN_HEAP_RZ) {
		l_flags = ASAN_HEAP_LEFT_RZ;
		r_flags = ASAN_HEAP_RIGHT_RZ;
	}

	/*
	 * poison the redzones and unpoison the valid bytes
	 */
	for (; i < leftrz; i++) {
		shadow[i] = l_flags;
	}
	for (; i < leftrz + size; i++) {
		shadow[i] = ASAN_VALID; /* not strictly necessary */
	}
	if (partial && (i < total)) {
		shadow[i] = partial;
		i++;
	}
	for (; i < total; i++) {
		shadow[i] = r_flags;
	}

	asm volatile("" ::: "memory"); /* compiler barrier XXX: is this needed? */
}
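
/*
 * Worked example (illustrative, derived from the code above): for
 * kasan_poison(base, 20, 16, 28, ASAN_HEAP_RZ) with `base' 8-byte aligned,
 * total = 64 bytes = 8 shadow bytes and partial = 20 & 0x07 = 4, so:
 *
 *	shadow[0..1] = ASAN_HEAP_LEFT_RZ	(16-byte left redzone)
 *	shadow[2..3] = ASAN_VALID		(first 16 valid bytes)
 *	shadow[4]    = 4			(granule with only 4 valid bytes)
 *	shadow[5..7] = ASAN_HEAP_RIGHT_RZ	(right redzone)
 *
 * i.e. each shadow byte describes one 8-byte granule of kernel memory.
 */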

void
kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
{
	/* base must be 8-byte aligned */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((size & 0x07) == 0);
	kasan_poison(base, 0, 0, size, flags);
}

void NOINLINE
kasan_unpoison(void *base, vm_size_t size)
{
	kasan_poison((vm_offset_t)base, size, 0, 0, 0);
}

void NOINLINE
kasan_unpoison_stack(vm_offset_t base, vm_size_t size)
{
	assert(base);
	assert(size);
	kasan_unpoison((void *)base, size);
}

/*
 * write junk into the redzones
 */
static void NOINLINE
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
#if KASAN_DEBUG
	vm_size_t i;
	const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
	const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
	uint8_t *buf = (uint8_t *)base;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert(((size + leftrz + rightrz) & 0x07) == 0);

	for (i = 0; i < leftrz; i++) {
		buf[i] = deadbeef[i % 4];
	}

	for (i = 0; i < rightrz; i++) {
		buf[i + size + leftrz] = c0ffee[i % 4];
	}
#else
	(void)base;
	(void)size;
	(void)leftrz;
	(void)rightrz;
#endif
}
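
/*
 * Example (illustrative only): on KASAN_DEBUG kernels a left redzone is filled
 * with the repeating byte pattern de ad be ef and a right redzone with
 * c0 ff ee c0, so stray redzone contents stand out in a memory dump; on
 * non-debug kernels this function is a no-op.
 */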

void NOINLINE
kasan_check_range(const void *x, size_t sz, unsigned access_type)
{
	vm_offset_t invalid;

	if (kasan_in_unsafe()) {
		return;
	}

	if (kasan_range_poisoned((vm_offset_t)x, sz, &invalid)) {
		if (kasan_is_blacklisted(access_type)) {
			return;
		}
		kasan_crash_report(invalid, sz, access_type);
		/* NOTREACHED */
	}
}

/*
 * Check that [base, base+sz) has shadow value `shadow'.
 * If not, report a KASan violation on `addr'.
 */
static void
kasan_assert_shadow(vm_address_t base, vm_size_t sz, vm_address_t addr, uint8_t shadow)
{
	sz -= 8 - (base % 8);
	base += 8 - (base % 8);

	vm_address_t end = base + sz;

	while (base < end) {
		uint8_t *sh = SHADOW_FOR_ADDRESS(base);
		if (*sh != shadow) {
			__asan_report_load1(addr);
		}
		base += 8;
	}
}

/*
 *
 * KASAN violation reporting
 *
 */

static const char *
access_type_str(unsigned type)
{
	if (type & TYPE_LOAD_ALL) {
		return "load";
	} else if (type & TYPE_STORE_ALL) {
		return "store";
	} else if (type & TYPE_FREE) {
		return "free";
	} else {
		return "access";
	}
}

static const char *shadow_strings[] = {
	[ASAN_VALID] = "VALID",
	[ASAN_PARTIAL1] = "PARTIAL1",
	[ASAN_PARTIAL2] = "PARTIAL2",
	[ASAN_PARTIAL3] = "PARTIAL3",
	[ASAN_PARTIAL4] = "PARTIAL4",
	[ASAN_PARTIAL5] = "PARTIAL5",
	[ASAN_PARTIAL6] = "PARTIAL6",
	[ASAN_PARTIAL7] = "PARTIAL7",
	[ASAN_STACK_RZ] = "<invalid>",
	[ASAN_STACK_LEFT_RZ] = "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ] = "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED] = "STACK_FREED",
	[ASAN_GLOBAL_RZ] = "GLOBAL_RZ",
	[ASAN_HEAP_RZ] = "<invalid>",
	[ASAN_HEAP_LEFT_RZ] = "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ] = "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED] = "HEAP_FREED",
	[0xff] = "<invalid>",
};
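
/*
 * Summary of the encoding named above: a shadow byte of 0 (ASAN_VALID) means
 * all 8 bytes of the corresponding granule are addressable, values 1..7
 * (ASAN_PARTIALn) mean only the first n bytes are addressable, and the larger
 * values identify the kind of redzone or freed region the granule belongs to.
 */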

#define CRASH_CONTEXT_BEFORE 5
#define CRASH_CONTEXT_AFTER 5

static size_t
kasan_shadow_crashlog(uptr p, char *buf, size_t len)
{
	int i, j;
	size_t l = 0;
	int before = CRASH_CONTEXT_BEFORE;
	int after = CRASH_CONTEXT_AFTER;

	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
	uptr shadow_p = shadow;

	/* rewind to start of context block */
	shadow &= ~((uptr)0xf);
	shadow -= 16 * before;

	for (i = 0; i < 1 + before + after; i++, shadow += 16) {
		if (vm_map_round_page(shadow, PAGE_MASK) != vm_map_round_page(shadow_p, PAGE_MASK)) {
			/* don't cross a page boundary, in case the shadow is unmapped */
			/* XXX: ideally we check instead of ignore */
			continue;
		}

		l += snprintf(buf+l, len-l, " %#16lx: ", shadow);

		for (j = 0; j < 16; j++) {
			uint8_t *x = (uint8_t *)(shadow + j);
			l += snprintf(buf+l, len-l, "%02x ", (unsigned)*x);
		}
		l += snprintf(buf+l, len-l, "\n");
	}

	l += snprintf(buf+l, len-l, "\n");
	return l;
}

static void NOINLINE
kasan_crash_report(uptr p, uptr width, unsigned access_type)
{
	const size_t len = 4096;
	static char buf[len];
	size_t l = 0;

	uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
	uint8_t shadow_type = *shadow_ptr;
	const char *shadow_str = shadow_strings[shadow_type];

	kasan_handle_test();

	buf[0] = '\0';
	l += snprintf(buf+l, len-l,
			"KASan: invalid %lu-byte %s @ %#lx [%s]\n"
			"Shadow %#02x @ %#lx\n\n",
			width, access_type_str(access_type), p, shadow_str,
			(unsigned)shadow_type, (unsigned long)shadow_ptr);

	l += kasan_shadow_crashlog(p, buf+l, len-l);

	panic("%s", buf);
}

#define REPORT_DECLARE(n) \
	void __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD); } \
	void __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE); } \
	void __asan_report_exp_load##n(uptr, int32_t); \
	void __asan_report_exp_store##n(uptr, int32_t); \
	void __asan_report_exp_load##n(uptr __unused p, int32_t __unused e) { ABI_UNSUPPORTED; } \
	void __asan_report_exp_store##n(uptr __unused p, int32_t __unused e) { ABI_UNSUPPORTED; }

REPORT_DECLARE(1)
REPORT_DECLARE(2)
REPORT_DECLARE(4)
REPORT_DECLARE(8)
REPORT_DECLARE(16)
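
/*
 * For reference (illustrative expansion): REPORT_DECLARE(1) defines
 * __asan_report_load1()/__asan_report_store1(), which the compiler's
 * instrumentation calls on a bad 1-byte access, plus the "exp" variants of
 * the reporting ABI that this implementation does not support.
 */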

void __asan_report_load_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_LOAD); }
void __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE); }

/* unpoison the current stack */
/* XXX: as an optimization, we could unpoison only up to the current stack depth */
void NOINLINE
kasan_unpoison_curstack(void)
{
	kasan_unpoison_stack(ml_stack_base(), ml_stack_size());
}

void NOINLINE
__asan_handle_no_return(void)
{
	kasan_unpoison_curstack();
	kasan_unpoison_fakestack(current_thread());
}

bool NOINLINE
kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
{
	uint8_t *shadow;
	vm_size_t i;

	if (!kasan_initialized || !kasan_enabled) {
		return false;
	}

	size += base & 0x07;
	base &= ~(vm_offset_t)0x07;

	shadow = SHADOW_FOR_ADDRESS(base);
	vm_size_t limit = (size + 7) / 8;

	/* XXX: to make debugging easier, catch unmapped shadow here */

	for (i = 0; i < limit; i++, size -= 8) {
		assert(size > 0);
		uint8_t s = shadow[i];
		if (s == 0 || (size < 8 && s >= size && s <= 7)) {
			/* valid */
		} else {
			goto fail;
		}
	}

	return false;

fail:
	if (first_invalid) {
		/* XXX: calculate the exact first byte that failed */
		*first_invalid = base + i*8;
	}
	return true;
}
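
/*
 * Worked example (illustrative): when the last granule of a range has 3 bytes
 * left to check and its shadow value is 5 (ASAN_PARTIAL5), the check passes
 * because 3 <= 5, i.e. every remaining byte lies within the granule's valid
 * prefix; a shadow value of 2 for the same remainder would take the fail path.
 */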

static void NOINLINE
kasan_init_globals(vm_offset_t base, vm_size_t size)
{
	struct asan_global *glob = (struct asan_global *)base;
	struct asan_global *glob_end = (struct asan_global *)(base + size);
	for (; glob < glob_end; glob++) {
		/* handle one global */
		kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
	}
}

void NOINLINE
kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
{
	unsigned long sectsz;
	void *sect;

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
		kexts_loaded++;
	}

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_load_kext(base, bundleid);
#endif
}

void NOINLINE
kasan_unload_kext(vm_offset_t base, vm_size_t size)
{
	unsigned long sectsz;
	void *sect;

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_unpoison((void *)base, size);
		kexts_loaded--;
	}

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_unload_kext(base);
#endif
}

void NOINLINE
kasan_disable(void)
{
	__asan_option_detect_stack_use_after_return = 0;
	kasan_enabled = 0;
}

static void NOINLINE
kasan_init_xnu_globals(void)
{
	const char *seg = KASAN_GLOBAL_SEGNAME;
	const char *sect = KASAN_GLOBAL_SECTNAME;
	unsigned long _size;
	vm_offset_t globals;
	vm_size_t size;
	kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;

	if (!header) {
		printf("KASAN: failed to find kernel mach header\n");
		printf("KASAN: redzones for globals not poisoned\n");
		return;
	}

	globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
	if (!globals) {
		printf("KASAN: failed to find segment %s section %s\n", seg, sect);
		printf("KASAN: redzones for globals not poisoned\n");
		return;
	}
	size = (vm_size_t)_size;

	printf("KASAN: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
	printf("KASAN: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));

	kasan_init_globals(globals, size);
}

void NOINLINE
kasan_late_init(void)
{
	kasan_init_fakestack();
	kasan_init_xnu_globals();

#if KASAN_DYNAMIC_BLACKLIST
	kasan_init_dybl();
#endif
}

void NOINLINE
kasan_notify_stolen(vm_offset_t top)
{
	kasan_map_shadow(kernel_vtop, top - kernel_vtop, false);
}

static void NOINLINE
kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
{
#if KASAN_DEBUG
	vm_size_t i;
	uint8_t tmp1, tmp2;

	/* Hit every byte in the shadow map. Don't write due to the zero mappings. */
	for (i = 0; i < sz; i += sizeof(uint64_t)) {
		vm_offset_t addr = base + i;
		uint8_t *x = SHADOW_FOR_ADDRESS(addr);
		tmp1 = *x;
		asm volatile("" ::: "memory");
		tmp2 = *x;
		asm volatile("" ::: "memory");
		assert(tmp1 == tmp2);
	}
#else
	(void)base;
	(void)sz;
#endif
}

void NOINLINE
kasan_init(void)
{
	simple_lock_init(&kasan_vm_lock, 0);

	/* Map all of the kernel text and data */
	kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);

	kasan_arch_init();

	kasan_initialized = 1;
	kasan_enabled = 1;
}

static void NOINLINE
kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool is_zero)
{
	assert(address < VM_MAX_KERNEL_ADDRESS);

	if (!kasan_initialized || !kasan_enabled) {
		return;
	}

	if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		/* only map kernel addresses */
		return;
	}

	if (!size) {
		/* nothing to map */
		return;
	}

	boolean_t flags;
	kasan_lock(&flags);
	kasan_map_shadow(address, size, is_zero);
	kasan_unlock(flags);
	kasan_debug_touch_mappings(address, size);
}

void
kasan_notify_address(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, false);
}

/*
 * Allocate read-only, all-zeros shadow for memory that can never be poisoned
 */
void
kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, true);
}

/*
 *
 * allocator hooks
 *
 */

struct kasan_alloc_header {
	uint32_t magic;
	uint32_t alloc_size;
	uint32_t user_size;
	struct {
		uint32_t left_rz : 28;
		uint32_t frames : 4;
	};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

struct kasan_alloc_footer {
	uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");

#define MAGIC_XOR ((uint32_t)0xA110C8ED)
static uint32_t
magic_for_addr(vm_offset_t addr)
{
	return (uint32_t)addr ^ MAGIC_XOR;
}

static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)
{
	return (void *)(addr - sizeof(struct kasan_alloc_header));
}

static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
	*size = rightrz;
	return (void *)(addr + h->user_size);
}
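
/*
 * Layout recap (illustrative, pieced together from the helpers above): a
 * guarded allocation looks like
 *
 *	| left redzone ... header | user data | footer (backtrace) ... right redzone |
 *	                          ^ user pointer
 *
 * The kasan_alloc_header occupies the last bytes of the left redzone,
 * immediately before the user pointer, and records alloc_size, user_size and
 * left_rz so the original allocation can be reconstructed at free time.
 */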

/*
 * size: user-requested allocation size
 * ret: minimum size for the real allocation
 */
vm_size_t
kasan_alloc_resize(vm_size_t size)
{
	vm_size_t tmp;
	if (os_add_overflow(size, 4 * PAGE_SIZE, &tmp)) {
		panic("allocation size overflow (%lu)", size);
	}

	/* add left and right redzones */
	size += KASAN_GUARD_PAD;

	/* ensure the final allocation is an 8-byte multiple */
	size += 8 - (size % 8);

	return size;
}
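
/*
 * Example (illustrative; the actual KASAN_GUARD_PAD value is defined in the
 * KASan headers): for a 20-byte request and a hypothetical 32-byte guard pad,
 * 20 + 32 = 52, which is then rounded up to the next 8-byte multiple
 * (52 + 4 = 56), so the underlying allocator is asked for at least 56 bytes.
 */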

extern vm_offset_t vm_kernel_slid_base;

static vm_size_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
	uintptr_t buf[BACKTRACE_MAXFRAMES];
	uintptr_t *bt = buf;

	sz /= sizeof(uint32_t);
	vm_size_t frames = sz;

	if (frames > 0) {
		frames = min(frames + skip, BACKTRACE_MAXFRAMES);
		frames = backtrace(bt, frames);

		while (frames > sz && skip > 0) {
			bt++;
			frames--;
			skip--;
		}

		/* only store the offset from kernel base, and cram that into 32
		 * bits */
		for (vm_size_t i = 0; i < frames; i++) {
			ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
		}
	}
	return frames;
}

/*
 * addr: base address of full allocation (including redzones)
 * size: total size of allocation (including redzones)
 * req: user-requested allocation size
 * leftrz: size of the left redzone in bytes
 * ret: address of usable allocation
 */
vm_address_t
kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	if (!addr) {
		return 0;
	}
	assert(size > 0);
	assert((addr % 8) == 0);
	assert((size % 8) == 0);

	vm_size_t rightrz = size - req - leftrz;

	kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
	kasan_rz_clobber(addr, req, leftrz, rightrz);

	addr += leftrz;

	/* stash the allocation sizes in the left redzone */
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	h->magic = magic_for_addr(addr);
	h->left_rz = leftrz;
	h->alloc_size = size;
	h->user_size = req;

	/* ... and a backtrace in the right redzone */
	vm_size_t fsize;
	struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
	h->frames = kasan_alloc_bt(f->backtrace, fsize, 2);

	return addr;
}
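
/*
 * Intended call sequence (illustrative sketch; the real callers live in the
 * zalloc/kalloc paths): the allocator first widens the request with
 * kasan_alloc_resize(), performs the real allocation, then hands the raw
 * block to kasan_alloc() to poison the redzones and obtain the pointer to
 * return to the caller, e.g.:
 *
 *	vm_size_t real_size = kasan_alloc_resize(req);
 *	vm_offset_t raw = ...real allocation of real_size bytes...;
 *	addr = kasan_alloc(raw, real_size, req, leftrz);
 */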

/*
 * addr: user pointer
 * size: returns full original allocation size
 * ret: original allocation ptr
 */
vm_address_t
kasan_dealloc(vm_offset_t addr, vm_size_t *size)
{
	assert(size && addr);
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	if (h->magic != magic_for_addr(addr)) {
		/* no point blacklisting here - this is fatal */
		kasan_crash_report(addr, *size, TYPE_FREE);
	}
	*size = h->alloc_size;
	return addr - h->left_rz;
}

/*
 * return the original user-requested allocation size
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	assert(h->magic == magic_for_addr(addr));
	return h->user_size;
}

/*
 * Verify that `addr' (user pointer) is a valid allocation of `type'
 */
void
kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);

	/* map heap type to an internal access type */
	unsigned type = TYPE_FREE; /* fallback, so `type' is never read uninitialized */
	if (heap_type == KASAN_HEAP_KALLOC) {
		type = TYPE_KFREE;
	} else if (heap_type == KASAN_HEAP_ZALLOC) {
		type = TYPE_ZFREE;
	} else if (heap_type == KASAN_HEAP_FAKESTACK) {
		type = TYPE_FSFREE;
	}

	/* check the magic matches */
	if (h->magic != magic_for_addr(addr)) {
		if (kasan_is_blacklisted(type)) {
			return;
		}
		kasan_crash_report(addr, size, type);
	}

	/* check the freed size matches what we recorded at alloc time */
	if (h->user_size != size) {
		if (kasan_is_blacklisted(type)) {
			return;
		}
		kasan_crash_report(addr, size, type);
	}

	vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;

	/* Check that the redzones are valid */
	kasan_assert_shadow(addr - h->left_rz, h->left_rz, addr, ASAN_HEAP_LEFT_RZ);
	kasan_assert_shadow(addr + h->user_size, rightrz_sz, addr, ASAN_HEAP_RIGHT_RZ);

	/* Check the allocated range is not poisoned */
	kasan_check_range((void *)addr, size, type);
}

/*
 *
 * Quarantine
 *
 */

struct freelist_entry {
	uint32_t magic;
	uint32_t checksum;
	STAILQ_ENTRY(freelist_entry) list;
	union {
		struct {
			vm_size_t size : 28;
			vm_size_t user_size : 28;
			vm_size_t frames : 4; /* number of frames in backtrace */
			vm_size_t __unused : 4;
		};
		uint64_t bits;
	};
	zone_t zone;
	uint32_t backtrace[];
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");

#define FREELIST_MAGIC_XOR ((uint32_t)0xF23333D)
static uint32_t
freelist_magic(vm_offset_t addr)
{
	return (uint32_t)addr ^ FREELIST_MAGIC_XOR;
}

struct quarantine {
	STAILQ_HEAD(freelist_head, freelist_entry) freelist;
	unsigned long entries;
	unsigned long max_entries;
	vm_size_t size;
	vm_size_t max_size;
};

struct quarantine quarantines[] = {
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};
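
/*
 * How the quarantine works (summary of kasan_free_internal() below): freed
 * objects are kept poisoned on a per-heap FIFO freelist instead of being
 * returned to the allocator immediately. The list is capped both by entry
 * count and by total byte size; once a cap is exceeded, the oldest entry
 * (or the current object, if it is larger) is evicted and actually freed.
 * Holding freed memory this way lets use-after-free bugs trip on the
 * ASAN_HEAP_FREED / ASAN_STACK_FREED shadow before the memory is reused.
 */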

/*
 * addr, sizep: pointer/size of full allocation including redzone
 */
void NOINLINE
kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
		zone_t *zone, vm_size_t user_size, int locked,
		bool doquarantine)
{
	vm_size_t size = *sizep;
	vm_offset_t addr = *(vm_offset_t *)addrp;

	assert(type >= 0 && type < KASAN_HEAP_TYPES);
	if (type == KASAN_HEAP_KALLOC) {
		/* zero-size kalloc allocations are allowed */
		assert(!zone);
	} else if (type == KASAN_HEAP_ZALLOC) {
		assert(zone && user_size);
	} else if (type == KASAN_HEAP_FAKESTACK) {
		assert(zone && user_size);
	}

	/* clobber the entire freed region */
	kasan_rz_clobber(addr, 0, size, 0);

	if (!doquarantine || !quarantine_enabled) {
		goto free_current;
	}

	/* poison the entire freed region */
	uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
	kasan_poison(addr, 0, size, 0, flags);

	struct freelist_entry *fle, *tofree = NULL;
	struct quarantine *q = &quarantines[type];
	assert(size >= sizeof(struct freelist_entry));

	/* create a new freelist entry */
	fle = (struct freelist_entry *)addr;
	fle->magic = freelist_magic((vm_offset_t)fle);
	fle->size = size;
	fle->user_size = user_size;
	fle->frames = 0;
	fle->zone = ZONE_NULL;
	if (zone) {
		fle->zone = *zone;
	}
	if (type != KASAN_HEAP_FAKESTACK) {
		fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
	}

	boolean_t flg;
	if (!locked) {
		kasan_lock(&flg);
	}

	if (q->size + size > q->max_size) {
		/*
		 * Adding this entry would put us over the max quarantine size. Free the
		 * larger of the current object and the quarantine head object.
		 */
		tofree = STAILQ_FIRST(&q->freelist);
		if (fle->size > tofree->size) {
			goto free_current_locked;
		}
	}

	STAILQ_INSERT_TAIL(&q->freelist, fle, list);
	q->entries++;
	q->size += size;

	/* free the oldest entry, if necessary */
	if (tofree || q->entries > q->max_entries) {
		tofree = STAILQ_FIRST(&q->freelist);
		STAILQ_REMOVE_HEAD(&q->freelist, list);

		assert(q->entries > 0 && q->size >= tofree->size);
		q->entries--;
		q->size -= tofree->size;

		if (type != KASAN_HEAP_KALLOC) {
			assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS &&
					(vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS);
			*zone = tofree->zone;
		}

		size = tofree->size;
		addr = (vm_offset_t)tofree;
		if (tofree->magic != freelist_magic(addr)) {
			kasan_crash_report(addr, size, TYPE_FREE);
		}

		/* clobber the quarantine header */
		kasan_rz_clobber(addr, 0, sizeof(struct freelist_entry), 0);

	} else {
		/* quarantine is not full - don't really free anything */
		addr = 0;
	}

free_current_locked:
	if (!locked) {
		kasan_unlock(flg);
	}

free_current:
	*addrp = (void *)addr;
	if (addr) {
		kasan_unpoison((void *)addr, size);
		*sizep = size;
	}
}

void NOINLINE
kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
	vm_size_t user_size, bool quarantine)
{
	kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);
}

uptr
__asan_load_cxx_array_cookie(uptr *p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)p);
	if (*shadow == ASAN_ARRAY_COOKIE) {
		return *p;
	} else if (*shadow == ASAN_HEAP_FREED) {
		return 0;
	} else {
		return *p;
	}
}

void
__asan_poison_cxx_array_cookie(uptr p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(p);
	*shadow = ASAN_ARRAY_COOKIE;
}

#define ACCESS_CHECK_DECLARE(type, sz, access_type) \
	void __asan_##type##sz(uptr addr) { \
		kasan_check_range((const void *)addr, sz, access_type); \
	} \
	void __asan_exp_##type##sz(uptr, int32_t); \
	void __asan_exp_##type##sz(uptr __unused addr, int32_t __unused e) { ABI_UNSUPPORTED; }

ACCESS_CHECK_DECLARE(load, 1, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 2, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 4, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 8, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 16, TYPE_LOAD);
ACCESS_CHECK_DECLARE(store, 1, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 2, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 4, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 8, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 16, TYPE_STORE);

void
__asan_loadN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_LOAD);
}

void
__asan_storeN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_STORE);
}

void __asan_exp_loadN(uptr, size_t, int32_t);
void __asan_exp_storeN(uptr, size_t, int32_t);
void __asan_exp_loadN(uptr __unused addr, size_t __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
void __asan_exp_storeN(uptr __unused addr, size_t __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }

void __asan_report_exp_load_n(uptr, unsigned long, int32_t);
void __asan_report_exp_store_n(uptr, unsigned long, int32_t);
void __asan_report_exp_load_n(uptr __unused p, unsigned long __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
void __asan_report_exp_store_n(uptr __unused p, unsigned long __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }

static void
kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
{
	__nosan_memset((void *)addr, val, sz);
}

#define SET_SHADOW_DECLARE(val) \
	void __asan_set_shadow_##val(uptr addr, size_t sz) { \
		kasan_set_shadow(addr, sz, 0x##val); \
	}

SET_SHADOW_DECLARE(00)
SET_SHADOW_DECLARE(f1)
SET_SHADOW_DECLARE(f2)
SET_SHADOW_DECLARE(f3)
SET_SHADOW_DECLARE(f5)
SET_SHADOW_DECLARE(f8)
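
/*
 * Note: the hex suffix of each __asan_set_shadow_* entry point is the shadow
 * byte value it writes (e.g. __asan_set_shadow_f1 fills the range with 0xf1);
 * these entry points are called from compiler-generated stack-poisoning
 * instrumentation.
 */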

/*
 * XXX: implement these
 */

void __asan_alloca_poison(uptr addr, uptr size)
{
	(void)addr;
	(void)size;
}

void __asan_allocas_unpoison(uptr top, uptr bottom)
{
	(void)top;
	(void)bottom;
}

void
__sanitizer_ptr_sub(uptr a, uptr b)
{
	(void)a;
	(void)b;
}

void
__sanitizer_ptr_cmp(uptr a, uptr b)
{
	(void)a;
	(void)b;
}

void
__asan_poison_stack_memory(uptr addr, size_t size)
{
	(void)addr;
	(void)size;
}

void
__asan_unpoison_stack_memory(uptr addr, size_t size)
{
	(void)addr;
	(void)size;
}

void
__sanitizer_annotate_contiguous_container(const void *beg,
		const void *end,
		const void *old_mid,
		const void *new_mid)
{
	(void)beg;
	(void)end;
	(void)old_mid;
	(void)new_mid;
}

/*
 */

void
__asan_init(void)
{
}

#define VERSION_DECLARE(v) \
	void __asan_version_mismatch_check_##v(void); \
	void __asan_version_mismatch_check_##v(void) {}

VERSION_DECLARE(v8)
VERSION_DECLARE(apple_802)
VERSION_DECLARE(apple_900)

void
__asan_register_globals(uptr __unused a, uptr __unused b)
{
	ABI_UNSUPPORTED;
}

void
__asan_unregister_globals(uptr __unused a, uptr __unused b)
{
	ABI_UNSUPPORTED;
}

void
__asan_register_image_globals(uptr __unused ptr)
{
}

void
__asan_unregister_image_globals(uptr __unused ptr)
{
}

void
__asan_init_v5(void)
{
}

void
__asan_before_dynamic_init(uptr __unused arg)
{
}

void
__asan_after_dynamic_init(void)
{
}


/*
 *
 * SYSCTL
 *
 */

static int
sysctl_kasan_test(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg2, struct sysctl_req *req)
{
	int mask = 0;
	int ch;
	int err;
	err = sysctl_io_number(req, 0, sizeof(int), &mask, &ch);

	if (!err && mask) {
		kasan_test(mask, arg2);
	}

	return err;
}

SYSCTL_DECL(kasan);
SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");

SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
SYSCTL_INT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
SYSCTL_INT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
SYSCTL_LONG(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, "");
SYSCTL_LONG(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, "");
SYSCTL_LONG(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, "");

SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, fakestack, CTLFLAG_RD, NULL, FAKESTACK, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, memintrinsics, CTLFLAG_RD, NULL, MEMINTRINSICS, "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, test,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 0, sysctl_kasan_test, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fail,
		CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
		0, 1, sysctl_kasan_test, "I", "");
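
/*
 * Usage note (illustrative): on a KASan kernel these knobs appear under
 * kern.kasan; for example `sysctl kern.kasan.available' reports whether KASan
 * is built in and `sysctl kern.kasan.memused' shows the shadow pages in use.
 * Writing a nonzero mask to kern.kasan.test or kern.kasan.fail runs the
 * self-tests wired up through sysctl_kasan_test() above.
 */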