/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/backtrace.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <mach/sdt.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <kern/thread.h>
#include <machine/atomic.h>

#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_SHIFT;

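/*
 * Illustrative sketch of the shadow mapping, assuming the canonical ASan
 * scheme (the actual SHADOW_FOR_ADDRESS()/KASAN_SHIFT definitions live in
 * kasan_internal.h and the arch headers): each shadow byte describes one
 * 8-byte granule of kernel address space.
 *
 *   shadow_byte = (uint8_t *)((addr >> 3) + KASAN_SHIFT);
 *
 * A shadow value of 0 means all 8 bytes of the granule are addressable,
 * 1..7 means only the first N bytes are, and larger values encode
 * redzone/freed state (see shadow_strings[] below).
 */
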
static unsigned kexts_loaded;
unsigned shadow_pages_total;
unsigned shadow_pages_used;

vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;

static unsigned kasan_enabled;
static unsigned quarantine_enabled;
static unsigned enabled_checks = TYPE_ALL & ~TYPE_LEAK; /* bitmask of enabled checks */
static unsigned report_ignored;           /* issue non-fatal report for disabled/blacklisted checks */
static unsigned free_yield = 0;           /* ms yield after each free */
static unsigned leak_threshold = 3;       /* threshold for uninitialized memory leak detection */
static unsigned leak_fatal_threshold = 0; /* threshold for treating leaks as fatal errors (0 means never) */

/* forward decls */
static void kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason);
static void kasan_log_report(uptr p, uptr width, access_t access, violation_t reason);

/* imported osfmk functions */
extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);

/*
 * unused: expected to be called, but (currently) does nothing
 */
#define UNUSED_ABI(func, ...) \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Wunused-parameter\"") \
	void func(__VA_ARGS__); \
	void func(__VA_ARGS__) {}; \
	_Pragma("clang diagnostic pop")

static const size_t BACKTRACE_BITS = 4;
static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;

static vm_size_t kasan_alloc_retrieve_bt(vm_address_t addr, uintptr_t frames[static BACKTRACE_MAXFRAMES]);

decl_simple_lock_data(, kasan_vm_lock);
static thread_t kasan_lock_holder;

/*
 * KASan is called from the interrupt path, so we need to disable interrupts
 * to ensure atomicity when manipulating the global objects.
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&kasan_vm_lock, LCK_GRP_NULL);
	kasan_lock_holder = current_thread();
}

void
kasan_unlock(boolean_t b)
{
	kasan_lock_holder = THREAD_NULL;
	simple_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}

/* Return true if 'thread' holds the kasan lock. Only safe if 'thread' == current
 * thread */
bool
kasan_lock_held(thread_t thread)
{
	return thread && thread == kasan_lock_holder;
}

static inline bool
kasan_check_enabled(access_t access)
{
	return kasan_enabled && (enabled_checks & access) && !kasan_is_blacklisted(access);
}

static inline bool
kasan_poison_active(uint8_t flags)
{
	switch (flags) {
	case ASAN_GLOBAL_RZ:
		return kasan_check_enabled(TYPE_POISON_GLOBAL);
	case ASAN_HEAP_RZ:
	case ASAN_HEAP_LEFT_RZ:
	case ASAN_HEAP_RIGHT_RZ:
	case ASAN_HEAP_FREED:
		return kasan_check_enabled(TYPE_POISON_HEAP);
	default:
		return true;
	}
}

/*
 * poison redzones in the shadow map
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz, uint8_t flags)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
	uint8_t partial = size & 0x07;
	vm_size_t total = leftrz + size + rightrz;
	vm_size_t i = 0;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert((total & 0x07) == 0);

	if (!kasan_enabled || !kasan_poison_active(flags)) {
		return;
	}

	leftrz /= 8;
	size /= 8;
	total /= 8;

	uint8_t l_flags = flags;
	uint8_t r_flags = flags;

	if (flags == ASAN_STACK_RZ) {
		l_flags = ASAN_STACK_LEFT_RZ;
		r_flags = ASAN_STACK_RIGHT_RZ;
	} else if (flags == ASAN_HEAP_RZ) {
		l_flags = ASAN_HEAP_LEFT_RZ;
		r_flags = ASAN_HEAP_RIGHT_RZ;
	}

	/*
	 * poison the redzones and unpoison the valid bytes
	 */
	for (; i < leftrz; i++) {
		shadow[i] = l_flags;
	}
	for (; i < leftrz + size; i++) {
		shadow[i] = ASAN_VALID; /* XXX: should not be necessary */
	}
	if (partial && (i < total)) {
		shadow[i] = partial;
		i++;
	}
	for (; i < total; i++) {
		shadow[i] = r_flags;
	}
}
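
/*
 * Worked example (illustrative): kasan_poison(base, 13, 16, 19, ASAN_HEAP_RZ)
 * covers 16 + 13 + 19 = 48 bytes, i.e. 6 shadow bytes:
 *
 *   shadow[0..1] = ASAN_HEAP_LEFT_RZ   (16-byte left redzone)
 *   shadow[2]    = ASAN_VALID          (first 8 valid bytes)
 *   shadow[3]    = 5                   (partial granule: 13 & 7 bytes valid)
 *   shadow[4..5] = ASAN_HEAP_RIGHT_RZ  (remainder of the 19-byte right redzone)
 */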

void
kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
{
	/* base must be 8-byte aligned */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((size & 0x07) == 0);
	kasan_poison(base, 0, 0, size, flags);
}

void NOINLINE
kasan_unpoison(void *base, vm_size_t size)
{
	kasan_poison((vm_offset_t)base, size, 0, 0, 0);
}

void NOINLINE
kasan_unpoison_stack(vm_offset_t base, vm_size_t size)
{
	assert(base);
	assert(size);

	/* align base and size to 8 bytes */
	vm_offset_t align = base & 0x7;
	base -= align;
	size += align;
	size = (size + 7) & ~0x7;

	kasan_unpoison((void *)base, size);
}
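
/*
 * For example (illustrative), base = 0xffffff8000001003 and size = 13 yields
 * align = 3, so the range is widened to base = 0xffffff8000001000 and
 * size = 16: an 8-byte-aligned superset of the original range.
 */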

/*
 * write junk into the redzones
 */
static void NOINLINE
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
#if KASAN_DEBUG
	vm_size_t i;
	const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
	const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
	uint8_t *buf = (uint8_t *)base;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert(((size + leftrz + rightrz) & 0x07) == 0);

	for (i = 0; i < leftrz; i++) {
		buf[i] = deadbeef[i % 4];
	}

	for (i = 0; i < rightrz; i++) {
		buf[i + size + leftrz] = c0ffee[i % 4];
	}
#else
	(void)base;
	(void)size;
	(void)leftrz;
	(void)rightrz;
#endif
}

/*
 * Report a violation that may be disabled and/or blacklisted. This can only be
 * called for dynamic checks (i.e. where the fault is recoverable). Use
 * kasan_crash_report() for static (unrecoverable) violations.
 *
 * access: what we were trying to do when the violation occurred
 * reason: what failed about the access
 */
static void
kasan_violation(uintptr_t addr, size_t size, access_t access, violation_t reason)
{
	assert(__builtin_popcount(access) == 1);
	if (!kasan_check_enabled(access)) {
		if (report_ignored) {
			kasan_log_report(addr, size, access, reason);
		}
		return;
	}
	kasan_crash_report(addr, size, access, reason);
}

void NOINLINE
kasan_check_range(const void *x, size_t sz, access_t access)
{
	uintptr_t invalid;
	uintptr_t ptr = (uintptr_t)x;
	if (kasan_range_poisoned(ptr, sz, &invalid)) {
		size_t remaining = sz - (invalid - ptr);
		kasan_violation(invalid, remaining, access, 0);
	}
}

/*
 * Return true if [base, base+sz) is unpoisoned or has given shadow value.
 */
bool
kasan_check_shadow(vm_address_t base, vm_size_t sz, uint8_t shadow)
{
	sz -= 8 - (base % 8);
	base += 8 - (base % 8);

	vm_address_t end = base + sz;

	while (base < end) {
		uint8_t *sh = SHADOW_FOR_ADDRESS(base);
		if (*sh && *sh != shadow) {
			return false;
		}
		base += 8;
	}
	return true;
}

static void
kasan_report_leak(vm_address_t base, vm_size_t sz, vm_offset_t offset, vm_size_t leak_sz)
{
	if (leak_fatal_threshold > leak_threshold && leak_sz >= leak_fatal_threshold) {
		kasan_violation(base + offset, leak_sz, TYPE_LEAK, REASON_UNINITIALIZED);
	}

	char string_rep[BACKTRACE_MAXFRAMES * 20] = {};
	vm_offset_t stack_base = dtrace_get_kernel_stack(current_thread());
	bool is_stack = (base >= stack_base && base < (stack_base + kernel_stack_size));

	if (!is_stack) {
		uintptr_t alloc_bt[BACKTRACE_MAXFRAMES] = {};
		vm_size_t num_frames = 0;
		size_t l = 0;
		num_frames = kasan_alloc_retrieve_bt(base, alloc_bt);
		for (vm_size_t i = 0; i < num_frames; i++) {
			l += snprintf(string_rep + l, sizeof(string_rep) - l, " %lx", alloc_bt[i]);
		}
	}

	DTRACE_KASAN5(leak_detected,
	    vm_address_t, base,
	    vm_size_t, sz,
	    vm_offset_t, offset,
	    vm_size_t, leak_sz,
	    char *, string_rep);
}

/*
 * Check for possible uninitialized memory contained in [base, base+sz).
 */
void
kasan_check_uninitialized(vm_address_t base, vm_size_t sz)
{
	if (!(enabled_checks & TYPE_LEAK) || sz < leak_threshold) {
		return;
	}

	vm_address_t cur = base;
	vm_address_t end = base + sz;
	vm_size_t count = 0;
	vm_size_t max_count = 0;
	vm_address_t leak_offset = 0;
	uint8_t byte = 0;

	while (cur < end) {
		byte = *(uint8_t *)cur;
		count = (byte == KASAN_UNINITIALIZED_HEAP) ? (count + 1) : 0;
		if (count > max_count) {
			max_count = count;
			leak_offset = cur - (count - 1) - base;
		}
		cur += 1;
	}

	if (max_count >= leak_threshold) {
		kasan_report_leak(base, sz, leak_offset, max_count);
	}
}
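
/*
 * The scan above is a run-length search for the KASAN_UNINITIALIZED_HEAP
 * fill pattern. Illustrative example: in a 16-byte buffer where only bytes
 * 4..9 still hold the fill byte, the loop ends with max_count = 6 and
 * leak_offset = 4, so a leak is reported whenever leak_threshold <= 6.
 */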

/*
 *
 * KASAN violation reporting
 *
 */

static const char *
access_str(access_t type)
{
	if (type & TYPE_READ) {
		return "load from";
	} else if (type & TYPE_WRITE) {
		return "store to";
	} else if (type & TYPE_FREE) {
		return "free of";
	} else if (type & TYPE_LEAK) {
		return "leak from";
	} else {
		return "access of";
	}
}

static const char *shadow_strings[] = {
	[ASAN_VALID]          = "VALID",
	[ASAN_PARTIAL1]       = "PARTIAL1",
	[ASAN_PARTIAL2]       = "PARTIAL2",
	[ASAN_PARTIAL3]       = "PARTIAL3",
	[ASAN_PARTIAL4]       = "PARTIAL4",
	[ASAN_PARTIAL5]       = "PARTIAL5",
	[ASAN_PARTIAL6]       = "PARTIAL6",
	[ASAN_PARTIAL7]       = "PARTIAL7",
	[ASAN_STACK_LEFT_RZ]  = "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ]   = "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED]    = "STACK_FREED",
	[ASAN_STACK_OOSCOPE]  = "STACK_OOSCOPE",
	[ASAN_GLOBAL_RZ]      = "GLOBAL_RZ",
	[ASAN_HEAP_LEFT_RZ]   = "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ]  = "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED]     = "HEAP_FREED",
	[0xff]                = NULL
};

#define CRASH_CONTEXT_BEFORE 5
#define CRASH_CONTEXT_AFTER  5

static size_t
kasan_shadow_crashlog(uptr p, char *buf, size_t len)
{
	int i, j;
	size_t n = 0;
	int before = CRASH_CONTEXT_BEFORE;
	int after = CRASH_CONTEXT_AFTER;

	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
	uptr shadow_p = shadow;
	uptr shadow_page = vm_map_round_page(shadow_p, HW_PAGE_MASK);

	/* rewind to start of context block */
	shadow &= ~((uptr)0xf);
	shadow -= 16 * before;

	n += snprintf(buf+n, len-n,
	    " Shadow             0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f\n");

	for (i = 0; i < 1 + before + after; i++, shadow += 16) {
		if ((vm_map_round_page(shadow, HW_PAGE_MASK) != shadow_page) && !kasan_is_shadow_mapped(shadow)) {
			/* avoid unmapped shadow when crossing page boundaries */
			continue;
		}

		n += snprintf(buf+n, len-n, " %16lx:", shadow);

		char *left = " ";
		char *right;

		for (j = 0; j < 16; j++) {
			uint8_t *x = (uint8_t *)(shadow + j);

			right = " ";
			if ((uptr)x == shadow_p) {
				left = "[";
				right = "]";
			} else if ((uptr)(x + 1) == shadow_p) {
				right = "";
			}

			n += snprintf(buf+n, len-n, "%s%02x%s", left, (unsigned)*x, right);
			left = "";
		}
		n += snprintf(buf+n, len-n, "\n");
	}

	n += snprintf(buf+n, len-n, "\n");
	return n;
}
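
/*
 * Illustrative output (all values hypothetical; '[..]' brackets the shadow
 * byte for the faulting address):
 *
 *  Shadow             0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f
 *  ffffff8031a6a020: 00 00 00 00 00 00 f1 f1 f1[f3]f3 f3 00 00 00 00
 */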

static void
kasan_report_internal(uptr p, uptr width, access_t access, violation_t reason, bool dopanic)
{
	const size_t len = 4096;
	static char buf[len];
	size_t n = 0;

	uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
	uint8_t shadow_type = *shadow_ptr;
	const char *shadow_str = shadow_strings[shadow_type];
	if (!shadow_str) {
		shadow_str = "<invalid>";
	}
	buf[0] = '\0';

	if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
		n += snprintf(buf+n, len-n, "KASan: free of corrupted/invalid object %#lx\n", p);
	} else if (reason == REASON_MOD_AFTER_FREE) {
		n += snprintf(buf+n, len-n, "KASan: UaF of quarantined object %#lx\n", p);
	} else {
		n += snprintf(buf+n, len-n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
		    width, access_str(access), p, shadow_str);
	}
	n += kasan_shadow_crashlog(p, buf+n, len-n);

	if (dopanic) {
		panic("%s", buf);
	} else {
		printf("%s", buf);
	}
}

static void NOINLINE OS_NORETURN
kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason)
{
	kasan_handle_test();
	kasan_report_internal(p, width, access, reason, true);
	__builtin_unreachable(); /* we can't handle this returning anyway */
}

static void
kasan_log_report(uptr p, uptr width, access_t access, violation_t reason)
{
	const size_t len = 256;
	char buf[len];
	size_t l = 0;
	uint32_t nframes = 14;
	uintptr_t frames[nframes];
	uintptr_t *bt = frames;

	kasan_report_internal(p, width, access, reason, false);

	/*
	 * print a backtrace
	 */

	nframes = backtrace_frame(bt, nframes, __builtin_frame_address(0),
	    NULL); /* ignore current frame */

	buf[0] = '\0';
	l += snprintf(buf+l, len-l, "Backtrace: ");
	for (uint32_t i = 0; i < nframes; i++) {
		l += snprintf(buf+l, len-l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
	}
	l += snprintf(buf+l, len-l, "\n");

	printf("%s", buf);
}

#define REPORT_DECLARE(n) \
	void OS_NORETURN __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD, 0); } \
	void OS_NORETURN __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE, 0); } \
	void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_load##n, uptr a, int32_t b); \
	void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_store##n, uptr a, int32_t b);

REPORT_DECLARE(1)
REPORT_DECLARE(2)
REPORT_DECLARE(4)
REPORT_DECLARE(8)
REPORT_DECLARE(16)

void OS_NORETURN __asan_report_load_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_LOAD, 0); }
void OS_NORETURN __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE, 0); }

/* unpoison the current stack */
void NOINLINE
kasan_unpoison_curstack(bool whole_stack)
{
	uintptr_t base = ml_stack_base();
	size_t sz = ml_stack_size();
	uintptr_t cur = (uintptr_t)&base;

	if (whole_stack) {
		cur = base;
	}

	if (cur >= base && cur < base + sz) {
		/* unpoison from current stack depth to the top */
		size_t unused = cur - base;
		kasan_unpoison_stack(cur, sz - unused);
	}
}

void NOINLINE
__asan_handle_no_return(void)
{
	kasan_unpoison_curstack(false);

	/*
	 * No need to free any fakestack objects because they must stay alive until
	 * we drop the real stack, at which point we can drop the entire fakestack
	 * anyway.
	 */
}

597
598bool NOINLINE
599kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
600{
601 uint8_t *shadow;
602 vm_size_t i;
603
a39ff7e2 604 if (!kasan_enabled) {
5ba3f43e
A
605 return false;
606 }
607
608 size += base & 0x07;
609 base &= ~(vm_offset_t)0x07;
610
611 shadow = SHADOW_FOR_ADDRESS(base);
612 vm_size_t limit = (size + 7) / 8;
613
614 /* XXX: to make debugging easier, catch unmapped shadow here */
615
616 for (i = 0; i < limit; i++, size -= 8) {
617 assert(size > 0);
618 uint8_t s = shadow[i];
619 if (s == 0 || (size < 8 && s >= size && s <= 7)) {
620 /* valid */
621 } else {
622 goto fail;
623 }
624 }
625
626 return false;
627
628 fail:
629 if (first_invalid) {
630 /* XXX: calculate the exact first byte that failed */
631 *first_invalid = base + i*8;
632 }
633 return true;
634}
635
636static void NOINLINE
637kasan_init_globals(vm_offset_t base, vm_size_t size)
638{
639 struct asan_global *glob = (struct asan_global *)base;
640 struct asan_global *glob_end = (struct asan_global *)(base + size);
641 for (; glob < glob_end; glob++) {
642 /* handle one global */
643 kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
644 }
645}
646
647void NOINLINE
648kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
649{
650 unsigned long sectsz;
651 void *sect;
652
a39ff7e2
A
653#if KASAN_DYNAMIC_BLACKLIST
654 kasan_dybl_load_kext(base, bundleid);
655#endif
656
5ba3f43e
A
657 /* find the kasan globals segment/section */
658 sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
659 if (sect) {
660 kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
661 kexts_loaded++;
662 }
5ba3f43e
A
663}
664
665void NOINLINE
666kasan_unload_kext(vm_offset_t base, vm_size_t size)
667{
668 unsigned long sectsz;
669 void *sect;
670
671 /* find the kasan globals segment/section */
672 sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
673 if (sect) {
674 kasan_unpoison((void *)base, size);
675 kexts_loaded--;
676 }
677
678#if KASAN_DYNAMIC_BLACKLIST
679 kasan_dybl_unload_kext(base);
680#endif
681}
682
a39ff7e2
A
683/*
684 * Turn off as much as possible for panic path etc. There's no way to turn it back
685 * on.
686 */
5ba3f43e
A
687void NOINLINE
688kasan_disable(void)
689{
690 __asan_option_detect_stack_use_after_return = 0;
a39ff7e2 691 fakestack_enabled = 0;
5ba3f43e 692 kasan_enabled = 0;
a39ff7e2
A
693 quarantine_enabled = 0;
694 enabled_checks = 0;
5ba3f43e
A
695}
696
697static void NOINLINE
698kasan_init_xnu_globals(void)
699{
700 const char *seg = KASAN_GLOBAL_SEGNAME;
701 const char *sect = KASAN_GLOBAL_SECTNAME;
702 unsigned long _size;
703 vm_offset_t globals;
704 vm_size_t size;
705 kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;
706
707 if (!header) {
a39ff7e2
A
708 printf("KASan: failed to find kernel mach header\n");
709 printf("KASan: redzones for globals not poisoned\n");
5ba3f43e
A
710 return;
711 }
712
713 globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
714 if (!globals) {
a39ff7e2
A
715 printf("KASan: failed to find segment %s section %s\n", seg, sect);
716 printf("KASan: redzones for globals not poisoned\n");
5ba3f43e
A
717 return;
718 }
719 size = (vm_size_t)_size;
720
a39ff7e2
A
721 printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
722 printf("KASan: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));
5ba3f43e
A
723
724 kasan_init_globals(globals, size);
725}
726
727void NOINLINE
728kasan_late_init(void)
729{
5ba3f43e
A
730#if KASAN_DYNAMIC_BLACKLIST
731 kasan_init_dybl();
732#endif
a39ff7e2
A
733
734 kasan_init_fakestack();
735 kasan_init_xnu_globals();
5ba3f43e
A
736}

void NOINLINE
kasan_notify_stolen(vm_offset_t top)
{
	kasan_map_shadow(kernel_vtop, top - kernel_vtop, false);
}

static void NOINLINE
kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
{
#if KASAN_DEBUG
	vm_size_t i;
	uint8_t tmp1, tmp2;

	/* Hit every byte in the shadow map. Don't write due to the zero mappings. */
	for (i = 0; i < sz; i += sizeof(uint64_t)) {
		vm_offset_t addr = base + i;
		uint8_t *x = SHADOW_FOR_ADDRESS(addr);
		tmp1 = *x;
		asm volatile("" ::: "memory");
		tmp2 = *x;
		asm volatile("" ::: "memory");
		assert(tmp1 == tmp2);
	}
#else
	(void)base;
	(void)sz;
#endif
}

void NOINLINE
kasan_init(void)
{
	unsigned arg;

	simple_lock_init(&kasan_vm_lock, 0);

	/* Map all of the kernel text and data */
	kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);

	kasan_arch_init();

	/*
	 * handle KASan boot-args
	 */

	if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
		enabled_checks = arg;
	}

	if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
		if (arg & KASAN_ARGS_FAKESTACK) {
			fakestack_enabled = 1;
		}
		if (arg & KASAN_ARGS_REPORTIGNORED) {
			report_ignored = 1;
		}
		if (arg & KASAN_ARGS_NODYCHECKS) {
			enabled_checks &= ~TYPE_DYNAMIC;
		}
		if (arg & KASAN_ARGS_NOPOISON_HEAP) {
			enabled_checks &= ~TYPE_POISON_HEAP;
		}
		if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
			enabled_checks &= ~TYPE_POISON_GLOBAL;
		}
		if (arg & KASAN_ARGS_CHECK_LEAKS) {
			enabled_checks |= TYPE_LEAK;
		}
	}

	if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
		free_yield = arg;
	}

	if (PE_parse_boot_argn("kasan.leak_threshold", &arg, sizeof(arg))) {
		leak_threshold = arg;
	}

	if (PE_parse_boot_argn("kasan.leak_fatal_threshold", &arg, sizeof(arg))) {
		leak_fatal_threshold = arg;
	}

	/* kasan.bl boot-arg handled in kasan_init_dybl() */

	quarantine_enabled = 1;
	kasan_enabled = 1;
}
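
/*
 * Example boot-args (illustrative; the numeric values depend on the TYPE_*
 * and KASAN_ARGS_* bit definitions in kasan_internal.h):
 *
 *   kasan.free_yield_ms=10    yield 10ms after each free
 *   kasan.leak_threshold=16   report runs of >= 16 uninitialized bytes
 *   kasan.checks=<bitmask>    explicitly select the enabled check types
 */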

static void NOINLINE
kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool is_zero)
{
	assert(address < VM_MAX_KERNEL_ADDRESS);

	if (!kasan_enabled) {
		return;
	}

	if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		/* only map kernel addresses */
		return;
	}

	if (!size) {
		/* nothing to map */
		return;
	}

	boolean_t flags;
	kasan_lock(&flags);
	kasan_map_shadow(address, size, is_zero);
	kasan_unlock(flags);
	kasan_debug_touch_mappings(address, size);
}

void
kasan_notify_address(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, false);
}

/*
 * Allocate read-only, all-zeros shadow for memory that can never be poisoned
 */
void
kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, true);
}

/*
 *
 * allocator hooks
 *
 */

struct kasan_alloc_header {
	uint16_t magic;
	uint16_t crc;
	uint32_t alloc_size;
	uint32_t user_size;
	struct {
		uint32_t left_rz : 32 - BACKTRACE_BITS;
		uint32_t frames  : BACKTRACE_BITS;
	};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

struct kasan_alloc_footer {
	uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");
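
/*
 * Sketch of a live allocation's layout (see header_for_user_addr() and
 * footer_for_user_addr() below):
 *
 *   |<------------------ alloc_size ------------------>|
 *   [ left redzone ....... | user data  | right redzone ]
 *                  ^header ^addr        ^footer (backtrace)
 *
 * The kasan_alloc_header sits in the last bytes of the left redzone,
 * immediately before the user pointer; the footer backtrace fills the
 * right redzone starting at addr + user_size.
 */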

#define LIVE_XOR ((uint16_t)0x3a65)
#define FREE_XOR ((uint16_t)0xf233)

static uint16_t
magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
{
	uint16_t magic = addr & 0xFFFF;
	magic ^= (addr >> 16) & 0xFFFF;
	magic ^= (addr >> 32) & 0xFFFF;
	magic ^= (addr >> 48) & 0xFFFF;
	magic ^= magic_xor;
	return magic;
}
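
/*
 * The magic folds all four 16-bit chunks of the address together, then mixes
 * in a constant that differs for live vs. quarantined objects. Illustrative
 * example: for the (hypothetical) address 0x0000000000001000,
 * magic_for_addr(addr, LIVE_XOR) is 0x1000 ^ 0x3a65 = 0x2a65, so a header
 * whose magic doesn't match its own address is evidence of corruption.
 */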

static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)
{
	return (void *)(addr - sizeof(struct kasan_alloc_header));
}

static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
	*size = rightrz;
	return (void *)(addr + h->user_size);
}

/*
 * size: user-requested allocation size
 * ret: minimum size for the real allocation
 */
vm_size_t
kasan_alloc_resize(vm_size_t size)
{
	vm_size_t tmp;
	if (os_add_overflow(size, 4 * PAGE_SIZE, &tmp)) {
		panic("allocation size overflow (%lu)", size);
	}

	/* add left and right redzones */
	size += KASAN_GUARD_PAD;

	/* ensure the final allocation is an 8-byte multiple */
	size += 8 - (size % 8);

	return size;
}
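
/*
 * Illustrative example, assuming KASAN_GUARD_PAD is 32: a 13-byte request
 * becomes 13 + 32 = 45 bytes, rounded up to 48. Note the rounding step always
 * adds at least one byte (a multiple-of-8 size grows by a full 8), so the
 * result is strictly larger than size + KASAN_GUARD_PAD for aligned sizes.
 */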

extern vm_offset_t vm_kernel_slid_base;

static vm_size_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
	uintptr_t buf[BACKTRACE_MAXFRAMES];
	uintptr_t *bt = buf;

	sz /= sizeof(uint32_t);
	vm_size_t frames = sz;

	if (frames > 0) {
		frames = min(frames + skip, BACKTRACE_MAXFRAMES);
		frames = backtrace(bt, frames, NULL);

		while (frames > sz && skip > 0) {
			bt++;
			frames--;
			skip--;
		}

		/* only store the offset from kernel base, and cram that into 32
		 * bits */
		for (vm_size_t i = 0; i < frames; i++) {
			ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
		}
	}
	return frames;
}

/* addr: user address of allocation */
static uint16_t
kasan_alloc_crc(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;

	uint16_t crc_orig = h->crc;
	h->crc = 0;

	uint16_t crc = 0;
	crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
	crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);

	h->crc = crc_orig;

	return crc;
}

static vm_size_t
kasan_alloc_retrieve_bt(vm_address_t addr, uintptr_t frames[static BACKTRACE_MAXFRAMES])
{
	vm_size_t num_frames = 0;
	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(addr);
	uptr max_search = shadow - 4096;
	vm_address_t alloc_base = 0;
	size_t fsize = 0;

	/* walk the shadow backwards to find the allocation base */
	while (shadow >= max_search) {
		if (*(uint8_t *)shadow == ASAN_HEAP_LEFT_RZ) {
			alloc_base = ADDRESS_FOR_SHADOW(shadow) + 8;
			break;
		}
		shadow--;
	}

	if (alloc_base) {
		struct kasan_alloc_header *header = header_for_user_addr(alloc_base);
		if (magic_for_addr(alloc_base, LIVE_XOR) == header->magic) {
			struct kasan_alloc_footer *footer = footer_for_user_addr(alloc_base, &fsize);
			if ((fsize / sizeof(footer->backtrace[0])) >= header->frames) {
				num_frames = header->frames;
				for (size_t i = 0; i < num_frames; i++) {
					frames[i] = footer->backtrace[i] + vm_kernel_slid_base;
				}
			}
		}
	}

	return num_frames;
}

/*
 * addr: base address of full allocation (including redzones)
 * size: total size of allocation (including redzones)
 * req: user-requested allocation size
 * leftrz: size of the left redzone in bytes
 * ret: address of usable allocation
 */
vm_address_t
kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	if (!addr) {
		return 0;
	}
	assert(size > 0);
	assert((addr % 8) == 0);
	assert((size % 8) == 0);

	vm_size_t rightrz = size - req - leftrz;

	kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
	kasan_rz_clobber(addr, req, leftrz, rightrz);

	addr += leftrz;

	if (enabled_checks & TYPE_LEAK) {
		__nosan_memset((void *)addr, KASAN_UNINITIALIZED_HEAP, req);
	}

	/* stash the allocation sizes in the left redzone */
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	h->magic = magic_for_addr(addr, LIVE_XOR);
	h->left_rz = leftrz;
	h->alloc_size = size;
	h->user_size = req;

	/* ... and a backtrace in the right redzone */
	vm_size_t fsize;
	struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
	h->frames = kasan_alloc_bt(f->backtrace, fsize, 2);

	/* checksum the whole object, minus the user part */
	h->crc = kasan_alloc_crc(addr);

	return addr;
}

/*
 * addr: user pointer
 * size: returns full original allocation size
 * ret: original allocation ptr
 */
vm_address_t
kasan_dealloc(vm_offset_t addr, vm_size_t *size)
{
	assert(size && addr);
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	*size = h->alloc_size;
	return addr - h->left_rz;
}

/*
 * return the original user-requested allocation size
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	assert(h->magic == magic_for_addr(addr, LIVE_XOR));
	return h->user_size;
}

/*
 * Verify that `addr' (user pointer) is a valid allocation of `type'
 */
void
kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);

	/* map heap type to an internal access type */
	access_t type = heap_type == KASAN_HEAP_KALLOC    ? TYPE_KFREE :
	    heap_type == KASAN_HEAP_ZALLOC    ? TYPE_ZFREE :
	    heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0;

	/* check the magic and crc match */
	if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}
	if (h->crc != kasan_alloc_crc(addr)) {
		kasan_violation(addr, size, type, REASON_MOD_OOB);
	}

	/* check the freed size matches what we recorded at alloc time */
	if (h->user_size != size) {
		kasan_violation(addr, size, type, REASON_INVALID_SIZE);
	}

	vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;

	/* Check that the redzones are valid */
	if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
	    !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}

	/* Check the allocated range is not poisoned */
	kasan_check_range((void *)addr, size, type);
}

/*
 *
 * Quarantine
 *
 */

struct freelist_entry {
	uint16_t magic;
	uint16_t crc;
	STAILQ_ENTRY(freelist_entry) list;
	union {
		struct {
			vm_size_t size      : 28;
			vm_size_t user_size : 28;
			vm_size_t frames    : BACKTRACE_BITS; /* number of frames in backtrace */
			vm_size_t __unused  : 8 - BACKTRACE_BITS;
		};
		uint64_t bits;
	};
	zone_t zone;
	uint32_t backtrace[];
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");

struct quarantine {
	STAILQ_HEAD(freelist_head, freelist_entry) freelist;
	unsigned long entries;
	unsigned long max_entries;
	vm_size_t size;
	vm_size_t max_size;
};

struct quarantine quarantines[] = {
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};

static uint16_t
fle_crc(struct freelist_entry *fle)
{
	return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
}

/*
 * addr, sizep: pointer/size of full allocation including redzone
 */
void NOINLINE
kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
    zone_t *zone, vm_size_t user_size, int locked,
    bool doquarantine)
{
	vm_size_t size = *sizep;
	vm_offset_t addr = *(vm_offset_t *)addrp;

	assert(type >= 0 && type < KASAN_HEAP_TYPES);
	if (type == KASAN_HEAP_KALLOC) {
		/* zero-size kalloc allocations are allowed */
		assert(!zone);
	} else if (type == KASAN_HEAP_ZALLOC) {
		assert(zone && user_size);
	} else if (type == KASAN_HEAP_FAKESTACK) {
		assert(zone && user_size);
	}

	/* clobber the entire freed region */
	kasan_rz_clobber(addr, 0, size, 0);

	if (!doquarantine || !quarantine_enabled) {
		goto free_current;
	}

	/* poison the entire freed region */
	uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
	kasan_poison(addr, 0, size, 0, flags);

	struct freelist_entry *fle, *tofree = NULL;
	struct quarantine *q = &quarantines[type];
	assert(size >= sizeof(struct freelist_entry));

	/* create a new freelist entry */
	fle = (struct freelist_entry *)addr;
	fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
	fle->size = size;
	fle->user_size = user_size;
	fle->frames = 0;
	fle->zone = ZONE_NULL;
	if (zone) {
		fle->zone = *zone;
	}
	if (type != KASAN_HEAP_FAKESTACK) {
		/* don't do expensive things on the fakestack path */
		fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
		fle->crc = fle_crc(fle);
	}

	boolean_t flg;
	if (!locked) {
		kasan_lock(&flg);
	}

	if (q->size + size > q->max_size) {
		/*
		 * Adding this entry would put us over the max quarantine size. Free the
		 * larger of the current object and the quarantine head object.
		 */
		tofree = STAILQ_FIRST(&q->freelist);
		if (fle->size > tofree->size) {
			goto free_current_locked;
		}
	}

	STAILQ_INSERT_TAIL(&q->freelist, fle, list);
	q->entries++;
	q->size += size;

	/* free the oldest entry, if necessary */
	if (tofree || q->entries > q->max_entries) {
		tofree = STAILQ_FIRST(&q->freelist);
		STAILQ_REMOVE_HEAD(&q->freelist, list);

		assert(q->entries > 0 && q->size >= tofree->size);
		q->entries--;
		q->size -= tofree->size;

		if (type != KASAN_HEAP_KALLOC) {
			assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS &&
			    (vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS);
			*zone = tofree->zone;
		}

		size = tofree->size;
		addr = (vm_offset_t)tofree;

		/* check the magic and crc match */
		if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}
		if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}

		/* clobber the quarantine header */
		__nosan_bzero((void *)addr, sizeof(struct freelist_entry));
	} else {
		/* quarantine is not full - don't really free anything */
		addr = 0;
	}

free_current_locked:
	if (!locked) {
		kasan_unlock(flg);
	}

free_current:
	*addrp = (void *)addr;
	if (addr) {
		kasan_unpoison((void *)addr, size);
		*sizep = size;
	}
}

void NOINLINE
kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
    vm_size_t user_size, bool quarantine)
{
	kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);

	if (free_yield) {
		thread_yield_internal(free_yield);
	}
}

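/*
 * C++ 'new[]' stores an array cookie (the element count) in the word just
 * before the array data; the shadow byte covering that word is marked
 * ASAN_ARRAY_COOKIE. A sketch of the intended semantics of the hook below:
 * loading the cookie from a live allocation returns it unchanged, while
 * loading it from an already-freed allocation returns 0 so the caller's
 * destructor loop does no work on freed memory.
 */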
uptr
__asan_load_cxx_array_cookie(uptr *p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)p);
	if (*shadow == ASAN_ARRAY_COOKIE) {
		return *p;
	} else if (*shadow == ASAN_HEAP_FREED) {
		return 0;
	} else {
		return *p;
	}
}

void
__asan_poison_cxx_array_cookie(uptr p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(p);
	*shadow = ASAN_ARRAY_COOKIE;
}

/*
 * Unpoison the C++ array cookie (if it exists). We don't know exactly where it
 * lives relative to the start of the buffer, but it's always the word immediately
 * before the start of the array data, so for naturally-aligned objects we need to
 * search at most 2 shadow bytes.
 */
void
kasan_unpoison_cxx_array_cookie(void *ptr)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)ptr);
	for (size_t i = 0; i < 2; i++) {
		if (shadow[i] == ASAN_ARRAY_COOKIE) {
			shadow[i] = ASAN_VALID;
			return;
		} else if (shadow[i] != ASAN_VALID) {
			/* must have seen the cookie by now */
			return;
		}
	}
}

#define ACCESS_CHECK_DECLARE(type, sz, access) \
	void __asan_##type##sz(uptr addr) { \
		kasan_check_range((const void *)addr, sz, access); \
	} \
	void OS_NORETURN UNSUPPORTED_API(__asan_exp_##type##sz, uptr a, int32_t b);

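/*
 * For example, ACCESS_CHECK_DECLARE(load, 4, TYPE_LOAD) expands to the
 * instrumentation entry point the compiler emits for 4-byte loads:
 *
 *   void __asan_load4(uptr addr) {
 *           kasan_check_range((const void *)addr, 4, TYPE_LOAD);
 *   }
 *
 * plus an unsupported stub for the "experimental" variant of the same hook.
 */
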
ACCESS_CHECK_DECLARE(load, 1, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 2, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 4, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 8, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 16, TYPE_LOAD);
ACCESS_CHECK_DECLARE(store, 1, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 2, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 4, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 8, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 16, TYPE_STORE);

void
__asan_loadN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_LOAD);
}

void
__asan_storeN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_STORE);
}

static void
kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
{
	__nosan_memset((void *)addr, val, sz);
}

#define SET_SHADOW_DECLARE(val) \
	void __asan_set_shadow_##val(uptr addr, size_t sz) { \
		kasan_set_shadow(addr, sz, 0x##val); \
	}

SET_SHADOW_DECLARE(00)
SET_SHADOW_DECLARE(f1)
SET_SHADOW_DECLARE(f2)
SET_SHADOW_DECLARE(f3)
SET_SHADOW_DECLARE(f5)
SET_SHADOW_DECLARE(f8)

/*
 * Call 'cb' for each contiguous range of the shadow map. This could be more
 * efficient by walking the page table directly.
 */
int
kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
{
	uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
	uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
	shadow_base = vm_map_trunc_page(shadow_base, HW_PAGE_MASK);
	shadow_top = vm_map_round_page(shadow_top, HW_PAGE_MASK);

	uintptr_t start = 0, end = 0;

	for (uintptr_t addr = shadow_base; addr < shadow_top; addr += HW_PAGE_SIZE) {
		if (kasan_is_shadow_mapped(addr)) {
			if (start == 0) {
				start = addr;
			}
			end = addr + HW_PAGE_SIZE;
		} else if (start && end) {
			cb(start, end, ctx);
			start = end = 0;
		}
	}

	if (start && end) {
		cb(start, end, ctx);
	}

	return 0;
}

/*
 * XXX: implement these
 */

UNUSED_ABI(__asan_alloca_poison, uptr addr, uptr size);
UNUSED_ABI(__asan_allocas_unpoison, uptr top, uptr bottom);
UNUSED_ABI(__sanitizer_ptr_sub, uptr a, uptr b);
UNUSED_ABI(__sanitizer_ptr_cmp, uptr a, uptr b);
UNUSED_ABI(__sanitizer_annotate_contiguous_container, const void *a, const void *b, const void *c, const void *d);
UNUSED_ABI(__asan_poison_stack_memory, uptr addr, size_t size);
UNUSED_ABI(__asan_unpoison_stack_memory, uptr a, uptr b);

/*
 * Miscellaneous unimplemented asan ABI
 */

UNUSED_ABI(__asan_init, void);
UNUSED_ABI(__asan_register_image_globals, uptr a);
UNUSED_ABI(__asan_unregister_image_globals, uptr a);
UNUSED_ABI(__asan_before_dynamic_init, uptr a);
UNUSED_ABI(__asan_after_dynamic_init, void);
UNUSED_ABI(__asan_version_mismatch_check_v8, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_802, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_900, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1000, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1001, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1100, void);

void OS_NORETURN UNSUPPORTED_API(__asan_init_v5, void);
void OS_NORETURN UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
void OS_NORETURN UNSUPPORTED_API(__asan_unregister_globals, uptr a, uptr b);
void OS_NORETURN UNSUPPORTED_API(__asan_register_elf_globals, uptr a, uptr b, uptr c);
void OS_NORETURN UNSUPPORTED_API(__asan_unregister_elf_globals, uptr a, uptr b, uptr c);

void OS_NORETURN UNSUPPORTED_API(__asan_exp_loadN, uptr addr, size_t sz, int32_t e);
void OS_NORETURN UNSUPPORTED_API(__asan_exp_storeN, uptr addr, size_t sz, int32_t e);
void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_load_n, uptr addr, unsigned long b, int32_t c);
void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_store_n, uptr addr, unsigned long b, int32_t c);

/*
 *
 * SYSCTL
 *
 */

static int
sysctl_kasan_test(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg2, struct sysctl_req *req)
{
	int mask = 0;
	int ch;
	int err;
	err = sysctl_io_number(req, 0, sizeof(int), &mask, &ch);

	if (!err && mask) {
		kasan_test(mask, arg2);
	}

	return err;
}

static int
sysctl_fakestack_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, int __unused arg2, struct sysctl_req *req)
{
	int ch, err, val;

	err = sysctl_io_number(req, fakestack_enabled, sizeof(fakestack_enabled), &val, &ch);
	if (err == 0 && ch) {
		fakestack_enabled = !!val;
		__asan_option_detect_stack_use_after_return = !!val;
	}

	return err;
}

SYSCTL_DECL(kasan);
SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");

SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, checks, CTLFLAG_RW, &enabled_checks, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, report_ignored, CTLFLAG_RW, &report_ignored, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, free_yield_ms, CTLFLAG_RW, &free_yield, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, leak_threshold, CTLFLAG_RW, &leak_threshold, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, leak_fatal_threshold, CTLFLAG_RW, &leak_fatal_threshold, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fakestack,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_fakestack_enable, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, test,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kasan_test, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fail,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 1, sysctl_kasan_test, "I", "");
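
/*
 * These knobs are reachable from userspace on KASAN builds, e.g.
 * (illustrative):
 *
 *   sysctl kern.kasan.available       # 1 on a KASAN kernel
 *   sysctl kern.kasan.memused         # shadow pages currently mapped
 *   sysctl -w kern.kasan.fakestack=0  # toggle fakestack at runtime
 */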