/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/backtrace.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <mach/sdt.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <kern/thread.h>
#include <machine/atomic.h>

#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_OFFSET;

static unsigned kexts_loaded;
unsigned shadow_pages_total;
unsigned shadow_pages_used;

vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;

static unsigned kasan_enabled;
static unsigned quarantine_enabled;
static unsigned enabled_checks = TYPE_ALL & ~TYPE_LEAK; /* bitmask of enabled checks */
static unsigned report_ignored;           /* issue non-fatal report for disabled/blacklisted checks */
static unsigned free_yield = 0;           /* ms yield after each free */
static unsigned leak_threshold = 3;       /* threshold for uninitialized memory leak detection */
static unsigned leak_fatal_threshold = 0; /* threshold for treating leaks as fatal errors (0 means never) */

/* forward decls */
static void kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason);
static void kasan_log_report(uptr p, uptr width, access_t access, violation_t reason);

/* imported osfmk functions */
extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);

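/*
 * Shadow mapping sketch (an assumption for illustration: KASAN_SCALE == 3,
 * i.e. one shadow byte per 8-byte granule, the classic AddressSanitizer
 * layout):
 *
 *   uint8_t sh = *(uint8_t *)((addr >> KASAN_SCALE) + KASAN_OFFSET);
 *
 * sh == 0 means the whole granule is addressable, 1..7 mean only the first
 * 'sh' bytes are, and the high (redzone/freed) values are named in
 * shadow_strings[] below.
 */
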
/*
 * unused: expected to be called, but (currently) does nothing
 */
#define UNUSED_ABI(func, ...) \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Wunused-parameter\"") \
	void func(__VA_ARGS__); \
	void func(__VA_ARGS__) {}; \
	_Pragma("clang diagnostic pop") \

static const size_t BACKTRACE_BITS = 4;
static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;

static vm_size_t kasan_alloc_retrieve_bt(vm_address_t addr, uintptr_t frames[static BACKTRACE_MAXFRAMES]);

decl_simple_lock_data(, kasan_vm_lock);
static thread_t kasan_lock_holder;

/*
 * KASan is called from the interrupt path, so we need to disable interrupts
 * to ensure atomicity when manipulating the global objects.
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&kasan_vm_lock, LCK_GRP_NULL);
	kasan_lock_holder = current_thread();
}

void
kasan_unlock(boolean_t b)
{
	kasan_lock_holder = THREAD_NULL;
	simple_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}

/* Return true if 'thread' holds the kasan lock. Only safe if 'thread' ==
 * current thread */
bool
kasan_lock_held(thread_t thread)
{
	return thread && thread == kasan_lock_holder;
}

static inline bool
kasan_check_enabled(access_t access)
{
	return kasan_enabled && (enabled_checks & access) && !kasan_is_blacklisted(access);
}

static inline bool
kasan_poison_active(uint8_t flags)
{
	switch (flags) {
	case ASAN_GLOBAL_RZ:
		return kasan_check_enabled(TYPE_POISON_GLOBAL);
	case ASAN_HEAP_RZ:
	case ASAN_HEAP_LEFT_RZ:
	case ASAN_HEAP_RIGHT_RZ:
	case ASAN_HEAP_FREED:
		return kasan_check_enabled(TYPE_POISON_HEAP);
	default:
		return true;
	}
}

/*
 * poison redzones in the shadow map
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz, uint8_t flags)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
	uint8_t partial = (uint8_t)kasan_granule_partial(size);
	vm_size_t total = leftrz + size + rightrz;
	vm_size_t i = 0;

	/* ensure base, leftrz and total allocation size are granule-aligned */
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(total) == 0);

	if (!kasan_enabled || !kasan_poison_active(flags)) {
		return;
	}

	leftrz >>= KASAN_SCALE;
	size >>= KASAN_SCALE;
	total >>= KASAN_SCALE;

	uint8_t l_flags = flags;
	uint8_t r_flags = flags;

	if (flags == ASAN_STACK_RZ) {
		l_flags = ASAN_STACK_LEFT_RZ;
		r_flags = ASAN_STACK_RIGHT_RZ;
	} else if (flags == ASAN_HEAP_RZ) {
		l_flags = ASAN_HEAP_LEFT_RZ;
		r_flags = ASAN_HEAP_RIGHT_RZ;
	}

	/*
	 * poison the redzones and unpoison the valid bytes
	 */
	for (; i < leftrz; i++) {
		shadow[i] = l_flags;
	}
	for (; i < leftrz + size; i++) {
		shadow[i] = ASAN_VALID; /* XXX: should not be necessary */
	}
	if (partial && (i < total)) {
		shadow[i] = partial;
		i++;
	}
	for (; i < total; i++) {
		shadow[i] = r_flags;
	}
}

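/*
 * Worked example of the encoding above (illustrative, assuming an 8-byte
 * granule): kasan_poison(base, 20, 16, 12, ASAN_HEAP_RZ) on a granule-aligned
 * base writes 6 shadow bytes:
 *
 *   shadow[0..1] = ASAN_HEAP_LEFT_RZ    16-byte left redzone
 *   shadow[2..3] = ASAN_VALID           16 fully-valid bytes
 *   shadow[4]    = 4                    partial granule: first 4 bytes valid
 *   shadow[5]    = ASAN_HEAP_RIGHT_RZ   rest of the 12-byte right redzone
 */
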
void
kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
{
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(size) == 0);
	kasan_poison(base, 0, 0, size, flags);
}

void NOINLINE
kasan_unpoison(void *base, vm_size_t size)
{
	kasan_poison((vm_offset_t)base, size, 0, 0, 0);
}

void NOINLINE
kasan_unpoison_stack(uintptr_t base, size_t size)
{
	assert(base > 0);
	assert(size > 0);

	size_t partial = kasan_granule_partial(base);
	base = kasan_granule_trunc(base);
	size = kasan_granule_round(size + partial);

	kasan_unpoison((void *)base, size);
}

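/*
 * Sketch of the rounding above (assuming an 8-byte granule): for base
 * 0xffffff8000001003 and size 17, partial == 3, base truncates to ...1000
 * and size rounds up from 20 to 24, so every granule overlapping the
 * original [base, base + 17) range is unpoisoned.
 */
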
/*
 * write junk into the redzones
 */
static void NOINLINE
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
#if KASAN_DEBUG
	vm_size_t i;
	const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
	const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
	uint8_t *buf = (uint8_t *)base;

	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(size + leftrz + rightrz) == 0);

	for (i = 0; i < leftrz; i++) {
		buf[i] = deadbeef[i % 4];
	}

	for (i = 0; i < rightrz; i++) {
		buf[i + size + leftrz] = c0ffee[i % 4];
	}
#else
	(void)base;
	(void)size;
	(void)leftrz;
	(void)rightrz;
#endif
}

/*
 * Report a violation that may be disabled and/or blacklisted. This can only be
 * called for dynamic checks (i.e. where the fault is recoverable). Use
 * kasan_crash_report() for static (unrecoverable) violations.
 *
 * access: what we were trying to do when the violation occurred
 * reason: what failed about the access
 */
static void
kasan_violation(uintptr_t addr, size_t size, access_t access, violation_t reason)
{
	assert(__builtin_popcount(access) == 1);
	if (!kasan_check_enabled(access)) {
		if (report_ignored) {
			kasan_log_report(addr, size, access, reason);
		}
		return;
	}
	kasan_crash_report(addr, size, access, reason);
}

void NOINLINE
kasan_check_range(const void *x, size_t sz, access_t access)
{
	uintptr_t invalid;
	uintptr_t ptr = (uintptr_t)x;
	if (kasan_range_poisoned(ptr, sz, &invalid)) {
		size_t remaining = sz - (invalid - ptr);
		kasan_violation(invalid, remaining, access, 0);
	}
}

/*
 * Return true if [base, base+sz) is unpoisoned or has given shadow value.
 */
bool
kasan_check_shadow(vm_address_t addr, vm_size_t sz, uint8_t shadow)
{
	/* round 'base' up to skip any partial, which won't match 'shadow' */
	uintptr_t base = kasan_granule_round(addr);
	sz -= base - addr;

	uintptr_t end = base + sz;

	while (base < end) {
		uint8_t *sh = SHADOW_FOR_ADDRESS(base);
		if (*sh && *sh != shadow) {
			return false;
		}
		base += KASAN_GRANULE;
	}
	return true;
}

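/*
 * Example (illustrative): for addr ...1005 and sz 16, 'base' rounds up to
 * ...1008 and only the two granules at ...1008 and ...1010 are compared;
 * the leading partial granule is skipped since its shadow byte holds a
 * partial count rather than 'shadow'.
 */
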
static void
kasan_report_leak(vm_address_t base, vm_size_t sz, vm_offset_t offset, vm_size_t leak_sz)
{
	if (leak_fatal_threshold > leak_threshold && leak_sz >= leak_fatal_threshold) {
		kasan_violation(base + offset, leak_sz, TYPE_LEAK, REASON_UNINITIALIZED);
	}

	char string_rep[BACKTRACE_MAXFRAMES * 20] = {};
	vm_offset_t stack_base = dtrace_get_kernel_stack(current_thread());
	bool is_stack = (base >= stack_base && base < (stack_base + kernel_stack_size));

	if (!is_stack) {
		uintptr_t alloc_bt[BACKTRACE_MAXFRAMES] = {};
		vm_size_t num_frames = 0;
		size_t l = 0;
		num_frames = kasan_alloc_retrieve_bt(base, alloc_bt);
		for (vm_size_t i = 0; i < num_frames; i++) {
			l += scnprintf(string_rep + l, sizeof(string_rep) - l, " %lx", alloc_bt[i]);
		}
	}

	DTRACE_KASAN5(leak_detected,
	    vm_address_t, base,
	    vm_size_t, sz,
	    vm_offset_t, offset,
	    vm_size_t, leak_sz,
	    char *, string_rep);
}

/*
 * Initialize buffer by writing unique pattern that can be looked for
 * in copyout path to detect uninitialized memory leaks.
 */
void
kasan_leak_init(vm_address_t addr, vm_size_t sz)
{
	if (enabled_checks & TYPE_LEAK) {
		__nosan_memset((void *)addr, KASAN_UNINITIALIZED_HEAP, sz);
	}
}

/*
 * Check for possible uninitialized memory contained in [base, base+sz).
 */
void
kasan_check_uninitialized(vm_address_t base, vm_size_t sz)
{
	if (!(enabled_checks & TYPE_LEAK) || sz < leak_threshold) {
		return;
	}

	vm_address_t cur = base;
	vm_address_t end = base + sz;
	vm_size_t count = 0;
	vm_size_t max_count = 0;
	vm_address_t leak_offset = 0;
	uint8_t byte = 0;

	while (cur < end) {
		byte = *(uint8_t *)cur;
		count = (byte == KASAN_UNINITIALIZED_HEAP) ? (count + 1) : 0;
		if (count > max_count) {
			max_count = count;
			leak_offset = cur - (count - 1) - base;
		}
		cur += 1;
	}

	if (max_count >= leak_threshold) {
		kasan_report_leak(base, sz, leak_offset, max_count);
	}
}

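/*
 * Example (illustrative): with leak_threshold == 3 and a copyout buffer whose
 * bytes at offsets 5..9 still hold KASAN_UNINITIALIZED_HEAP, the scan above
 * finds a longest run of 5 starting at offset 5, so
 * kasan_report_leak(base, sz, 5, 5) fires.
 */
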
/*
 *
 * KASAN violation reporting
 *
 */

static const char *
access_str(access_t type)
{
	if (type & TYPE_READ) {
		return "load from";
	} else if (type & TYPE_WRITE) {
		return "store to";
	} else if (type & TYPE_FREE) {
		return "free of";
	} else if (type & TYPE_LEAK) {
		return "leak from";
	} else {
		return "access of";
	}
}

static const char *shadow_strings[] = {
	[ASAN_VALID] = "VALID",
	[ASAN_PARTIAL1] = "PARTIAL1",
	[ASAN_PARTIAL2] = "PARTIAL2",
	[ASAN_PARTIAL3] = "PARTIAL3",
	[ASAN_PARTIAL4] = "PARTIAL4",
	[ASAN_PARTIAL5] = "PARTIAL5",
	[ASAN_PARTIAL6] = "PARTIAL6",
	[ASAN_PARTIAL7] = "PARTIAL7",
	[ASAN_STACK_LEFT_RZ] = "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ] = "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED] = "STACK_FREED",
	[ASAN_STACK_OOSCOPE] = "STACK_OOSCOPE",
	[ASAN_GLOBAL_RZ] = "GLOBAL_RZ",
	[ASAN_HEAP_LEFT_RZ] = "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ] = "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED] = "HEAP_FREED",
	[0xff] = NULL
};

#define CRASH_CONTEXT_BEFORE 5
#define CRASH_CONTEXT_AFTER  5

static size_t
kasan_shadow_crashlog(uptr p, char *buf, size_t len)
{
	int i, j;
	size_t n = 0;
	int before = CRASH_CONTEXT_BEFORE;
	int after = CRASH_CONTEXT_AFTER;

	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
	uptr shadow_p = shadow;
	uptr shadow_page = vm_map_round_page(shadow_p, HW_PAGE_MASK);

	/* rewind to start of context block */
	shadow &= ~((uptr)0xf);
	shadow -= 16 * before;

	n += scnprintf(buf + n, len - n,
	    " Shadow             0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f\n");

	for (i = 0; i < 1 + before + after; i++, shadow += 16) {
		if ((vm_map_round_page(shadow, HW_PAGE_MASK) != shadow_page) && !kasan_is_shadow_mapped(shadow)) {
			/* avoid unmapped shadow when crossing page boundaries */
			continue;
		}

		n += scnprintf(buf + n, len - n, " %16lx:", shadow);

		char *left = " ";
		char *right;

		for (j = 0; j < 16; j++) {
			uint8_t *x = (uint8_t *)(shadow + j);

			right = " ";
			if ((uptr)x == shadow_p) {
				left = "[";
				right = "]";
			} else if ((uptr)(x + 1) == shadow_p) {
				right = "";
			}

			n += scnprintf(buf + n, len - n, "%s%02x%s", left, (unsigned)*x, right);
			left = "";
		}
		n += scnprintf(buf + n, len - n, "\n");
	}

	n += scnprintf(buf + n, len - n, "\n");
	return n;
}

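/*
 * Illustrative crashlog excerpt (shadow values invented): the faulting
 * shadow byte is bracketed within its 16-byte row, e.g.
 *
 *  Shadow             0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f
 *  fffffc0009a14260: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
 *  fffffc0009a14270: 00 00 04 fa fa[fa]00 00 00 00 00 00 00 00 00 00
 */
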
static void
kasan_report_internal(uptr p, uptr width, access_t access, violation_t reason, bool dopanic)
{
	const size_t len = 4096;
	static char buf[len];
	size_t n = 0;

	uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
	uint8_t shadow_type = *shadow_ptr;
	const char *shadow_str = shadow_strings[shadow_type];
	if (!shadow_str) {
		shadow_str = "<invalid>";
	}
	buf[0] = '\0';

	if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
		n += scnprintf(buf + n, len - n, "KASan: free of corrupted/invalid object %#lx\n", p);
	} else if (reason == REASON_MOD_AFTER_FREE) {
		n += scnprintf(buf + n, len - n, "KASan: UaF of quarantined object %#lx\n", p);
	} else {
		n += scnprintf(buf + n, len - n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
		    width, access_str(access), p, shadow_str);
	}
	n += kasan_shadow_crashlog(p, buf + n, len - n);

	if (dopanic) {
		panic("%s", buf);
	} else {
		printf("%s", buf);
	}
}

static void NOINLINE OS_NORETURN
kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason)
{
	kasan_handle_test();
	kasan_report_internal(p, width, access, reason, true);
	__builtin_unreachable(); /* we can't handle this returning anyway */
}

static void
kasan_log_report(uptr p, uptr width, access_t access, violation_t reason)
{
	const size_t len = 256;
	char buf[len];
	size_t l = 0;
	uint32_t nframes = 14;
	uintptr_t frames[nframes];
	uintptr_t *bt = frames;

	kasan_report_internal(p, width, access, reason, false);

	/*
	 * print a backtrace
	 */

	nframes = backtrace_frame(bt, nframes, __builtin_frame_address(0),
	    NULL); /* ignore current frame */

	buf[0] = '\0';
	l += scnprintf(buf + l, len - l, "Backtrace: ");
	for (uint32_t i = 0; i < nframes; i++) {
		l += scnprintf(buf + l, len - l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
	}
	l += scnprintf(buf + l, len - l, "\n");

	printf("%s", buf);
}

#define REPORT_DECLARE(n) \
	void OS_NORETURN __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD, 0); } \
	void OS_NORETURN __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE, 0); } \
	void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_load##n, uptr a, int32_t b); \
	void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_store##n, uptr a, int32_t b);

REPORT_DECLARE(1)
REPORT_DECLARE(2)
REPORT_DECLARE(4)
REPORT_DECLARE(8)
REPORT_DECLARE(16)

void OS_NORETURN
__asan_report_load_n(uptr p, unsigned long sz)
{
	kasan_crash_report(p, sz, TYPE_LOAD, 0);
}
void OS_NORETURN
__asan_report_store_n(uptr p, unsigned long sz)
{
	kasan_crash_report(p, sz, TYPE_STORE, 0);
}

/* unpoison the current stack */
void NOINLINE
kasan_unpoison_curstack(bool whole_stack)
{
	uintptr_t base = ml_stack_base();
	size_t sz = ml_stack_size();
	uintptr_t cur = (uintptr_t)&base;

	if (whole_stack) {
		cur = base;
	}

	if (cur >= base && cur < base + sz) {
		/* unpoison from current stack depth to the top */
		size_t unused = cur - base;
		kasan_unpoison_stack(cur, sz - unused);
	}
}

void NOINLINE
__asan_handle_no_return(void)
{
	kasan_unpoison_curstack(false);

	/*
	 * No need to free any fakestack objects because they must stay alive until
	 * we drop the real stack, at which point we can drop the entire fakestack
	 * anyway.
	 */
}

bool NOINLINE
kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
{
	uint8_t *shadow;
	vm_size_t i;

	if (!kasan_enabled) {
		return false;
	}

	size += kasan_granule_partial(base);
	base = kasan_granule_trunc(base);

	shadow = SHADOW_FOR_ADDRESS(base);
	size_t limit = (size + KASAN_GRANULE - 1) / KASAN_GRANULE;

	/* XXX: to make debugging easier, catch unmapped shadow here */

	for (i = 0; i < limit; i++, size -= KASAN_GRANULE) {
		assert(size > 0);
		uint8_t s = shadow[i];
		if (s == 0 || (size < KASAN_GRANULE && s >= size && s < KASAN_GRANULE)) {
			/* valid */
		} else {
			goto fail;
		}
	}

	return false;

fail:
	if (first_invalid) {
		/* XXX: calculate the exact first byte that failed */
		*first_invalid = base + i * 8;
	}
	return true;
}

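/*
 * Example of the partial-granule test above (illustrative, 8-byte granule):
 * if 5 bytes of the range remain and the final shadow byte is 6, the access
 * is valid (the first 6 bytes of the granule are addressable); a shadow byte
 * of 4, or any redzone value >= KASAN_GRANULE, takes the fail path.
 */
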
static void NOINLINE
kasan_init_globals(vm_offset_t base, vm_size_t size)
{
	struct asan_global *glob = (struct asan_global *)base;
	struct asan_global *glob_end = (struct asan_global *)(base + size);
	for (; glob < glob_end; glob++) {
		/* handle one global */
		kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
	}
}

void NOINLINE
kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
{
	unsigned long sectsz;
	void *sect;

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_load_kext(base, bundleid);
#endif

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
		kexts_loaded++;
	}
}

void NOINLINE
kasan_unload_kext(vm_offset_t base, vm_size_t size)
{
	unsigned long sectsz;
	void *sect;

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_unpoison((void *)base, size);
		kexts_loaded--;
	}

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_unload_kext(base);
#endif
}

/*
 * Turn off as much as possible for panic path etc. There's no way to turn it
 * back on.
 */
void NOINLINE
kasan_disable(void)
{
	__asan_option_detect_stack_use_after_return = 0;
	fakestack_enabled = 0;
	kasan_enabled = 0;
	quarantine_enabled = 0;
	enabled_checks = 0;
}

static void NOINLINE
kasan_init_xnu_globals(void)
{
	const char *seg = KASAN_GLOBAL_SEGNAME;
	const char *sect = KASAN_GLOBAL_SECTNAME;
	unsigned long _size;
	vm_offset_t globals;
	vm_size_t size;
	kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;

	if (!header) {
		printf("KASan: failed to find kernel mach header\n");
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}

	globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
	if (!globals) {
		printf("KASan: failed to find segment %s section %s\n", seg, sect);
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}
	size = (vm_size_t)_size;

	printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
	printf("KASan: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));

	kasan_init_globals(globals, size);
}

void NOINLINE
kasan_late_init(void)
{
#if KASAN_DYNAMIC_BLACKLIST
	kasan_init_dybl();
#endif

	kasan_init_fakestack();
	kasan_init_xnu_globals();
}

void NOINLINE
kasan_notify_stolen(vm_offset_t top)
{
	kasan_map_shadow(kernel_vtop, top - kernel_vtop, false);
}

static void NOINLINE
kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
{
#if KASAN_DEBUG
	vm_size_t i;
	uint8_t tmp1, tmp2;

	/* Hit every byte in the shadow map. Don't write due to the zero mappings. */
	for (i = 0; i < sz; i += sizeof(uint64_t)) {
		vm_offset_t addr = base + i;
		uint8_t *x = SHADOW_FOR_ADDRESS(addr);
		tmp1 = *x;
		asm volatile ("" ::: "memory");
		tmp2 = *x;
		asm volatile ("" ::: "memory");
		assert(tmp1 == tmp2);
	}
#else
	(void)base;
	(void)sz;
#endif
}

void NOINLINE
kasan_init(void)
{
	unsigned arg;

	simple_lock_init(&kasan_vm_lock, 0);

	/* Map all of the kernel text and data */
	kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);

	kasan_arch_init();

	/*
	 * handle KASan boot-args
	 */

	if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
		enabled_checks = arg;
	}

	if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
		if (arg & KASAN_ARGS_FAKESTACK) {
			fakestack_enabled = 1;
		}
		if (arg & KASAN_ARGS_REPORTIGNORED) {
			report_ignored = 1;
		}
		if (arg & KASAN_ARGS_NODYCHECKS) {
			enabled_checks &= ~TYPE_DYNAMIC;
		}
		if (arg & KASAN_ARGS_NOPOISON_HEAP) {
			enabled_checks &= ~TYPE_POISON_HEAP;
		}
		if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
			enabled_checks &= ~TYPE_POISON_GLOBAL;
		}
		if (arg & KASAN_ARGS_CHECK_LEAKS) {
			enabled_checks |= TYPE_LEAK;
		}
	}

	if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
		free_yield = arg;
	}

	if (PE_parse_boot_argn("kasan.leak_threshold", &arg, sizeof(arg))) {
		leak_threshold = arg;
	}

	if (PE_parse_boot_argn("kasan.leak_fatal_threshold", &arg, sizeof(arg))) {
		leak_fatal_threshold = arg;
	}

	/* kasan.bl boot-arg handled in kasan_init_dybl() */

	quarantine_enabled = 1;
	kasan_enabled = 1;
}

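/*
 * Example boot-args line (illustrative; see kasan_internal.h for the
 * KASAN_ARGS_* flag values):
 *
 *   kasan.checks=0xffff kasan.free_yield_ms=10 kasan.leak_threshold=4
 */
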
static void NOINLINE
kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool is_zero)
{
	assert(address < VM_MAX_KERNEL_ADDRESS);

	if (!kasan_enabled) {
		return;
	}

	if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		/* only map kernel addresses */
		return;
	}

	if (!size) {
		/* nothing to map */
		return;
	}

	boolean_t flags;
	kasan_lock(&flags);
	kasan_map_shadow(address, size, is_zero);
	kasan_unlock(flags);
	kasan_debug_touch_mappings(address, size);
}

void
kasan_notify_address(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, false);
}

/*
 * Allocate read-only, all-zeros shadow for memory that can never be poisoned
 */
void
kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, true);
}

/*
 *
 * allocator hooks
 *
 */

struct kasan_alloc_header {
	uint16_t magic;
	uint16_t crc;
	uint32_t alloc_size;
	uint32_t user_size;
	struct {
		uint32_t left_rz : 32 - BACKTRACE_BITS;
		uint32_t frames : BACKTRACE_BITS;
	};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

struct kasan_alloc_footer {
	uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");

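/*
 * Layout sketch of an instrumented live allocation: the header occupies the
 * tail of the left redzone, immediately before the user pointer, and the
 * backtrace footer begins right after the user data, inside the right
 * redzone:
 *
 *   | left rz ... | kasan_alloc_header | user data | backtrace ... right rz |
 *   ^ real allocation base             ^ user pointer
 */
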
#define LIVE_XOR ((uint16_t)0x3a65)
#define FREE_XOR ((uint16_t)0xf233)

static uint16_t
magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
{
	uint16_t magic = addr & 0xFFFF;
	magic ^= (addr >> 16) & 0xFFFF;
	magic ^= (addr >> 32) & 0xFFFF;
	magic ^= (addr >> 48) & 0xFFFF;
	magic ^= magic_xor;
	return magic;
}

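/*
 * Example fold (illustrative): addr 0xffffff8012345678 gives
 * 0x5678 ^ 0x1234 ^ 0xff80 ^ 0xffff == 0x4433; XORing with LIVE_XOR yields
 * 0x7e56, while FREE_XOR yields a different value, so a live header can't be
 * mistaken for a quarantined one at the same address.
 */
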
static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)
{
	return (void *)(addr - sizeof(struct kasan_alloc_header));
}

static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
	*size = rightrz;
	return (void *)(addr + h->user_size);
}

/*
 * size: user-requested allocation size
 * ret: minimum size for the real allocation
 */
vm_size_t
kasan_alloc_resize(vm_size_t size)
{
	vm_size_t tmp;
	if (os_add_overflow(size, 4 * PAGE_SIZE, &tmp)) {
		panic("allocation size overflow (%lu)", size);
	}

	if (size >= 128) {
		/* Add a little extra right redzone to larger objects. Gives us extra
		 * overflow protection, and more space for the backtrace. */
		size += 16;
	}

	/* add left and right redzones */
	size += KASAN_GUARD_PAD;

	/* ensure the final allocation is a multiple of the granule */
	size = kasan_granule_round(size);

	return size;
}

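/*
 * Worked example (illustrative, assuming KASAN_GUARD_PAD == 32 and an 8-byte
 * granule): a 200-byte request gets +16 of extra right redzone (since it is
 * >= 128), then +32 of guard padding, giving 248, which is already granule-
 * aligned, so the backing allocator sees a 248-byte request.
 */
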
extern vm_offset_t vm_kernel_slid_base;

static vm_size_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
	uintptr_t buf[BACKTRACE_MAXFRAMES];
	uintptr_t *bt = buf;

	sz /= sizeof(uint32_t);
	vm_size_t frames = sz;

	if (frames > 0) {
		frames = min((uint32_t)(frames + skip), BACKTRACE_MAXFRAMES);
		frames = backtrace(bt, (uint32_t)frames, NULL);

		while (frames > sz && skip > 0) {
			bt++;
			frames--;
			skip--;
		}

		/* only store the offset from kernel base, and cram that into 32
		 * bits */
		for (vm_size_t i = 0; i < frames; i++) {
			ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
		}
	}
	return frames;
}

/* addr: user address of allocation */
static uint16_t
kasan_alloc_crc(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;

	uint16_t crc_orig = h->crc;
	h->crc = 0;

	uint16_t crc = 0;
	crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
	crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);

	h->crc = crc_orig;

	return crc;
}

static vm_size_t
kasan_alloc_retrieve_bt(vm_address_t addr, uintptr_t frames[static BACKTRACE_MAXFRAMES])
{
	vm_size_t num_frames = 0;
	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(addr);
	uptr max_search = shadow - 4096;
	vm_address_t alloc_base = 0;
	size_t fsize = 0;

	/* walk the shadow backwards to find the allocation base */
	while (shadow >= max_search) {
		if (*(uint8_t *)shadow == ASAN_HEAP_LEFT_RZ) {
			alloc_base = ADDRESS_FOR_SHADOW(shadow) + 8;
			break;
		}
		shadow--;
	}

	if (alloc_base) {
		struct kasan_alloc_header *header = header_for_user_addr(alloc_base);
		if (magic_for_addr(alloc_base, LIVE_XOR) == header->magic) {
			struct kasan_alloc_footer *footer = footer_for_user_addr(alloc_base, &fsize);
			if ((fsize / sizeof(footer->backtrace[0])) >= header->frames) {
				num_frames = header->frames;
				for (size_t i = 0; i < num_frames; i++) {
					frames[i] = footer->backtrace[i] + vm_kernel_slid_base;
				}
			}
		}
	}

	return num_frames;
}

/*
 * addr: base address of full allocation (including redzones)
 * size: total size of allocation (including redzones)
 * req: user-requested allocation size
 * leftrz: size of the left redzone in bytes
 * ret: address of usable allocation
 */
vm_address_t
kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	if (!addr) {
		return 0;
	}
	assert(size > 0);
	assert(kasan_granule_partial(addr) == 0);
	assert(kasan_granule_partial(size) == 0);

	vm_size_t rightrz = size - req - leftrz;

	kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
	kasan_rz_clobber(addr, req, leftrz, rightrz);

	addr += leftrz;

	/* stash the allocation sizes in the left redzone */
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	h->magic = magic_for_addr(addr, LIVE_XOR);
	h->left_rz = (uint32_t)leftrz;
	h->alloc_size = (uint32_t)size;
	h->user_size = (uint32_t)req;

	/* ... and a backtrace in the right redzone */
	vm_size_t fsize;
	struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
	h->frames = (uint32_t)kasan_alloc_bt(f->backtrace, fsize, 2);

	/* checksum the whole object, minus the user part */
	h->crc = kasan_alloc_crc(addr);

	return addr;
}

/*
 * addr: user pointer
 * size: returns full original allocation size
 * ret: original allocation ptr
 */
vm_address_t
kasan_dealloc(vm_offset_t addr, vm_size_t *size)
{
	assert(size && addr);
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	*size = h->alloc_size;
	h->magic = 0; /* clear the magic so the debugger doesn't find a bogus object */
	return addr - h->left_rz;
}

/*
 * return the original user-requested allocation size
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	assert(h->magic == magic_for_addr(addr, LIVE_XOR));
	return h->user_size;
}

/*
 * Verify that `addr' (user pointer) is a valid allocation of `type'
 */
void
kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);

	/* map heap type to an internal access type */
	access_t type = heap_type == KASAN_HEAP_KALLOC ? TYPE_KFREE :
	    heap_type == KASAN_HEAP_ZALLOC ? TYPE_ZFREE :
	    heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0;

	/* check the magic and crc match */
	if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}
	if (h->crc != kasan_alloc_crc(addr)) {
		kasan_violation(addr, size, type, REASON_MOD_OOB);
	}

	/* check the freed size matches what we recorded at alloc time */
	if (h->user_size != size) {
		kasan_violation(addr, size, type, REASON_INVALID_SIZE);
	}

	vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;

	/* Check that the redzones are valid */
	if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
	    !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}

	/* Check the allocated range is not poisoned */
	kasan_check_range((void *)addr, size, type);
}

/*
 *
 * Quarantine
 *
 */

struct freelist_entry {
	uint16_t magic;
	uint16_t crc;
	STAILQ_ENTRY(freelist_entry) list;
	union {
		struct {
			vm_size_t size : 28;
			vm_size_t user_size : 28;
			vm_size_t frames : BACKTRACE_BITS; /* number of frames in backtrace */
			vm_size_t __unused : 8 - BACKTRACE_BITS;
		};
		uint64_t bits;
	};
	zone_t zone;
	uint32_t backtrace[];
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");

struct quarantine {
	STAILQ_HEAD(freelist_head, freelist_entry) freelist;
	unsigned long entries;
	unsigned long max_entries;
	vm_size_t size;
	vm_size_t max_size;
};

struct quarantine quarantines[] = {
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};

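/*
 * Each heap type quarantines up to QUARANTINE_ENTRIES freed objects totalling
 * at most QUARANTINE_MAXSIZE bytes; kasan_free_internal() below evicts the
 * oldest entry (or skips quarantining oversized objects) once either cap is
 * exceeded. Delaying reuse this way is what makes use-after-free detectable.
 */
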
static uint16_t
fle_crc(struct freelist_entry *fle)
{
	return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
}

5ba3f43e
A
1190/*
1191 * addr, sizep: pointer/size of full allocation including redzone
1192 */
1193void NOINLINE
1194kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
f427ee49
A
1195 zone_t *zone, vm_size_t user_size, int locked,
1196 bool doquarantine)
5ba3f43e
A
1197{
1198 vm_size_t size = *sizep;
1199 vm_offset_t addr = *(vm_offset_t *)addrp;
1200
1201 assert(type >= 0 && type < KASAN_HEAP_TYPES);
1202 if (type == KASAN_HEAP_KALLOC) {
1203 /* zero-size kalloc allocations are allowed */
1204 assert(!zone);
1205 } else if (type == KASAN_HEAP_ZALLOC) {
1206 assert(zone && user_size);
1207 } else if (type == KASAN_HEAP_FAKESTACK) {
1208 assert(zone && user_size);
1209 }
1210
1211 /* clobber the entire freed region */
1212 kasan_rz_clobber(addr, 0, size, 0);
1213
1214 if (!doquarantine || !quarantine_enabled) {
1215 goto free_current;
1216 }
1217
1218 /* poison the entire freed region */
1219 uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
1220 kasan_poison(addr, 0, size, 0, flags);
1221
1222 struct freelist_entry *fle, *tofree = NULL;
1223 struct quarantine *q = &quarantines[type];
1224 assert(size >= sizeof(struct freelist_entry));
1225
1226 /* create a new freelist entry */
1227 fle = (struct freelist_entry *)addr;
a39ff7e2 1228 fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
5ba3f43e
A
1229 fle->size = size;
1230 fle->user_size = user_size;
1231 fle->frames = 0;
1232 fle->zone = ZONE_NULL;
1233 if (zone) {
1234 fle->zone = *zone;
1235 }
1236 if (type != KASAN_HEAP_FAKESTACK) {
a39ff7e2 1237 /* don't do expensive things on the fakestack path */
5ba3f43e 1238 fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
a39ff7e2 1239 fle->crc = fle_crc(fle);
5ba3f43e
A
1240 }
1241
1242 boolean_t flg;
1243 if (!locked) {
1244 kasan_lock(&flg);
1245 }
1246
1247 if (q->size + size > q->max_size) {
1248 /*
1249 * Adding this entry would put us over the max quarantine size. Free the
1250 * larger of the current object and the quarantine head object.
1251 */
1252 tofree = STAILQ_FIRST(&q->freelist);
1253 if (fle->size > tofree->size) {
1254 goto free_current_locked;
1255 }
1256 }
1257
1258 STAILQ_INSERT_TAIL(&q->freelist, fle, list);
1259 q->entries++;
1260 q->size += size;
1261
1262 /* free the oldest entry, if necessary */
1263 if (tofree || q->entries > q->max_entries) {
1264 tofree = STAILQ_FIRST(&q->freelist);
1265 STAILQ_REMOVE_HEAD(&q->freelist, list);
1266
1267 assert(q->entries > 0 && q->size >= tofree->size);
1268 q->entries--;
1269 q->size -= tofree->size;
1270
1271 if (type != KASAN_HEAP_KALLOC) {
1272 assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS &&
f427ee49 1273 (vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS);
5ba3f43e
A
1274 *zone = tofree->zone;
1275 }
1276
1277 size = tofree->size;
1278 addr = (vm_offset_t)tofree;
a39ff7e2
A
1279
1280 /* check the magic and crc match */
1281 if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
1282 kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
1283 }
1284 if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
1285 kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
5ba3f43e
A
1286 }
1287
1288 /* clobber the quarantine header */
a39ff7e2 1289 __nosan_bzero((void *)addr, sizeof(struct freelist_entry));
5ba3f43e
A
1290 } else {
1291 /* quarantine is not full - don't really free anything */
1292 addr = 0;
1293 }
1294
f427ee49 1295free_current_locked:
5ba3f43e
A
1296 if (!locked) {
1297 kasan_unlock(flg);
1298 }
1299
f427ee49 1300free_current:
5ba3f43e
A
1301 *addrp = (void *)addr;
1302 if (addr) {
1303 kasan_unpoison((void *)addr, size);
1304 *sizep = size;
1305 }
1306}
1307
void NOINLINE
kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
    vm_size_t user_size, bool quarantine)
{
	kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);

	if (free_yield) {
		thread_yield_internal(free_yield);
	}
}

uptr
__asan_load_cxx_array_cookie(uptr *p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)p);
	if (*shadow == ASAN_ARRAY_COOKIE) {
		return *p;
	} else if (*shadow == ASAN_HEAP_FREED) {
		return 0;
	} else {
		return *p;
	}
}

void
__asan_poison_cxx_array_cookie(uptr p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(p);
	*shadow = ASAN_ARRAY_COOKIE;
}

/*
 * Unpoison the C++ array cookie (if it exists). We don't know exactly where it
 * lives relative to the start of the buffer, but it's always the word immediately
 * before the start of the array data, so for naturally-aligned objects we need to
 * search at most 2 shadow bytes.
 */
void
kasan_unpoison_cxx_array_cookie(void *ptr)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)ptr);
	for (size_t i = 0; i < 2; i++) {
		if (shadow[i] == ASAN_ARRAY_COOKIE) {
			shadow[i] = ASAN_VALID;
			return;
		} else if (shadow[i] != ASAN_VALID) {
			/* must have seen the cookie by now */
			return;
		}
	}
}

#define ACCESS_CHECK_DECLARE(type, sz, access) \
	void __asan_##type##sz(uptr addr) { \
		kasan_check_range((const void *)addr, sz, access); \
	} \
	void OS_NORETURN UNSUPPORTED_API(__asan_exp_##type##sz, uptr a, int32_t b);

ACCESS_CHECK_DECLARE(load, 1, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 2, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 4, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 8, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 16, TYPE_LOAD);
ACCESS_CHECK_DECLARE(store, 1, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 2, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 4, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 8, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 16, TYPE_STORE);

void
__asan_loadN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_LOAD);
}

void
__asan_storeN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_STORE);
}

static void
kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
{
	__nosan_memset((void *)addr, val, sz);
}

#define SET_SHADOW_DECLARE(val) \
	void __asan_set_shadow_##val(uptr addr, size_t sz) { \
		kasan_set_shadow(addr, sz, 0x##val); \
	}

SET_SHADOW_DECLARE(00)
SET_SHADOW_DECLARE(f1)
SET_SHADOW_DECLARE(f2)
SET_SHADOW_DECLARE(f3)
SET_SHADOW_DECLARE(f5)
SET_SHADOW_DECLARE(f8)

5ba3f43e 1408/*
a39ff7e2
A
1409 * Call 'cb' for each contiguous range of the shadow map. This could be more
1410 * efficient by walking the page table directly.
5ba3f43e 1411 */
a39ff7e2
A
1412int
1413kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
5ba3f43e 1414{
a39ff7e2
A
1415 uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
1416 uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
d9a64523
A
1417 shadow_base = vm_map_trunc_page(shadow_base, HW_PAGE_MASK);
1418 shadow_top = vm_map_round_page(shadow_top, HW_PAGE_MASK);
a39ff7e2
A
1419
1420 uintptr_t start = 0, end = 0;
1421
d9a64523 1422 for (uintptr_t addr = shadow_base; addr < shadow_top; addr += HW_PAGE_SIZE) {
a39ff7e2
A
1423 if (kasan_is_shadow_mapped(addr)) {
1424 if (start == 0) {
1425 start = addr;
1426 }
d9a64523 1427 end = addr + HW_PAGE_SIZE;
a39ff7e2
A
1428 } else if (start && end) {
1429 cb(start, end, ctx);
1430 start = end = 0;
1431 }
1432 }
5ba3f43e 1433
a39ff7e2
A
1434 if (start && end) {
1435 cb(start, end, ctx);
1436 }
5ba3f43e 1437
a39ff7e2 1438 return 0;
5ba3f43e
A
1439}
1440
/*
 * XXX: implement these
 */

UNUSED_ABI(__asan_alloca_poison, uptr addr, uptr size);
UNUSED_ABI(__asan_allocas_unpoison, uptr top, uptr bottom);
UNUSED_ABI(__sanitizer_ptr_sub, uptr a, uptr b);
UNUSED_ABI(__sanitizer_ptr_cmp, uptr a, uptr b);
UNUSED_ABI(__sanitizer_annotate_contiguous_container, const void *a, const void *b, const void *c, const void *d);
UNUSED_ABI(__asan_poison_stack_memory, uptr addr, size_t size);
UNUSED_ABI(__asan_unpoison_stack_memory, uptr a, uptr b);

/*
 * Miscellaneous unimplemented asan ABI
 */

UNUSED_ABI(__asan_init, void);
UNUSED_ABI(__asan_register_image_globals, uptr a);
UNUSED_ABI(__asan_unregister_image_globals, uptr a);
UNUSED_ABI(__asan_before_dynamic_init, uptr a);
UNUSED_ABI(__asan_after_dynamic_init, void);
UNUSED_ABI(__asan_version_mismatch_check_v8, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_802, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_900, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1000, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1001, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1100, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1200, void);

void OS_NORETURN UNSUPPORTED_API(__asan_init_v5, void);
void OS_NORETURN UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
void OS_NORETURN UNSUPPORTED_API(__asan_unregister_globals, uptr a, uptr b);
void OS_NORETURN UNSUPPORTED_API(__asan_register_elf_globals, uptr a, uptr b, uptr c);
void OS_NORETURN UNSUPPORTED_API(__asan_unregister_elf_globals, uptr a, uptr b, uptr c);

void OS_NORETURN UNSUPPORTED_API(__asan_exp_loadN, uptr addr, size_t sz, int32_t e);
void OS_NORETURN UNSUPPORTED_API(__asan_exp_storeN, uptr addr, size_t sz, int32_t e);
void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_load_n, uptr addr, unsigned long b, int32_t c);
void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_store_n, uptr addr, unsigned long b, int32_t c);

/*
 *
 * SYSCTL
 *
 */

static int
sysctl_kasan_test(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg2, struct sysctl_req *req)
{
	int mask = 0;
	int ch;
	int err;
	err = sysctl_io_number(req, 0, sizeof(int), &mask, &ch);

	if (!err && mask) {
		kasan_test(mask, arg2);
	}

	return err;
}

static int
sysctl_fakestack_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, int __unused arg2, struct sysctl_req *req)
{
	int ch, err, val;

	err = sysctl_io_number(req, fakestack_enabled, sizeof(fakestack_enabled), &val, &ch);
	if (err == 0 && ch) {
		fakestack_enabled = !!val;
		__asan_option_detect_stack_use_after_return = !!val;
	}

	return err;
}

A
1517SYSCTL_DECL(kasan);
1518SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
1519
1520SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
a39ff7e2
A
1521SYSCTL_UINT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
1522SYSCTL_UINT(_kern_kasan, OID_AUTO, checks, CTLFLAG_RW, &enabled_checks, 0, "");
1523SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
1524SYSCTL_UINT(_kern_kasan, OID_AUTO, report_ignored, CTLFLAG_RW, &report_ignored, 0, "");
1525SYSCTL_UINT(_kern_kasan, OID_AUTO, free_yield_ms, CTLFLAG_RW, &free_yield, 0, "");
cb323159
A
1526SYSCTL_UINT(_kern_kasan, OID_AUTO, leak_threshold, CTLFLAG_RW, &leak_threshold, 0, "");
1527SYSCTL_UINT(_kern_kasan, OID_AUTO, leak_fatal_threshold, CTLFLAG_RW, &leak_fatal_threshold, 0, "");
a39ff7e2
A
1528SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, "");
1529SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, "");
1530SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, "");
f427ee49
A
1531SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
1532SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
1533SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
a39ff7e2
A
1534SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");
1535
1536SYSCTL_PROC(_kern_kasan, OID_AUTO, fakestack,
f427ee49
A
1537 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1538 0, 0, sysctl_fakestack_enable, "I", "");
5ba3f43e
A
1539
1540SYSCTL_PROC(_kern_kasan, OID_AUTO, test,
f427ee49
A
1541 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1542 0, 0, sysctl_kasan_test, "I", "");
5ba3f43e
A
1543
1544SYSCTL_PROC(_kern_kasan, OID_AUTO, fail,
f427ee49
A
1545 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
1546 0, 1, sysctl_kasan_test, "I", "");
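
/*
 * Example (illustrative): from user space these knobs appear under kern.kasan,
 * e.g.
 *   sysctl kern.kasan.checks          # read or write the enabled-check mask
 *   sysctl kern.kasan.fakestack=1     # toggle stack use-after-return checks
 *   sysctl kern.kasan.test=1          # exercise the in-kernel KASan tests
 */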