/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/backtrace.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <kern/thread.h>
#include <machine/atomic.h>

#include <kasan.h>
#include <kasan_internal.h>
#include <memintrinsics.h>

const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_SHIFT;

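/*
 * Illustrative note (assumes the usual 1:8 ASan mapping, with
 * SHADOW_FOR_ADDRESS(x) computed as ((x) >> 3) + KASAN_SHIFT): each shadow
 * byte describes one 8-byte granule of kernel memory, e.g.
 *
 *   addr   = 0xffffff8012345678
 *   shadow = (0xffffff8012345678 >> 3) + KASAN_SHIFT
 *          = 0x1ffffff002468acf + KASAN_SHIFT
 */
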
static unsigned kexts_loaded;
unsigned shadow_pages_total;
unsigned shadow_pages_used;

vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;

static unsigned kasan_enabled;
static unsigned quarantine_enabled;
static unsigned enabled_checks = TYPE_ALL; /* bitmask of enabled checks */
static unsigned report_ignored;            /* issue non-fatal report for disabled/blacklisted checks */
static unsigned free_yield = 0;            /* ms yield after each free */

/* forward decls */
static void kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason);
static void kasan_log_report(uptr p, uptr width, access_t access, violation_t reason);

/* imported osfmk functions */
extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);

/*
 * unused: expected to be called, but (currently) does nothing
 */
#define UNUSED_ABI(func, ...) \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Wunused-parameter\"") \
	void func(__VA_ARGS__); \
	void func(__VA_ARGS__) {}; \
	_Pragma("clang diagnostic pop")

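/*
 * For example, UNUSED_ABI(__asan_init, void) expands (modulo the pragmas) to:
 *
 *   void __asan_init(void);
 *   void __asan_init(void) {};
 *
 * i.e. an exported no-op entry point with unused-parameter warnings
 * suppressed.
 */
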
static const size_t BACKTRACE_BITS = 4;
static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;

decl_simple_lock_data(, kasan_vm_lock);
static thread_t kasan_lock_holder;

/*
 * kasan is called from the interrupt path, so we need to disable interrupts
 * to ensure atomicity when manipulating the global objects
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&kasan_vm_lock);
	kasan_lock_holder = current_thread();
}

void
kasan_unlock(boolean_t b)
{
	kasan_lock_holder = THREAD_NULL;
	simple_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}

/*
 * Return true if 'thread' holds the kasan lock. Only safe if 'thread' ==
 * current thread.
 */
bool
kasan_lock_held(thread_t thread)
{
	return thread && thread == kasan_lock_holder;
}

static inline bool
kasan_check_enabled(access_t access)
{
	return kasan_enabled && (enabled_checks & access) && !kasan_is_blacklisted(access);
}

static inline bool
kasan_poison_active(uint8_t flags)
{
	switch (flags) {
	case ASAN_GLOBAL_RZ:
		return kasan_check_enabled(TYPE_POISON_GLOBAL);
	case ASAN_HEAP_RZ:
	case ASAN_HEAP_LEFT_RZ:
	case ASAN_HEAP_RIGHT_RZ:
	case ASAN_HEAP_FREED:
		return kasan_check_enabled(TYPE_POISON_HEAP);
	default:
		return true;
	}
}

/*
 * poison redzones in the shadow map
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz, uint8_t flags)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
	uint8_t partial = size & 0x07;
	vm_size_t total = leftrz + size + rightrz;
	vm_size_t i = 0;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert((total & 0x07) == 0);

	if (!kasan_enabled || !kasan_poison_active(flags)) {
		return;
	}

	leftrz /= 8;
	size /= 8;
	total /= 8;

	uint8_t l_flags = flags;
	uint8_t r_flags = flags;

	if (flags == ASAN_STACK_RZ) {
		l_flags = ASAN_STACK_LEFT_RZ;
		r_flags = ASAN_STACK_RIGHT_RZ;
	} else if (flags == ASAN_HEAP_RZ) {
		l_flags = ASAN_HEAP_LEFT_RZ;
		r_flags = ASAN_HEAP_RIGHT_RZ;
	}

	/*
	 * poison the redzones and unpoison the valid bytes
	 */
	for (; i < leftrz; i++) {
		shadow[i] = l_flags;
	}
	for (; i < leftrz + size; i++) {
		shadow[i] = ASAN_VALID; /* XXX: should not be necessary */
	}
	if (partial && (i < total)) {
		shadow[i] = partial;
		i++;
	}
	for (; i < total; i++) {
		shadow[i] = r_flags;
	}
}
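
/*
 * Worked example (illustrative values): kasan_poison(base, 13, 16, 19,
 * ASAN_HEAP_RZ) covers 48 bytes, i.e. six 8-byte granules of shadow:
 *
 *   [HEAP_LEFT_RZ][HEAP_LEFT_RZ][00][05][HEAP_RIGHT_RZ][HEAP_RIGHT_RZ]
 *
 * Two granules of left redzone, one fully-valid granule (0x00), one partial
 * granule whose first 5 bytes are valid (13 & 0x07), and the remaining
 * 19 bytes of right redzone.
 */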

void
kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
{
	/* base must be 8-byte aligned */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((size & 0x07) == 0);
	kasan_poison(base, 0, 0, size, flags);
}

void NOINLINE
kasan_unpoison(void *base, vm_size_t size)
{
	kasan_poison((vm_offset_t)base, size, 0, 0, 0);
}

void NOINLINE
kasan_unpoison_stack(vm_offset_t base, vm_size_t size)
{
	assert(base);
	assert(size);

	/* align base and size to 8 bytes */
	vm_offset_t align = base & 0x7;
	base -= align;
	size += align;
	size = (size + 7) & ~0x7;

	kasan_unpoison((void *)base, size);
}

/*
 * write junk into the redzones
 */
static void NOINLINE
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
#if KASAN_DEBUG
	vm_size_t i;
	const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
	const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
	uint8_t *buf = (uint8_t *)base;

	/* base must be 8-byte aligned */
	/* any left redzone must be a multiple of 8 */
	/* total region must cover 8-byte multiple */
	assert((base & 0x07) == 0);
	assert((leftrz & 0x07) == 0);
	assert(((size + leftrz + rightrz) & 0x07) == 0);

	for (i = 0; i < leftrz; i++) {
		buf[i] = deadbeef[i % 4];
	}

	for (i = 0; i < rightrz; i++) {
		buf[i + size + leftrz] = c0ffee[i % 4];
	}
#else
	(void)base;
	(void)size;
	(void)leftrz;
	(void)rightrz;
#endif
}
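
/*
 * For example (KASAN_DEBUG builds only): an 8-byte left redzone is filled
 * with the repeating pattern de ad be ef de ad be ef, and the right redzone
 * with c0 ff ee c0 ..., so redzone overwrites stand out in a memory dump.
 */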

/*
 * Report a violation that may be disabled and/or blacklisted. This can only be
 * called for dynamic checks (i.e. where the fault is recoverable). Use
 * kasan_crash_report() for static (unrecoverable) violations.
 *
 * access: what we were trying to do when the violation occurred
 * reason: what failed about the access
 */
static void
kasan_violation(uintptr_t addr, size_t size, access_t access, violation_t reason)
{
	assert(__builtin_popcount(access) == 1);
	if (!kasan_check_enabled(access)) {
		if (report_ignored) {
			kasan_log_report(addr, size, access, reason);
		}
		return;
	}
	kasan_crash_report(addr, size, access, reason);
}

void NOINLINE
kasan_check_range(const void *x, size_t sz, access_t access)
{
	uintptr_t invalid;
	uintptr_t ptr = (uintptr_t)x;
	if (kasan_range_poisoned(ptr, sz, &invalid)) {
		size_t remaining = sz - (invalid - ptr);
		kasan_violation(invalid, remaining, access, 0);
	}
}

/*
 * Return true if [base, base+sz) is unpoisoned or has given shadow value.
 */
static bool
kasan_check_shadow(vm_address_t base, vm_size_t sz, uint8_t shadow)
{
	sz -= 8 - (base % 8);
	base += 8 - (base % 8);

	vm_address_t end = base + sz;

	while (base < end) {
		uint8_t *sh = SHADOW_FOR_ADDRESS(base);
		if (*sh && *sh != shadow) {
			return false;
		}
		base += 8;
	}
	return true;
}

/*
 *
 * KASAN violation reporting
 *
 */

static const char *
access_str(access_t type)
{
	if (type & TYPE_READ) {
		return "load from";
	} else if (type & TYPE_WRITE) {
		return "store to";
	} else if (type & TYPE_FREE) {
		return "free of";
	} else {
		return "access of";
	}
}

static const char *shadow_strings[] = {
	[ASAN_VALID]          = "VALID",
	[ASAN_PARTIAL1]       = "PARTIAL1",
	[ASAN_PARTIAL2]       = "PARTIAL2",
	[ASAN_PARTIAL3]       = "PARTIAL3",
	[ASAN_PARTIAL4]       = "PARTIAL4",
	[ASAN_PARTIAL5]       = "PARTIAL5",
	[ASAN_PARTIAL6]       = "PARTIAL6",
	[ASAN_PARTIAL7]       = "PARTIAL7",
	[ASAN_STACK_LEFT_RZ]  = "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ]   = "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED]    = "STACK_FREED",
	[ASAN_STACK_OOSCOPE]  = "STACK_OOSCOPE",
	[ASAN_GLOBAL_RZ]      = "GLOBAL_RZ",
	[ASAN_HEAP_LEFT_RZ]   = "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ]  = "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED]     = "HEAP_FREED",
	[0xff]                = NULL
};

#define CRASH_CONTEXT_BEFORE 5
#define CRASH_CONTEXT_AFTER  5

static size_t
kasan_shadow_crashlog(uptr p, char *buf, size_t len)
{
	int i, j;
	size_t n = 0;
	int before = CRASH_CONTEXT_BEFORE;
	int after = CRASH_CONTEXT_AFTER;

	uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
	uptr shadow_p = shadow;
	uptr shadow_page = vm_map_round_page(shadow_p, PAGE_MASK);

	/* rewind to start of context block */
	shadow &= ~((uptr)0xf);
	shadow -= 16 * before;

	n += snprintf(buf+n, len-n,
	    " Shadow             0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f\n");

	for (i = 0; i < 1 + before + after; i++, shadow += 16) {
		if ((vm_map_round_page(shadow, PAGE_MASK) != shadow_page) && !kasan_is_shadow_mapped(shadow)) {
			/* avoid unmapped shadow when crossing page boundaries */
			continue;
		}

		n += snprintf(buf+n, len-n, " %16lx:", shadow);

		char *left = " ";
		char *right;

		for (j = 0; j < 16; j++) {
			uint8_t *x = (uint8_t *)(shadow + j);

			right = " ";
			if ((uptr)x == shadow_p) {
				left = "[";
				right = "]";
			} else if ((uptr)(x + 1) == shadow_p) {
				right = "";
			}

			n += snprintf(buf+n, len-n, "%s%02x%s", left, (unsigned)*x, right);
			left = "";
		}
		n += snprintf(buf+n, len-n, "\n");
	}

	n += snprintf(buf+n, len-n, "\n");
	return n;
}
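
/*
 * Produces a report section of roughly this shape (values illustrative):
 *
 *   Shadow             0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f
 *   fffffff00209cc00: 00 00 00 00 00 00 00 00 fa fa[fd]fd 00 00 00 00
 *
 * with the shadow byte covering the faulting address marked by brackets.
 */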

static void
kasan_report_internal(uptr p, uptr width, access_t access, violation_t reason, bool dopanic)
{
	const size_t len = 4096;
	static char buf[len];
	size_t n = 0;

	uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
	uint8_t shadow_type = *shadow_ptr;
	const char *shadow_str = shadow_strings[shadow_type];
	if (!shadow_str) {
		shadow_str = "<invalid>";
	}
	buf[0] = '\0';

	if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
		n += snprintf(buf+n, len-n, "KASan: free of corrupted/invalid object %#lx\n", p);
	} else if (reason == REASON_MOD_AFTER_FREE) {
		n += snprintf(buf+n, len-n, "KASan: UaF of quarantined object %#lx\n", p);
	} else {
		n += snprintf(buf+n, len-n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
		    width, access_str(access), p, shadow_str);
	}
	n += kasan_shadow_crashlog(p, buf+n, len-n);

	if (dopanic) {
		panic("%s", buf);
	} else {
		printf("%s", buf);
	}
}

static void NOINLINE OS_NORETURN
kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason)
{
	kasan_handle_test();
	kasan_report_internal(p, width, access, reason, true);
	__builtin_unreachable(); /* we can't handle this returning anyway */
}

static void
kasan_log_report(uptr p, uptr width, access_t access, violation_t reason)
{
	const size_t len = 256;
	char buf[len];
	size_t l = 0;
	uint32_t nframes = 14;
	uintptr_t frames[nframes];
	uintptr_t *bt = frames;

	kasan_report_internal(p, width, access, reason, false);

	/*
	 * print a backtrace
	 */

	nframes = backtrace_frame(bt, nframes, __builtin_frame_address(0)); /* ignore current frame */

	buf[0] = '\0';
	l += snprintf(buf+l, len-l, "Backtrace: ");
	for (uint32_t i = 0; i < nframes; i++) {
		l += snprintf(buf+l, len-l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
	}
	l += snprintf(buf+l, len-l, "\n");

	printf("%s", buf);
}

#define REPORT_DECLARE(n) \
	void OS_NORETURN __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD, 0); } \
	void OS_NORETURN __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE, 0); } \
	void UNSUPPORTED_API(__asan_report_exp_load##n, uptr a, int32_t b); \
	void UNSUPPORTED_API(__asan_report_exp_store##n, uptr a, int32_t b);

REPORT_DECLARE(1)
REPORT_DECLARE(2)
REPORT_DECLARE(4)
REPORT_DECLARE(8)
REPORT_DECLARE(16)

void OS_NORETURN __asan_report_load_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_LOAD, 0); }
void OS_NORETURN __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE, 0); }

/* unpoison the current stack */
void NOINLINE
kasan_unpoison_curstack(bool whole_stack)
{
	uintptr_t base = ml_stack_base();
	size_t sz = ml_stack_size();
	uintptr_t cur = (uintptr_t)&base;

	if (whole_stack) {
		cur = base;
	}

	if (cur >= base && cur < base + sz) {
		/* unpoison from current stack depth to the top */
		size_t unused = cur - base;
		kasan_unpoison_stack(cur, sz - unused);
	}
}

void NOINLINE
__asan_handle_no_return(void)
{
	kasan_unpoison_curstack(false);
	kasan_unpoison_fakestack(current_thread());
}

bool NOINLINE
kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
{
	uint8_t *shadow;
	vm_size_t i;

	if (!kasan_enabled) {
		return false;
	}

	size += base & 0x07;
	base &= ~(vm_offset_t)0x07;

	shadow = SHADOW_FOR_ADDRESS(base);
	vm_size_t limit = (size + 7) / 8;

	/* XXX: to make debugging easier, catch unmapped shadow here */

	for (i = 0; i < limit; i++, size -= 8) {
		assert(size > 0);
		uint8_t s = shadow[i];
		if (s == 0 || (size < 8 && s >= size && s <= 7)) {
			/* valid */
		} else {
			goto fail;
		}
	}

	return false;

fail:
	if (first_invalid) {
		/* XXX: calculate the exact first byte that failed */
		*first_invalid = base + i*8;
	}
	return true;
}
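
/*
 * Shadow-encoding example (illustrative): a shadow byte of 0 marks a fully
 * valid 8-byte granule, and a value 1..7 marks a granule whose first N bytes
 * are valid. So for a check with 3 bytes left in the final granule
 * (size == 3), shadow values 0 and 3..7 are accepted, while 1, 2, or any
 * poison flag such as ASAN_HEAP_FREED sends the loop to the fail path.
 */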

static void NOINLINE
kasan_init_globals(vm_offset_t base, vm_size_t size)
{
	struct asan_global *glob = (struct asan_global *)base;
	struct asan_global *glob_end = (struct asan_global *)(base + size);
	for (; glob < glob_end; glob++) {
		/* handle one global */
		kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
	}
}

void NOINLINE
kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
{
	unsigned long sectsz;
	void *sect;

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_load_kext(base, bundleid);
#endif

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
		kexts_loaded++;
	}
}

void NOINLINE
kasan_unload_kext(vm_offset_t base, vm_size_t size)
{
	unsigned long sectsz;
	void *sect;

	/* find the kasan globals segment/section */
	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
	if (sect) {
		kasan_unpoison((void *)base, size);
		kexts_loaded--;
	}

#if KASAN_DYNAMIC_BLACKLIST
	kasan_dybl_unload_kext(base);
#endif
}

/*
 * Turn off as much as possible for the panic path etc. There's no way to
 * turn it back on.
 */
void NOINLINE
kasan_disable(void)
{
	__asan_option_detect_stack_use_after_return = 0;
	fakestack_enabled = 0;
	kasan_enabled = 0;
	quarantine_enabled = 0;
	enabled_checks = 0;
}

static void NOINLINE
kasan_init_xnu_globals(void)
{
	const char *seg = KASAN_GLOBAL_SEGNAME;
	const char *sect = KASAN_GLOBAL_SECTNAME;
	unsigned long _size;
	vm_offset_t globals;
	vm_size_t size;
	kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;

	if (!header) {
		printf("KASan: failed to find kernel mach header\n");
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}

	globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
	if (!globals) {
		printf("KASan: failed to find segment %s section %s\n", seg, sect);
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}
	size = (vm_size_t)_size;

	printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
	printf("KASan: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));

	kasan_init_globals(globals, size);
}

void NOINLINE
kasan_late_init(void)
{
#if KASAN_DYNAMIC_BLACKLIST
	kasan_init_dybl();
#endif

	kasan_init_fakestack();
	kasan_init_xnu_globals();
}

void NOINLINE
kasan_notify_stolen(vm_offset_t top)
{
	kasan_map_shadow(kernel_vtop, top - kernel_vtop, false);
}

static void NOINLINE
kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
{
#if KASAN_DEBUG
	vm_size_t i;
	uint8_t tmp1, tmp2;

	/* Hit every byte in the shadow map. Don't write due to the zero mappings. */
	for (i = 0; i < sz; i += sizeof(uint64_t)) {
		vm_offset_t addr = base + i;
		uint8_t *x = SHADOW_FOR_ADDRESS(addr);
		tmp1 = *x;
		asm volatile("" ::: "memory");
		tmp2 = *x;
		asm volatile("" ::: "memory");
		assert(tmp1 == tmp2);
	}
#else
	(void)base;
	(void)sz;
#endif
}

void NOINLINE
kasan_init(void)
{
	unsigned arg;

	simple_lock_init(&kasan_vm_lock, 0);

	/* Map all of the kernel text and data */
	kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);

	kasan_arch_init();

	/*
	 * handle KASan boot-args
	 */

	if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
		enabled_checks = arg;
	}

	if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
		if (arg & KASAN_ARGS_FAKESTACK) {
			fakestack_enabled = 1;
		}
		if (arg & KASAN_ARGS_REPORTIGNORED) {
			report_ignored = 1;
		}
		if (arg & KASAN_ARGS_NODYCHECKS) {
			enabled_checks &= ~TYPE_DYNAMIC;
		}
		if (arg & KASAN_ARGS_NOPOISON_HEAP) {
			enabled_checks &= ~TYPE_POISON_HEAP;
		}
		if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
			enabled_checks &= ~TYPE_POISON_GLOBAL;
		}
	}

	if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
		free_yield = arg;
	}

	/* kasan.bl boot-arg handled in kasan_init_dybl() */

	quarantine_enabled = 1;
	kasan_enabled = 1;
}

static void NOINLINE
kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool is_zero)
{
	assert(address < VM_MAX_KERNEL_ADDRESS);

	if (!kasan_enabled) {
		return;
	}

	if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		/* only map kernel addresses */
		return;
	}

	if (!size) {
		/* nothing to map */
		return;
	}

	boolean_t flags;
	kasan_lock(&flags);
	kasan_map_shadow(address, size, is_zero);
	kasan_unlock(flags);
	kasan_debug_touch_mappings(address, size);
}

void
kasan_notify_address(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, false);
}

/*
 * Allocate read-only, all-zeros shadow for memory that can never be poisoned
 */
void
kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, true);
}

/*
 *
 * allocator hooks
 *
 */

struct kasan_alloc_header {
	uint16_t magic;
	uint16_t crc;
	uint32_t alloc_size;
	uint32_t user_size;
	struct {
		uint32_t left_rz : 32 - BACKTRACE_BITS;
		uint32_t frames  : BACKTRACE_BITS;
	};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

struct kasan_alloc_footer {
	uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");

#define LIVE_XOR ((uint16_t)0x3a65)
#define FREE_XOR ((uint16_t)0xf233)

static uint16_t
magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
{
	uint16_t magic = addr & 0xFFFF;
	magic ^= (addr >> 16) & 0xFFFF;
	magic ^= (addr >> 32) & 0xFFFF;
	magic ^= (addr >> 48) & 0xFFFF;
	magic ^= magic_xor;
	return magic;
}
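
/*
 * Worked example (illustrative address): for addr = 0xffffff8012345678, the
 * four 16-bit halves are 0x5678, 0x1234, 0xff80 and 0xffff;
 * 0x5678 ^ 0x1234 ^ 0xff80 ^ 0xffff = 0x4433, so a live allocation at that
 * address gets magic 0x4433 ^ LIVE_XOR = 0x7e56. A stray write over the
 * header is unlikely to preserve this address-dependent value.
 */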

static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)
{
	return (void *)(addr - sizeof(struct kasan_alloc_header));
}

static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
	*size = rightrz;
	return (void *)(addr + h->user_size);
}

/*
 * size: user-requested allocation size
 * ret:  minimum size for the real allocation
 */
vm_size_t
kasan_alloc_resize(vm_size_t size)
{
	vm_size_t tmp;
	if (os_add_overflow(size, 4 * PAGE_SIZE, &tmp)) {
		panic("allocation size overflow (%lu)", size);
	}

	/* add left and right redzones */
	size += KASAN_GUARD_PAD;

	/* ensure the final allocation is an 8-byte multiple */
	size += 8 - (size % 8);

	return size;
}
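
/*
 * For example (assuming KASAN_GUARD_PAD is 32, i.e. 16 bytes of guard on
 * each side): a 100-byte request becomes 100 + 32 = 132, rounded up to the
 * next 8-byte multiple, 136. Note the rounding always adds at least one
 * byte, so an already-aligned size grows by a full 8.
 */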

extern vm_offset_t vm_kernel_slid_base;

static vm_size_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
	uintptr_t buf[BACKTRACE_MAXFRAMES];
	uintptr_t *bt = buf;

	sz /= sizeof(uint32_t);
	vm_size_t frames = sz;

	if (frames > 0) {
		frames = min(frames + skip, BACKTRACE_MAXFRAMES);
		frames = backtrace(bt, frames);

		while (frames > sz && skip > 0) {
			bt++;
			frames--;
			skip--;
		}

		/* only store the offset from kernel base, and cram that into 32
		 * bits */
		for (vm_size_t i = 0; i < frames; i++) {
			ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
		}
	}
	return frames;
}

/* addr: user address of allocation */
static uint16_t
kasan_alloc_crc(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;

	uint16_t crc_orig = h->crc;
	h->crc = 0;

	uint16_t crc = 0;
	crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
	crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);

	h->crc = crc_orig;

	return crc;
}

/*
 * addr:   base address of full allocation (including redzones)
 * size:   total size of allocation (including redzones)
 * req:    user-requested allocation size
 * leftrz: size of the left redzone in bytes
 * ret:    address of usable allocation
 */
vm_address_t
kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	if (!addr) {
		return 0;
	}
	assert(size > 0);
	assert((addr % 8) == 0);
	assert((size % 8) == 0);

	vm_size_t rightrz = size - req - leftrz;

	kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
	kasan_rz_clobber(addr, req, leftrz, rightrz);

	addr += leftrz;

	/* stash the allocation sizes in the left redzone */
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	h->magic = magic_for_addr(addr, LIVE_XOR);
	h->left_rz = leftrz;
	h->alloc_size = size;
	h->user_size = req;

	/* ... and a backtrace in the right redzone */
	vm_size_t fsize;
	struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
	h->frames = kasan_alloc_bt(f->backtrace, fsize, 2);

	/* checksum the whole object, minus the user part */
	h->crc = kasan_alloc_crc(addr);

	return addr;
}
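
/*
 * Resulting layout of a live allocation (sketch):
 *
 *   addr                                                  addr + size
 *   |<----- leftrz ----->|<----- req ----->|<----- rightrz ----->|
 *   [ clobber  | header  ][   user data    ][ backtrace | clobber ]
 *                         ^
 *                         returned pointer
 *
 * The header occupies the last sizeof(struct kasan_alloc_header) bytes of
 * the left redzone; the footer backtrace starts right after the user data.
 */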

/*
 * addr: user pointer
 * size: returns full original allocation size
 * ret:  original allocation ptr
 */
vm_address_t
kasan_dealloc(vm_offset_t addr, vm_size_t *size)
{
	assert(size && addr);
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	*size = h->alloc_size;
	return addr - h->left_rz;
}

/*
 * return the original user-requested allocation size
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	assert(h->magic == magic_for_addr(addr, LIVE_XOR));
	return h->user_size;
}

/*
 * Verify that `addr' (user pointer) is a valid allocation of `type'
 */
void
kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);

	/* map heap type to an internal access type */
	access_t type = heap_type == KASAN_HEAP_KALLOC    ? TYPE_KFREE :
	    heap_type == KASAN_HEAP_ZALLOC    ? TYPE_ZFREE :
	    heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0;

	/* check the magic and crc match */
	if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}
	if (h->crc != kasan_alloc_crc(addr)) {
		kasan_violation(addr, size, type, REASON_MOD_OOB);
	}

	/* check the freed size matches what we recorded at alloc time */
	if (h->user_size != size) {
		kasan_violation(addr, size, type, REASON_INVALID_SIZE);
	}

	vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;

	/* Check that the redzones are valid */
	if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
	    !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}

	/* Check the allocated range is not poisoned */
	kasan_check_range((void *)addr, size, type);
}

/*
 *
 * Quarantine
 *
 */

struct freelist_entry {
	uint16_t magic;
	uint16_t crc;
	STAILQ_ENTRY(freelist_entry) list;
	union {
		struct {
			vm_size_t size      : 28;
			vm_size_t user_size : 28;
			vm_size_t frames    : BACKTRACE_BITS; /* number of frames in backtrace */
			vm_size_t __unused  : 8 - BACKTRACE_BITS;
		};
		uint64_t bits;
	};
	zone_t zone;
	uint32_t backtrace[];
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");

struct quarantine {
	STAILQ_HEAD(freelist_head, freelist_entry) freelist;
	unsigned long entries;
	unsigned long max_entries;
	vm_size_t size;
	vm_size_t max_size;
};

struct quarantine quarantines[] = {
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)),    0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};

static uint16_t
fle_crc(struct freelist_entry *fle)
{
	return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
}

/*
 * addr, sizep: pointer/size of full allocation including redzone
 */
void NOINLINE
kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
    zone_t *zone, vm_size_t user_size, int locked,
    bool doquarantine)
{
	vm_size_t size = *sizep;
	vm_offset_t addr = *(vm_offset_t *)addrp;

	assert(type >= 0 && type < KASAN_HEAP_TYPES);
	if (type == KASAN_HEAP_KALLOC) {
		/* zero-size kalloc allocations are allowed */
		assert(!zone);
	} else if (type == KASAN_HEAP_ZALLOC) {
		assert(zone && user_size);
	} else if (type == KASAN_HEAP_FAKESTACK) {
		assert(zone && user_size);
	}

	/* clobber the entire freed region */
	kasan_rz_clobber(addr, 0, size, 0);

	if (!doquarantine || !quarantine_enabled) {
		goto free_current;
	}

	/* poison the entire freed region */
	uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
	kasan_poison(addr, 0, size, 0, flags);

	struct freelist_entry *fle, *tofree = NULL;
	struct quarantine *q = &quarantines[type];
	assert(size >= sizeof(struct freelist_entry));

	/* create a new freelist entry */
	fle = (struct freelist_entry *)addr;
	fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
	fle->size = size;
	fle->user_size = user_size;
	fle->frames = 0;
	fle->zone = ZONE_NULL;
	if (zone) {
		fle->zone = *zone;
	}
	if (type != KASAN_HEAP_FAKESTACK) {
		/* don't do expensive things on the fakestack path */
		fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
		fle->crc = fle_crc(fle);
	}

	boolean_t flg;
	if (!locked) {
		kasan_lock(&flg);
	}

	if (q->size + size > q->max_size) {
		/*
		 * Adding this entry would put us over the max quarantine size. Free the
		 * larger of the current object and the quarantine head object.
		 */
		tofree = STAILQ_FIRST(&q->freelist);
		if (fle->size > tofree->size) {
			goto free_current_locked;
		}
	}

	STAILQ_INSERT_TAIL(&q->freelist, fle, list);
	q->entries++;
	q->size += size;

	/* free the oldest entry, if necessary */
	if (tofree || q->entries > q->max_entries) {
		tofree = STAILQ_FIRST(&q->freelist);
		STAILQ_REMOVE_HEAD(&q->freelist, list);

		assert(q->entries > 0 && q->size >= tofree->size);
		q->entries--;
		q->size -= tofree->size;

		if (type != KASAN_HEAP_KALLOC) {
			assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS &&
			    (vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS);
			*zone = tofree->zone;
		}

		size = tofree->size;
		addr = (vm_offset_t)tofree;

		/* check the magic and crc match */
		if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}
		if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}

		/* clobber the quarantine header */
		__nosan_bzero((void *)addr, sizeof(struct freelist_entry));

	} else {
		/* quarantine is not full - don't really free anything */
		addr = 0;
	}

free_current_locked:
	if (!locked) {
		kasan_unlock(flg);
	}

free_current:
	*addrp = (void *)addr;
	if (addr) {
		kasan_unpoison((void *)addr, size);
		*sizep = size;
	}
}

void NOINLINE
kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
    vm_size_t user_size, bool quarantine)
{
	kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);

	if (free_yield) {
		thread_yield_internal(free_yield);
	}
}

uptr
__asan_load_cxx_array_cookie(uptr *p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)p);
	if (*shadow == ASAN_ARRAY_COOKIE) {
		return *p;
	} else if (*shadow == ASAN_HEAP_FREED) {
		return 0;
	} else {
		return *p;
	}
}

void
__asan_poison_cxx_array_cookie(uptr p)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(p);
	*shadow = ASAN_ARRAY_COOKIE;
}

#define ACCESS_CHECK_DECLARE(type, sz, access) \
	void __asan_##type##sz(uptr addr) { \
		kasan_check_range((const void *)addr, sz, access); \
	} \
	void UNSUPPORTED_API(__asan_exp_##type##sz, uptr a, int32_t b);

ACCESS_CHECK_DECLARE(load,  1, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  2, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  4, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  8, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load,  16, TYPE_LOAD);
ACCESS_CHECK_DECLARE(store, 1, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 2, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 4, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 8, TYPE_STORE);
ACCESS_CHECK_DECLARE(store, 16, TYPE_STORE);
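
/*
 * For example, ACCESS_CHECK_DECLARE(load, 8, TYPE_LOAD) expands to:
 *
 *   void __asan_load8(uptr addr) {
 *       kasan_check_range((const void *)addr, 8, TYPE_LOAD);
 *   }
 *   void UNSUPPORTED_API(__asan_exp_load8, uptr a, int32_t b);
 *
 * i.e. the entry point the compiler emits a call to before each 8-byte load.
 */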

void
__asan_loadN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_LOAD);
}

void
__asan_storeN(uptr addr, size_t sz)
{
	kasan_check_range((const void *)addr, sz, TYPE_STORE);
}

static void
kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
{
	__nosan_memset((void *)addr, val, sz);
}

#define SET_SHADOW_DECLARE(val) \
	void __asan_set_shadow_##val(uptr addr, size_t sz) { \
		kasan_set_shadow(addr, sz, 0x##val); \
	}

SET_SHADOW_DECLARE(00)
SET_SHADOW_DECLARE(f1)
SET_SHADOW_DECLARE(f2)
SET_SHADOW_DECLARE(f3)
SET_SHADOW_DECLARE(f5)
SET_SHADOW_DECLARE(f8)


/*
 * Call 'cb' for each contiguous range of the shadow map. This could be more
 * efficient by walking the page table directly.
 */
int
kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
{
	uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
	uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
	shadow_base = vm_map_trunc_page(shadow_base, PAGE_MASK);
	shadow_top = vm_map_round_page(shadow_top, PAGE_MASK);

	uintptr_t start = 0, end = 0;

	for (uintptr_t addr = shadow_base; addr < shadow_top; addr += PAGE_SIZE) {
		if (kasan_is_shadow_mapped(addr)) {
			if (start == 0) {
				start = addr;
			}
			end = addr + PAGE_SIZE;
		} else if (start && end) {
			cb(start, end, ctx);
			start = end = 0;
		}
	}

	if (start && end) {
		cb(start, end, ctx);
	}

	return 0;
}

/*
 * XXX: implement these
 */

UNUSED_ABI(__asan_alloca_poison, uptr addr, uptr size);
UNUSED_ABI(__asan_allocas_unpoison, uptr top, uptr bottom);
UNUSED_ABI(__sanitizer_ptr_sub, uptr a, uptr b);
UNUSED_ABI(__sanitizer_ptr_cmp, uptr a, uptr b);
UNUSED_ABI(__sanitizer_annotate_contiguous_container, const void *a, const void *b, const void *c, const void *d);
UNUSED_ABI(__asan_poison_stack_memory, uptr addr, size_t size);
UNUSED_ABI(__asan_unpoison_stack_memory, uptr a, uptr b);

/*
 * Miscellaneous unimplemented asan ABI
 */

UNUSED_ABI(__asan_init, void);
UNUSED_ABI(__asan_register_image_globals, uptr a);
UNUSED_ABI(__asan_unregister_image_globals, uptr a);
UNUSED_ABI(__asan_before_dynamic_init, uptr a);
UNUSED_ABI(__asan_after_dynamic_init, void);
UNUSED_ABI(__asan_version_mismatch_check_v8, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_802, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_900, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);

void UNSUPPORTED_API(__asan_init_v5, void);
void UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
void UNSUPPORTED_API(__asan_unregister_globals, uptr a, uptr b);
void UNSUPPORTED_API(__asan_register_elf_globals, uptr a, uptr b, uptr c);
void UNSUPPORTED_API(__asan_unregister_elf_globals, uptr a, uptr b, uptr c);

void UNSUPPORTED_API(__asan_exp_loadN, uptr addr, size_t sz, int32_t e);
void UNSUPPORTED_API(__asan_exp_storeN, uptr addr, size_t sz, int32_t e);
void UNSUPPORTED_API(__asan_report_exp_load_n, uptr addr, unsigned long b, int32_t c);
void UNSUPPORTED_API(__asan_report_exp_store_n, uptr addr, unsigned long b, int32_t c);

/*
 *
 * SYSCTL
 *
 */

static int
sysctl_kasan_test(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg2, struct sysctl_req *req)
{
	int mask = 0;
	int ch;
	int err;
	err = sysctl_io_number(req, 0, sizeof(int), &mask, &ch);

	if (!err && mask) {
		kasan_test(mask, arg2);
	}

	return err;
}

static int
sysctl_fakestack_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, int __unused arg2, struct sysctl_req *req)
{
	int ch, err, val;

	err = sysctl_io_number(req, fakestack_enabled, sizeof(fakestack_enabled), &val, &ch);
	if (err == 0 && ch) {
		fakestack_enabled = !!val;
		__asan_option_detect_stack_use_after_return = !!val;
	}

	return err;
}

SYSCTL_DECL(kasan);
SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");

SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, checks, CTLFLAG_RW, &enabled_checks, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, report_ignored, CTLFLAG_RW, &report_ignored, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, free_yield_ms, CTLFLAG_RW, &free_yield, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fakestack,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_fakestack_enable, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, test,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_kasan_test, "I", "");

SYSCTL_PROC(_kern_kasan, OID_AUTO, fail,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 1, sysctl_kasan_test, "I", "");
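
/*
 * These publish under kern.kasan; e.g. from userland (illustrative usage):
 *
 *   sysctl kern.kasan.available        # 1 on a KASan kernel
 *   sysctl kern.kasan.memused          # shadow pages currently in use
 *   sysctl -w kern.kasan.fakestack=1   # toggle stack use-after-return checks
 */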