diff --git a/san/kasan.c b/san/kasan.c
index 960dc25b4..01faa3801 100644
--- a/san/kasan.c
+++ b/san/kasan.c
@@ -55,58 +55,44 @@
 #include
 #include
 
-#if !KASAN_DEBUG
-# undef NOINLINE
-# define NOINLINE
-#endif
-
 const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_SHIFT;
 
-static long kexts_loaded;
-
-long shadow_pages_total;
-long shadow_pages_used;
+static unsigned kexts_loaded;
+unsigned shadow_pages_total;
+unsigned shadow_pages_used;
 
 vm_offset_t kernel_vbase;
 vm_offset_t kernel_vtop;
 
-static bool kasan_initialized;
-static int kasan_enabled;
-static int quarantine_enabled = 1;
+static unsigned kasan_enabled;
+static unsigned quarantine_enabled;
+static unsigned enabled_checks = TYPE_ALL; /* bitmask of enabled checks */
+static unsigned report_ignored;            /* issue non-fatal report for disabled/blacklisted checks */
+static unsigned free_yield = 0;            /* ms yield after each free */
+
+/* forward decls */
+static void kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason);
+static void kasan_log_report(uptr p, uptr width, access_t access, violation_t reason);
 
-static void kasan_crash_report(uptr p, uptr width, unsigned access_type);
+/* imported osfmk functions */
 extern vm_offset_t ml_stack_base(void);
 extern vm_size_t ml_stack_size(void);
 
-#define ABI_UNSUPPORTED do { panic("KASan: unsupported ABI: %s\n", __func__); } while (0)
+/*
+ * unused: expected to be called, but (currently) does nothing
+ */
+#define UNUSED_ABI(func, ...) \
+    _Pragma("clang diagnostic push") \
+    _Pragma("clang diagnostic ignored \"-Wunused-parameter\"") \
+    void func(__VA_ARGS__); \
+    void func(__VA_ARGS__) {}; \
+    _Pragma("clang diagnostic pop") \
 
-#define BACKTRACE_MAXFRAMES 16
+static const size_t BACKTRACE_BITS = 4;
+static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;
 
 decl_simple_lock_data(, kasan_vm_lock);
-
-_Atomic int unsafe_count = 0;
-
-void
-kasan_unsafe_start(void)
-{
-    if (__c11_atomic_fetch_add(&unsafe_count, 1, memory_order_relaxed) == 128) {
-        panic("kasan_unsafe_start overflow");
-    }
-}
-
-void
-kasan_unsafe_end(void)
-{
-    if (__c11_atomic_fetch_sub(&unsafe_count, 1, memory_order_relaxed) == 0) {
-        panic("kasan_unsafe_end underflow");
-    }
-}
-
-static bool
-kasan_in_unsafe(void)
-{
-    return atomic_load_explicit(&unsafe_count, memory_order_relaxed) != 0;
-}
+static thread_t kasan_lock_holder;
 
 /*
  * kasan is called from the interrupt path, so we need to disable interrupts to
@@ -117,15 +103,47 @@ kasan_lock(boolean_t *b)
 {
     *b = ml_set_interrupts_enabled(false);
     simple_lock(&kasan_vm_lock);
+    kasan_lock_holder = current_thread();
 }
 
 void
 kasan_unlock(boolean_t b)
 {
+    kasan_lock_holder = THREAD_NULL;
     simple_unlock(&kasan_vm_lock);
     ml_set_interrupts_enabled(b);
 }
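The holder field recorded in kasan_lock()/kasan_unlock() above enables the kasan_lock_held() predicate that follows. A minimal sketch of the intended usage pattern (the caller name is hypothetical; current_thread() and the lock/unlock pair come from the surrounding code):

    static void
    example_shadow_op(void)
    {
        boolean_t flag;
        bool held = kasan_lock_held(current_thread());

        if (!held) {
            kasan_lock(&flag);      /* also disables interrupts */
        }
        /* ... operate on the shadow map ... */
        if (!held) {
            kasan_unlock(flag);
        }
    }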
+/*
+ * Return true if 'thread' holds the kasan lock. Only safe when 'thread' is
+ * the current thread.
+ */
+bool
+kasan_lock_held(thread_t thread)
+{
+    return thread && thread == kasan_lock_holder;
+}
+
+static inline bool
+kasan_check_enabled(access_t access)
+{
+    return kasan_enabled && (enabled_checks & access) && !kasan_is_blacklisted(access);
+}
+
+static inline bool
+kasan_poison_active(uint8_t flags)
+{
+    switch (flags) {
+    case ASAN_GLOBAL_RZ:
+        return kasan_check_enabled(TYPE_POISON_GLOBAL);
+    case ASAN_HEAP_RZ:
+    case ASAN_HEAP_LEFT_RZ:
+    case ASAN_HEAP_RIGHT_RZ:
+    case ASAN_HEAP_FREED:
+        return kasan_check_enabled(TYPE_POISON_HEAP);
+    default:
+        return true;
+    };
+}
+
 /*
  * poison redzones in the shadow map
  */
@@ -144,7 +162,7 @@ kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t right
     assert((leftrz & 0x07) == 0);
     assert((total & 0x07) == 0);
 
-    if (!kasan_enabled || !kasan_initialized) {
+    if (!kasan_enabled || !kasan_poison_active(flags)) {
         return;
     }
 
@@ -170,7 +188,7 @@ kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t right
         shadow[i] = l_flags;
     }
     for (; i < leftrz + size; i++) {
-        shadow[i] = ASAN_VALID; /* not strictly necessary */
+        shadow[i] = ASAN_VALID; /* XXX: should not be necessary */
     }
     if (partial && (i < total)) {
         shadow[i] = partial;
@@ -179,8 +197,6 @@ kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t right
     for (; i < total; i++) {
         shadow[i] = r_flags;
     }
-
-    asm volatile("" ::: "memory"); /* compiler barrier XXX: is this needed? */
 }
 
 void
@@ -204,12 +220,19 @@ kasan_unpoison_stack(vm_offset_t base, vm_size_t size)
 {
     assert(base);
     assert(size);
+
+    /* align base and size to 8 bytes */
+    vm_offset_t align = base & 0x7;
+    base -= align;
+    size += align;
+    size = (size + 7) & ~0x7;
+
     kasan_unpoison((void *)base, size);
 }
 
 /*
  * write junk into the redzones
-*/
+ */
 static void NOINLINE
 kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
 {
@@ -241,30 +264,43 @@ kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t r
 #endif
 }
 
-void NOINLINE
-kasan_check_range(const void *x, size_t sz, unsigned access_type)
+/*
+ * Report a violation that may be disabled and/or blacklisted. This can only be
+ * called for dynamic checks (i.e. where the fault is recoverable). Use
+ * kasan_crash_report() for static (unrecoverable) violations.
+ *
+ * access: what we were trying to do when the violation occurred
+ * reason: what failed about the access
+ */
+static void
+kasan_violation(uintptr_t addr, size_t size, access_t access, violation_t reason)
 {
-    vm_offset_t invalid;
-
-    if (kasan_in_unsafe()) {
+    assert(__builtin_popcount(access) == 1);
+    if (!kasan_check_enabled(access)) {
+        if (report_ignored) {
+            kasan_log_report(addr, size, access, reason);
+        }
         return;
     }
+    kasan_crash_report(addr, size, access, reason);
+}
 
-    if (kasan_range_poisoned((vm_offset_t)x, sz, &invalid)) {
-        if (kasan_is_blacklisted(access_type)) {
-            return;
-        }
-        kasan_crash_report(invalid, sz, access_type);
-        /* NOTREACHED */
+void NOINLINE
+kasan_check_range(const void *x, size_t sz, access_t access)
+{
+    uintptr_t invalid;
+    uintptr_t ptr = (uintptr_t)x;
+    if (kasan_range_poisoned(ptr, sz, &invalid)) {
+        size_t remaining = sz - (invalid - ptr);
+        kasan_violation(invalid, remaining, access, 0);
    }
 }
 
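For readers new to ASan, the checks above rely on the usual 1:8 shadow encoding: one shadow byte covers an 8-byte granule, 0 means fully addressable, 1..7 means only that many leading bytes are valid, and the ASAN_* codes mark redzones and freed memory. A standalone sketch of the per-granule test (the helper itself is hypothetical; ASAN_VALID is from the surrounding code):

    static bool
    granule_access_ok(uint8_t shadow, uintptr_t addr, size_t width)
    {
        if (shadow == ASAN_VALID) {
            return true;                    /* whole granule addressable */
        }
        if (shadow >= 1 && shadow <= 7) {
            /* partial granule: access must stay within the valid prefix */
            return (addr & 0x7) + width <= shadow;
        }
        return false;                       /* redzone, freed, etc. */
    }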
 /*
- * Check that [base, base+sz) has shadow value `shadow'
- * If not, report a KASan-violation on `addr'
+ * Return true if [base, base+sz) is unpoisoned or has the given shadow value.
  */
-static void
-kasan_assert_shadow(vm_address_t base, vm_size_t sz, vm_address_t addr, uint8_t shadow)
+static bool
+kasan_check_shadow(vm_address_t base, vm_size_t sz, uint8_t shadow)
 {
     sz -= 8 - (base % 8);
     base += 8 - (base % 8);
@@ -273,11 +309,12 @@ kasan_assert_shadow(vm_address_t base, vm_size_t sz, vm_address_t addr, uint8_t
 
     while (base < end) {
         uint8_t *sh = SHADOW_FOR_ADDRESS(base);
-        if (*sh != shadow) {
-            __asan_report_load1(addr);
+        if (*sh && *sh != shadow) {
+            return false;
         }
         base += 8;
     }
+    return true;
 }
 
 /*
@@ -287,16 +324,16 @@
 */
 
 static const char *
-access_type_str(unsigned type)
+access_str(access_t type)
 {
-    if (type & TYPE_LOAD_ALL) {
-        return "load";
-    } else if (type & TYPE_STORE_ALL) {
-        return "store";
+    if (type & TYPE_READ) {
+        return "load from";
+    } else if (type & TYPE_WRITE) {
+        return "store to";
     } else if (type & TYPE_FREE) {
-        return "free";
+        return "free of";
     } else {
-        return "access";
+        return "access of";
     }
 }
 
@@ -309,17 +346,16 @@ static const char *shadow_strings[] = {
     [ASAN_PARTIAL5] = "PARTIAL5",
     [ASAN_PARTIAL6] = "PARTIAL6",
     [ASAN_PARTIAL7] = "PARTIAL7",
-    [ASAN_STACK_RZ] = "",
     [ASAN_STACK_LEFT_RZ] = "STACK_LEFT_RZ",
     [ASAN_STACK_MID_RZ] = "STACK_MID_RZ",
     [ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
     [ASAN_STACK_FREED] = "STACK_FREED",
+    [ASAN_STACK_OOSCOPE] = "STACK_OOSCOPE",
     [ASAN_GLOBAL_RZ] = "GLOBAL_RZ",
-    [ASAN_HEAP_RZ] = "",
     [ASAN_HEAP_LEFT_RZ] = "HEAP_LEFT_RZ",
     [ASAN_HEAP_RIGHT_RZ] = "HEAP_RIGHT_RZ",
     [ASAN_HEAP_FREED] = "HEAP_FREED",
-    [0xff] = "",
+    [0xff] = NULL
 };
 
 #define CRASH_CONTEXT_BEFORE 5
@@ -329,69 +365,126 @@ static size_t
 kasan_shadow_crashlog(uptr p, char *buf, size_t len)
 {
     int i,j;
-    size_t l = 0;
+    size_t n = 0;
     int before = CRASH_CONTEXT_BEFORE;
     int after = CRASH_CONTEXT_AFTER;
 
     uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
     uptr shadow_p = shadow;
+    uptr shadow_page = vm_map_round_page(shadow_p, PAGE_MASK);
 
     /* rewind to start of context block */
     shadow &= ~((uptr)0xf);
     shadow -= 16 * before;
 
+    n += snprintf(buf+n, len-n,
+        " Shadow 0 1 2 3 4 5 6 7 8 9 a b c d e f\n");
+
     for (i = 0; i < 1 + before + after; i++, shadow += 16) {
-        if (vm_map_round_page(shadow, PAGE_MASK) != vm_map_round_page(shadow_p, PAGE_MASK)) {
-            /* don't cross a page boundary, in case the shadow is unmapped */
-            /* XXX: ideally we check instead of ignore */
+        if ((vm_map_round_page(shadow, PAGE_MASK) != shadow_page) && !kasan_is_shadow_mapped(shadow)) {
+            /* avoid unmapped shadow when crossing page boundaries */
             continue;
         }
 
-        l += snprintf(buf+l, len-l, " %#16lx: ", shadow);
+        n += snprintf(buf+n, len-n, " %16lx:", shadow);
+
+        char *left = " ";
+        char *right;
 
         for (j = 0; j < 16; j++) {
             uint8_t *x = (uint8_t *)(shadow + j);
-            l += snprintf(buf+l, len-l, "%02x ", (unsigned)*x);
+
+            right = " ";
+            if ((uptr)x == shadow_p) {
+                left = "[";
+                right = "]";
+            } else if ((uptr)(x + 1) == shadow_p) {
+                right = "";
+            }
+
+            n += snprintf(buf+n, len-n, "%s%02x%s", left, (unsigned)*x, right);
+            left = "";
         }
-        l += snprintf(buf+l, len-l, "\n");
+        n += snprintf(buf+n, len-n, "\n");
     }
 
-    l += snprintf(buf+l, len-l, "\n");
-    return l;
+    n += snprintf(buf+n, len-n, "\n");
    return n;
 }
 
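For orientation, the SHADOW_FOR_ADDRESS() macro used throughout this file is the classic 1:8 shadow mapping; with __asan_shadow_memory_dynamic_address set to KASAN_SHIFT near the top of the file, it is equivalent to something like the following (an illustrative spelling, not the actual macro definition):

    static inline uint8_t *
    shadow_for_address_sketch(uintptr_t addr)
    {
        /* one shadow byte per 8 bytes of kernel address space */
        return (uint8_t *)((addr >> 3) + KASAN_SHIFT);
    }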
-static void NOINLINE
-kasan_crash_report(uptr p, uptr width, unsigned access_type)
+static void
+kasan_report_internal(uptr p, uptr width, access_t access, violation_t reason, bool dopanic)
 {
     const size_t len = 4096;
     static char buf[len];
-    size_t l = 0;
+    size_t n = 0;
 
     uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
     uint8_t shadow_type = *shadow_ptr;
     const char *shadow_str = shadow_strings[shadow_type];
+    if (!shadow_str) {
+        shadow_str = "";
+    }
+    buf[0] = '\0';
+
+    if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
+        n += snprintf(buf+n, len-n, "KASan: free of corrupted/invalid object %#lx\n", p);
+    } else if (reason == REASON_MOD_AFTER_FREE) {
+        n += snprintf(buf+n, len-n, "KASan: UaF of quarantined object %#lx\n", p);
+    } else {
+        n += snprintf(buf+n, len-n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
+                width, access_str(access), p, shadow_str);
+    }
+    n += kasan_shadow_crashlog(p, buf+n, len-n);
 
+    if (dopanic) {
+        panic("%s", buf);
+    } else {
+        printf("%s", buf);
+    }
+}
+
+static void NOINLINE OS_NORETURN
+kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason)
+{
     kasan_handle_test();
+    kasan_report_internal(p, width, access, reason, true);
+    __builtin_unreachable(); /* we can't handle this returning anyway */
+}
 
-    buf[0] = '\0';
-    l += snprintf(buf+l, len-l,
-        "KASan: invalid %lu-byte %s @ %#lx [%s]\n"
-        "Shadow %#02x @ %#lx\n\n",
-        width, access_type_str(access_type), p, shadow_str,
-        (unsigned)shadow_type, (unsigned long)shadow_ptr);
+static void
+kasan_log_report(uptr p, uptr width, access_t access, violation_t reason)
+{
+    const size_t len = 256;
+    char buf[len];
+    size_t l = 0;
+    uint32_t nframes = 14;
+    uintptr_t frames[nframes];
+    uintptr_t *bt = frames;
+
+    kasan_report_internal(p, width, access, reason, false);
+
+    /*
+     * print a backtrace
+     */
 
-    l += kasan_shadow_crashlog(p, buf+l, len-l);
+    nframes = backtrace_frame(bt, nframes, __builtin_frame_address(0)); /* ignore current frame */
 
-    panic("%s", buf);
+    buf[0] = '\0';
+    l += snprintf(buf+l, len-l, "Backtrace: ");
+    for (uint32_t i = 0; i < nframes; i++) {
+        l += snprintf(buf+l, len-l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
+    }
+    l += snprintf(buf+l, len-l, "\n");
+
+    printf("%s", buf);
 }
 
 #define REPORT_DECLARE(n) \
-    void __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD); } \
-    void __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE); } \
-    void __asan_report_exp_load##n(uptr, int32_t); \
-    void __asan_report_exp_store##n(uptr, int32_t); \
-    void __asan_report_exp_load##n(uptr __unused p, int32_t __unused e) { ABI_UNSUPPORTED; } \
-    void __asan_report_exp_store##n(uptr __unused p, int32_t __unused e) { ABI_UNSUPPORTED; }
+    void OS_NORETURN __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD, 0); } \
+    void OS_NORETURN __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE, 0); } \
+    void UNSUPPORTED_API(__asan_report_exp_load##n, uptr a, int32_t b); \
+    void UNSUPPORTED_API(__asan_report_exp_store##n, uptr a, int32_t b);
 
 REPORT_DECLARE(1)
 REPORT_DECLARE(2)
@@ -399,21 +492,32 @@ REPORT_DECLARE(4)
 REPORT_DECLARE(8)
 REPORT_DECLARE(16)
 
-void __asan_report_load_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_LOAD); }
-void __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE); }
+void OS_NORETURN __asan_report_load_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_LOAD, 0); }
+void OS_NORETURN __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE, 0); }
 
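For reference, each REPORT_DECLARE(n) above stamps out a pair of fixed-width compiler-ABI report entry points (ignoring the UNSUPPORTED_API plumbing for the '_exp_' variants); REPORT_DECLARE(8), for example, expands to roughly:

    void OS_NORETURN
    __asan_report_load8(uptr p)
    {
        kasan_crash_report(p, 8, TYPE_LOAD, 0);
    }

    void OS_NORETURN
    __asan_report_store8(uptr p)
    {
        kasan_crash_report(p, 8, TYPE_STORE, 0);
    }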
 /* unpoison the current stack */
-/* XXX: as an optimization, we could unpoison only up to the current stack depth */
 void NOINLINE
-kasan_unpoison_curstack(void)
+kasan_unpoison_curstack(bool whole_stack)
 {
-    kasan_unpoison_stack(ml_stack_base(), ml_stack_size());
+    uintptr_t base = ml_stack_base();
+    size_t sz = ml_stack_size();
+    uintptr_t cur = (uintptr_t)&base;
+
+    if (whole_stack) {
+        cur = base;
+    }
+
+    if (cur >= base && cur < base + sz) {
+        /* unpoison from current stack depth to the top */
+        size_t unused = cur - base;
+        kasan_unpoison_stack(cur, sz - unused);
+    }
 }
 
 void NOINLINE
 __asan_handle_no_return(void)
 {
-    kasan_unpoison_curstack();
+    kasan_unpoison_curstack(false);
     kasan_unpoison_fakestack(current_thread());
 }
 
@@ -423,7 +527,7 @@ kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invali
     uint8_t *shadow;
     vm_size_t i;
 
-    if (!kasan_initialized || !kasan_enabled) {
+    if (!kasan_enabled) {
         return false;
     }
 
@@ -472,16 +576,16 @@ kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
     unsigned long sectsz;
     void *sect;
 
+#if KASAN_DYNAMIC_BLACKLIST
+    kasan_dybl_load_kext(base, bundleid);
+#endif
+
     /* find the kasan globals segment/section */
     sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
     if (sect) {
         kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
         kexts_loaded++;
     }
-
-#if KASAN_DYNAMIC_BLACKLIST
-    kasan_dybl_load_kext(base, bundleid);
-#endif
 }
 
 void NOINLINE
@@ -502,11 +606,18 @@ kasan_unload_kext(vm_offset_t base, vm_size_t size)
 #endif
 }
 
+/*
+ * Turn off as much as possible for the panic path, etc. There's no way to
+ * turn it back on.
+ */
 void NOINLINE
 kasan_disable(void)
 {
     __asan_option_detect_stack_use_after_return = 0;
+    fakestack_enabled = 0;
     kasan_enabled = 0;
+    quarantine_enabled = 0;
+    enabled_checks = 0;
 }
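Earlier in this hunk, kasan_unpoison_curstack() uses the address of a local variable as a conservative estimate of the current stack depth. The same trick in isolation (a hypothetical helper; assumes a downward-growing stack, as on all XNU targets):

    static size_t
    stack_bytes_in_use_sketch(void)
    {
        uintptr_t base = ml_stack_base();   /* lowest address of this stack */
        size_t sz = ml_stack_size();
        uintptr_t cur = (uintptr_t)&base;   /* a local lives in the current frame */

        /* bytes between the deepest live frame and the top of the stack */
        return (base + sz) - cur;
    }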
 
 static void NOINLINE
@@ -520,21 +631,21 @@ kasan_init_xnu_globals(void)
     kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;
 
     if (!header) {
-        printf("KASAN: failed to find kernel mach header\n");
-        printf("KASAN: redzones for globals not poisoned\n");
+        printf("KASan: failed to find kernel mach header\n");
+        printf("KASan: redzones for globals not poisoned\n");
         return;
     }
 
     globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
     if (!globals) {
-        printf("KASAN: failed to find segment %s section %s\n", seg, sect);
-        printf("KASAN: redzones for globals not poisoned\n");
+        printf("KASan: failed to find segment %s section %s\n", seg, sect);
+        printf("KASan: redzones for globals not poisoned\n");
         return;
     }
     size = (vm_size_t)_size;
 
-    printf("KASAN: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
-    printf("KASAN: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));
+    printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
+    printf("KASan: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));
 
     kasan_init_globals(globals, size);
 }
@@ -542,12 +653,12 @@ void NOINLINE
 kasan_late_init(void)
 {
-    kasan_init_fakestack();
-    kasan_init_xnu_globals();
-
 #if KASAN_DYNAMIC_BLACKLIST
     kasan_init_dybl();
 #endif
+
+    kasan_init_fakestack();
+    kasan_init_xnu_globals();
 }
 
 void NOINLINE
@@ -582,6 +693,8 @@ kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
 void NOINLINE
 kasan_init(void)
 {
+    unsigned arg;
+
     simple_lock_init(&kasan_vm_lock, 0);
 
     /* Map all of the kernel text and data */
@@ -589,7 +702,39 @@ kasan_init(void)
 
     kasan_arch_init();
 
-    kasan_initialized = 1;
+    /*
+     * handle KASan boot-args
+     */
+
+    if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
+        enabled_checks = arg;
+    }
+
+    if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
+        if (arg & KASAN_ARGS_FAKESTACK) {
+            fakestack_enabled = 1;
+        }
+        if (arg & KASAN_ARGS_REPORTIGNORED) {
+            report_ignored = 1;
+        }
+        if (arg & KASAN_ARGS_NODYCHECKS) {
+            enabled_checks &= ~TYPE_DYNAMIC;
+        }
+        if (arg & KASAN_ARGS_NOPOISON_HEAP) {
+            enabled_checks &= ~TYPE_POISON_HEAP;
+        }
+        if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
+            enabled_checks &= ~TYPE_POISON_GLOBAL;
+        }
+    }
+
+    if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
+        free_yield = arg;
+    }
+
+    /* kasan.bl boot-arg handled in kasan_init_dybl() */
+
+    quarantine_enabled = 1;
     kasan_enabled = 1;
 }
 
@@ -598,7 +743,7 @@ kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool is_zero)
 {
     assert(address < VM_MAX_KERNEL_ADDRESS);
 
-    if (!kasan_initialized || !kasan_enabled) {
+    if (!kasan_enabled) {
         return;
     }
 
@@ -641,12 +786,13 @@ kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
 */
 
 struct kasan_alloc_header {
-    uint32_t magic;
+    uint16_t magic;
+    uint16_t crc;
     uint32_t alloc_size;
     uint32_t user_size;
     struct {
-        uint32_t left_rz : 28;
-        uint32_t frames : 4;
+        uint32_t left_rz : 32 - BACKTRACE_BITS;
+        uint32_t frames : BACKTRACE_BITS;
     };
 };
 _Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");
@@ -656,11 +802,18 @@ struct kasan_alloc_footer {
 };
 _Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");
 
-#define MAGIC_XOR ((uint32_t)0xA110C8ED)
-static uint32_t
-magic_for_addr(vm_offset_t addr)
+#define LIVE_XOR ((uint16_t)0x3a65)
+#define FREE_XOR ((uint16_t)0xf233)
+
+static uint16_t
+magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
 {
-    return (uint32_t)addr ^ MAGIC_XOR;
+    uint16_t magic = addr & 0xFFFF;
+    magic ^= (addr >> 16) & 0xFFFF;
+    magic ^= (addr >> 32) & 0xFFFF;
+    magic ^= (addr >> 48) & 0xFFFF;
+    magic ^= magic_xor;
+    return magic;
 }
 
 static struct kasan_alloc_header *
@@ -729,6 +882,25 @@ kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
     return frames;
 }
 
+/* addr: user address of allocation */
+static uint16_t
+kasan_alloc_crc(vm_offset_t addr)
+{
+    struct kasan_alloc_header *h = header_for_user_addr(addr);
+    vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
+
+    uint16_t crc_orig = h->crc;
+    h->crc = 0;
+
+    uint16_t crc = 0;
+    crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
+    crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);
+
+    h->crc = crc_orig;
+
+    return crc;
+}
+
 /*
  * addr: base address of full allocation (including redzones)
  * size: total size of allocation (include redzones)
@@ -755,7 +927,7 @@ kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
 
     /* stash the allocation sizes in the left redzone */
     struct kasan_alloc_header *h = header_for_user_addr(addr);
-    h->magic = magic_for_addr(addr);
+    h->magic = magic_for_addr(addr, LIVE_XOR);
     h->left_rz = leftrz;
     h->alloc_size = size;
     h->user_size = req;
@@ -765,6 +937,9 @@ kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
     struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
     h->frames = kasan_alloc_bt(f->backtrace, fsize, 2);
 
+    /* checksum the whole object, minus the user part */
+    h->crc = kasan_alloc_crc(addr);
+
     return addr;
 }
 
@@ -778,10 +953,6 @@ kasan_dealloc(vm_offset_t addr, vm_size_t *size)
 {
     assert(size && addr);
     struct kasan_alloc_header *h = header_for_user_addr(addr);
-    if (h->magic != magic_for_addr(addr)) {
-        /* no point blacklisting here - this is fatal */
-        kasan_crash_report(addr, *size, TYPE_FREE);
-    }
     *size = h->alloc_size;
     return addr - h->left_rz;
 }
 
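kasan_alloc_crc() above (and fle_crc() below) both follow the standard pattern for checksumming a structure that stores its own checksum: zero the field, sum, then restore it. The same idea in isolation (the struct and helper are hypothetical; __nosan_crc16() is the kernel helper used above):

    struct hdr_sketch {
        uint16_t crc;
        /* ... fields covered by the checksum ... */
    };

    static uint16_t
    hdr_crc_sketch(struct hdr_sketch *h, size_t len)
    {
        uint16_t saved = h->crc;
        h->crc = 0;                         /* exclude the field from its own sum */
        uint16_t crc = __nosan_crc16(0, h, len);
        h->crc = saved;
        return crc;
    }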
@@ -794,7 +965,7 @@
 vm_size_t
 kasan_user_size(vm_offset_t addr)
 {
     struct kasan_alloc_header *h = header_for_user_addr(addr);
-    assert(h->magic == magic_for_addr(addr));
+    assert(h->magic == magic_for_addr(addr, LIVE_XOR));
     return h->user_size;
 }
 
@@ -807,36 +978,30 @@ kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
     struct kasan_alloc_header *h = header_for_user_addr(addr);
 
     /* map heap type to an internal access type */
-    unsigned type;
-    if (heap_type == KASAN_HEAP_KALLOC) {
-        type = TYPE_KFREE;
-    } else if (heap_type == KASAN_HEAP_ZALLOC) {
-        type = TYPE_ZFREE;
-    } else if (heap_type == KASAN_HEAP_FAKESTACK) {
-        type = TYPE_FSFREE;
-    }
+    access_t type = heap_type == KASAN_HEAP_KALLOC ? TYPE_KFREE :
+        heap_type == KASAN_HEAP_ZALLOC ? TYPE_ZFREE :
+        heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0;
 
-    /* check the magic matches */
-    if (h->magic != magic_for_addr(addr)) {
-        if (kasan_is_blacklisted(type)) {
-            return;
-        }
-        kasan_crash_report(addr, size, type);
+    /* check the magic and crc match */
+    if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
+        kasan_violation(addr, size, type, REASON_BAD_METADATA);
+    }
+    if (h->crc != kasan_alloc_crc(addr)) {
+        kasan_violation(addr, size, type, REASON_MOD_OOB);
     }
 
     /* check the freed size matches what we recorded at alloc time */
     if (h->user_size != size) {
-        if (kasan_is_blacklisted(type)) {
-            return;
-        }
-        kasan_crash_report(addr, size, type);
+        kasan_violation(addr, size, type, REASON_INVALID_SIZE);
     }
 
     vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;
 
     /* Check that the redzones are valid */
-    kasan_assert_shadow(addr - h->left_rz, h->left_rz, addr, ASAN_HEAP_LEFT_RZ);
-    kasan_assert_shadow(addr + h->user_size, rightrz_sz, addr, ASAN_HEAP_RIGHT_RZ);
+    if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
+        !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
+        kasan_violation(addr, size, type, REASON_BAD_METADATA);
+    }
 
     /* Check the allocated range is not poisoned */
     kasan_check_range((void *)addr, size, type);
@@ -849,15 +1014,15 @@ kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
 */
 
 struct freelist_entry {
-    uint32_t magic;
-    uint32_t checksum;
+    uint16_t magic;
+    uint16_t crc;
     STAILQ_ENTRY(freelist_entry) list;
     union {
         struct {
             vm_size_t size : 28;
             vm_size_t user_size : 28;
-            vm_size_t frames : 4; /* number of frames in backtrace */
-            vm_size_t __unused : 4;
+            vm_size_t frames : BACKTRACE_BITS; /* number of frames in backtrace */
+            vm_size_t __unused : 8 - BACKTRACE_BITS;
         };
         uint64_t bits;
     };
@@ -866,13 +1031,6 @@ struct freelist_entry {
 };
 _Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");
 
-#define FREELIST_MAGIC_XOR ((uint32_t)0xF23333D)
-static uint32_t
-freelist_magic(vm_offset_t addr)
-{
-    return (uint32_t)addr ^ FREELIST_MAGIC_XOR;
-}
-
 struct quarantine {
     STAILQ_HEAD(freelist_head, freelist_entry) freelist;
     unsigned long entries;
@@ -887,6 +1045,12 @@ struct quarantine quarantines[] = {
     { STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
 };
 
+static uint16_t
+fle_crc(struct freelist_entry *fle)
+{
+    return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
+}
+
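The quarantines[] table above implements a per-heap FIFO: frees are appended at the tail, and once the entry or byte budget is exceeded the oldest entry is handed back for a real free. A minimal sketch of that discipline using the same STAILQ machinery (the helper and the 'max_entries' field name are assumptions; only 'freelist' and 'entries' are visible in this hunk):

    static struct freelist_entry *
    quarantine_push_sketch(struct quarantine *q, struct freelist_entry *fle)
    {
        struct freelist_entry *tofree = NULL;

        STAILQ_INSERT_TAIL(&q->freelist, fle, list);
        q->entries++;

        if (q->entries > q->max_entries) {      /* hypothetical budget field */
            /* budget exceeded: the caller really frees the oldest entry */
            tofree = STAILQ_FIRST(&q->freelist);
            STAILQ_REMOVE_HEAD(&q->freelist, list);
            q->entries--;
        }
        return tofree;
    }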
 /*
  * addr, sizep: pointer/size of full allocation including redzone
 */
@@ -925,7 +1089,7 @@ kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
 
     /* create a new freelist entry */
     fle = (struct freelist_entry *)addr;
-    fle->magic = freelist_magic((vm_offset_t)fle);
+    fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
     fle->size = size;
     fle->user_size = user_size;
     fle->frames = 0;
@@ -934,7 +1098,9 @@ kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
         fle->zone = *zone;
     }
     if (type != KASAN_HEAP_FAKESTACK) {
+        /* don't do expensive things on the fakestack path */
         fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
+        fle->crc = fle_crc(fle);
     }
 
     boolean_t flg;
@@ -974,12 +1140,17 @@ kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
 
         size = tofree->size;
         addr = (vm_offset_t)tofree;
-        if (tofree->magic != freelist_magic(addr)) {
-            kasan_crash_report(addr, size, TYPE_FREE);
+
+        /* check the magic and crc match */
+        if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
+            kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
+        }
+        if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
+            kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
         }
 
         /* clobber the quarantine header */
-        kasan_rz_clobber(addr, 0, sizeof(struct freelist_entry), 0);
+        __nosan_bzero((void *)addr, sizeof(struct freelist_entry));
 
     } else {
         /* quarantine is not full - don't really free anything */
@@ -1004,6 +1175,10 @@ kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
     vm_size_t user_size, bool quarantine)
 {
     kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);
+
+    if (free_yield) {
+        thread_yield_internal(free_yield);
+    }
 }
 
 uptr
@@ -1026,12 +1201,11 @@ __asan_poison_cxx_array_cookie(uptr p)
     *shadow = ASAN_ARRAY_COOKIE;
 }
 
-#define ACCESS_CHECK_DECLARE(type, sz, access_type) \
+#define ACCESS_CHECK_DECLARE(type, sz, access) \
     void __asan_##type##sz(uptr addr) { \
-        kasan_check_range((const void *)addr, sz, access_type); \
+        kasan_check_range((const void *)addr, sz, access); \
    } \
-    void __asan_exp_##type##sz(uptr, int32_t); \
-    void __asan_exp_##type##sz(uptr __unused addr, int32_t __unused e) { ABI_UNSUPPORTED; }
+    void UNSUPPORTED_API(__asan_exp_##type##sz, uptr a, int32_t b);
 
 ACCESS_CHECK_DECLARE(load, 1, TYPE_LOAD);
 ACCESS_CHECK_DECLARE(load, 2, TYPE_LOAD);
@@ -1056,16 +1230,6 @@ __asan_storeN(uptr addr, size_t sz)
     kasan_check_range((const void *)addr, sz, TYPE_STORE);
 }
 
-void __asan_exp_loadN(uptr, size_t, int32_t);
-void __asan_exp_storeN(uptr, size_t, int32_t);
-void __asan_exp_loadN(uptr __unused addr, size_t __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
-void __asan_exp_storeN(uptr __unused addr, size_t __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
-
-void __asan_report_exp_load_n(uptr, unsigned long, int32_t);
-void __asan_report_exp_store_n(uptr, unsigned long, int32_t);
-void __asan_report_exp_load_n(uptr __unused p, unsigned long __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
-void __asan_report_exp_store_n(uptr __unused p, unsigned long __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
-
 static void
 kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
 {
@@ -1084,115 +1248,76 @@ SET_SHADOW_DECLARE(f3)
 SET_SHADOW_DECLARE(f5)
 SET_SHADOW_DECLARE(f8)
 
+
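As with REPORT_DECLARE, each ACCESS_CHECK_DECLARE above stamps out one fixed-size instrumentation hook; ACCESS_CHECK_DECLARE(load, 4, TYPE_LOAD), for example, expands to roughly the following (again ignoring the UNSUPPORTED_API plumbing):

    void
    __asan_load4(uptr addr)
    {
        kasan_check_range((const void *)addr, 4, TYPE_LOAD);
    }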
 /*
- * XXX: implement these
+ * Call 'cb' for each contiguous range of the shadow map. This could be more
+ * efficient by walking the page table directly.
  */
-
-void __asan_alloca_poison(uptr addr, uptr size)
-{
-    (void)addr;
-    (void)size;
-}
-
-void __asan_allocas_unpoison(uptr top, uptr bottom)
-{
-    (void)top;
-    (void)bottom;
-}
-
-void
-__sanitizer_ptr_sub(uptr a, uptr b)
-{
-    (void)a;
-    (void)b;
-}
-
-void
-__sanitizer_ptr_cmp(uptr a, uptr b)
-{
-    (void)a;
-    (void)b;
-}
-
-void
-__asan_poison_stack_memory(uptr addr, size_t size)
+int
+kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
 {
-    (void)addr;
-    (void)size;
-}
+    uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
+    uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
+    shadow_base = vm_map_trunc_page(shadow_base, PAGE_MASK);
+    shadow_top = vm_map_round_page(shadow_top, PAGE_MASK);
+
+    uintptr_t start = 0, end = 0;
+
+    for (uintptr_t addr = shadow_base; addr < shadow_top; addr += PAGE_SIZE) {
+        if (kasan_is_shadow_mapped(addr)) {
+            if (start == 0) {
+                start = addr;
+            }
+            end = addr + PAGE_SIZE;
+        } else if (start && end) {
+            cb(start, end, ctx);
+            start = end = 0;
+        }
+    }
 
-void
-__asan_unpoison_stack_memory(uptr addr, size_t size)
-{
-    (void)addr;
-    (void)size;
-}
+    if (start && end) {
+        cb(start, end, ctx);
+    }
 
-void
-__sanitizer_annotate_contiguous_container(const void *beg,
-        const void *end,
-        const void *old_mid,
-        const void *new_mid)
-{
-    (void)beg;
-    (void)end;
-    (void)old_mid;
-    (void)new_mid;
+    return 0;
 }
 
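kasan_traverse_mappings() hands each contiguous mapped shadow range to the callback. A hypothetical caller that tallies resident shadow pages (the exact parameter types of pmap_traverse_callback are assumed here):

    static int
    count_shadow_pages_sketch(vm_offset_t start, vm_offset_t end, void *ctx)
    {
        *(unsigned *)ctx += (unsigned)((end - start) / PAGE_SIZE);
        return 0;
    }

    /* usage: unsigned n = 0; kasan_traverse_mappings(count_shadow_pages_sketch, &n); */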
 /*
+ * XXX: implement these
  */
-void
-__asan_init(void)
-{
-}
-
-#define VERSION_DECLARE(v) \
-    void __asan_version_mismatch_check_##v(void); \
-    void __asan_version_mismatch_check_##v(void) {}
-
-VERSION_DECLARE(v8)
-VERSION_DECLARE(apple_802)
-VERSION_DECLARE(apple_900)
-
-void
-__asan_register_globals(uptr __unused a, uptr __unused b)
-{
-    ABI_UNSUPPORTED;
-}
-
-void
-__asan_unregister_globals(uptr __unused a, uptr __unused b)
-{
-    ABI_UNSUPPORTED;
-}
+UNUSED_ABI(__asan_alloca_poison, uptr addr, uptr size);
+UNUSED_ABI(__asan_allocas_unpoison, uptr top, uptr bottom);
+UNUSED_ABI(__sanitizer_ptr_sub, uptr a, uptr b);
+UNUSED_ABI(__sanitizer_ptr_cmp, uptr a, uptr b);
+UNUSED_ABI(__sanitizer_annotate_contiguous_container, const void *a, const void *b, const void *c, const void *d);
+UNUSED_ABI(__asan_poison_stack_memory, uptr addr, size_t size);
+UNUSED_ABI(__asan_unpoison_stack_memory, uptr a, uptr b);
 
-void
-__asan_register_image_globals(uptr __unused ptr)
-{
-}
-
-void
-__asan_unregister_image_globals(uptr __unused ptr)
-{
-}
-
-void
-__asan_init_v5(void)
-{
-}
-
-void
-__asan_before_dynamic_init(uptr __unused arg)
-{
-}
-
-void
-__asan_after_dynamic_init(void)
-{
-}
+/*
+ * Miscellaneous unimplemented asan ABI
+ */
+UNUSED_ABI(__asan_init, void);
+UNUSED_ABI(__asan_register_image_globals, uptr a);
+UNUSED_ABI(__asan_unregister_image_globals, uptr a);
+UNUSED_ABI(__asan_before_dynamic_init, uptr a);
+UNUSED_ABI(__asan_after_dynamic_init, void);
+UNUSED_ABI(__asan_version_mismatch_check_v8, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_802, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_900, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);
+
+void UNSUPPORTED_API(__asan_init_v5, void);
+void UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
+void UNSUPPORTED_API(__asan_unregister_globals, uptr a, uptr b);
+void UNSUPPORTED_API(__asan_register_elf_globals, uptr a, uptr b, uptr c);
+void UNSUPPORTED_API(__asan_unregister_elf_globals, uptr a, uptr b, uptr c);
+
+void UNSUPPORTED_API(__asan_exp_loadN, uptr addr, size_t sz, int32_t e);
+void UNSUPPORTED_API(__asan_exp_storeN, uptr addr, size_t sz, int32_t e);
+void UNSUPPORTED_API(__asan_report_exp_load_n, uptr addr, unsigned long b, int32_t c);
+void UNSUPPORTED_API(__asan_report_exp_store_n, uptr addr, unsigned long b, int32_t c);
 
 /*
 *
@@ -1215,22 +1340,40 @@ sysctl_kasan_test(__unused struct sysctl_oid *oidp, __unused void *arg1, int arg
     return err;
 }
 
+static int
+sysctl_fakestack_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, int __unused arg2, struct sysctl_req *req)
+{
+    int ch, err, val;
+
+    err = sysctl_io_number(req, fakestack_enabled, sizeof(fakestack_enabled), &val, &ch);
+    if (err == 0 && ch) {
+        fakestack_enabled = !!val;
+        __asan_option_detect_stack_use_after_return = !!val;
+    }
+
+    return err;
+}
+
 SYSCTL_DECL(kasan);
 SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
 
 SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
-SYSCTL_INT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
-SYSCTL_INT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
-SYSCTL_LONG(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, "");
-SYSCTL_LONG(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, "");
-SYSCTL_LONG(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, "");
-
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, fakestack, CTLFLAG_RD, NULL, FAKESTACK, "");
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, memintrinsics, CTLFLAG_RD, NULL, MEMINTRINSICS, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, checks, CTLFLAG_RW, &enabled_checks, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, report_ignored, CTLFLAG_RW, &report_ignored, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, free_yield_ms, CTLFLAG_RW, &free_yield, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, "");
+SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
+SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
+SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
+SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");
+
+SYSCTL_PROC(_kern_kasan, OID_AUTO, fakestack,
+    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+    0, 0, sysctl_fakestack_enable, "I", "");
 
 SYSCTL_PROC(_kern_kasan, OID_AUTO, test,
     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,