#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
+#include <mach/sdt.h>
#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/kernel_mach_header.h>
#include <kasan_internal.h>
#include <memintrinsics.h>
-#if !KASAN_DEBUG
-# undef NOINLINE
-# define NOINLINE
-#endif
-
const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_SHIFT;
-static long kexts_loaded;
-
-long shadow_pages_total;
-long shadow_pages_used;
+static unsigned kexts_loaded;
+unsigned shadow_pages_total;
+unsigned shadow_pages_used;
vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;
-static bool kasan_initialized;
-static int kasan_enabled;
-static int quarantine_enabled = 1;
+static unsigned kasan_enabled;
+static unsigned quarantine_enabled;
+static unsigned enabled_checks = TYPE_ALL & ~TYPE_LEAK; /* bitmask of enabled checks */
+static unsigned report_ignored; /* issue non-fatal report for disabled/blacklisted checks */
+static unsigned free_yield = 0; /* ms yield after each free */
+static unsigned leak_threshold = 3; /* threshold for uninitialized memory leak detection */
+static unsigned leak_fatal_threshold = 0; /* threshold for treating leaks as fatal errors (0 means never) */
-static void kasan_crash_report(uptr p, uptr width, unsigned access_type);
+/* forward decls */
+static void kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason);
+static void kasan_log_report(uptr p, uptr width, access_t access, violation_t reason);
+
+/* imported osfmk functions */
extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);
-#define ABI_UNSUPPORTED do { panic("KASan: unsupported ABI: %s\n", __func__); } while (0)
-
-#define BACKTRACE_MAXFRAMES 16
-
-decl_simple_lock_data(, kasan_vm_lock);
-
-_Atomic int unsafe_count = 0;
+/*
+ * unused: expected to be called, but (currently) does nothing
+ */
+#define UNUSED_ABI(func, ...) \
+ _Pragma("clang diagnostic push") \
+ _Pragma("clang diagnostic ignored \"-Wunused-parameter\"") \
+ void func(__VA_ARGS__); \
+ void func(__VA_ARGS__) {}; \
+ _Pragma("clang diagnostic pop") \
-void
-kasan_unsafe_start(void)
-{
- if (__c11_atomic_fetch_add(&unsafe_count, 1, memory_order_relaxed) == 128) {
- panic("kasan_unsafe_start overflow");
- }
-}
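+/*
+ * Frame counts are stored in BACKTRACE_BITS-wide bitfields (see struct
+ * kasan_alloc_header and struct freelist_entry), so at most
+ * (1 << BACKTRACE_BITS) - 1 frames can be recorded per backtrace.
+ */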
+static const size_t BACKTRACE_BITS = 4;
+static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;
-void
-kasan_unsafe_end(void)
-{
- if (__c11_atomic_fetch_sub(&unsafe_count, 1, memory_order_relaxed) == 0) {
- panic("kasan_unsafe_end underflow");
- }
-}
+static vm_size_t kasan_alloc_retrieve_bt(vm_address_t addr, uintptr_t frames[static BACKTRACE_MAXFRAMES]);
-static bool
-kasan_in_unsafe(void)
-{
- return atomic_load_explicit(&unsafe_count, memory_order_relaxed) != 0;
-}
+decl_simple_lock_data(, kasan_vm_lock);
+static thread_t kasan_lock_holder;
/*
* kasan is called from the interrupt path, so we need to disable interrupts to
kasan_lock(boolean_t *b)
{
*b = ml_set_interrupts_enabled(false);
- simple_lock(&kasan_vm_lock);
+ simple_lock(&kasan_vm_lock, LCK_GRP_NULL);
+ kasan_lock_holder = current_thread();
}
void
kasan_unlock(boolean_t b)
{
+ kasan_lock_holder = THREAD_NULL;
simple_unlock(&kasan_vm_lock);
ml_set_interrupts_enabled(b);
}
+/*
+ * Return true if 'thread' holds the kasan lock. Only safe if 'thread' is the
+ * current thread.
+ */
+bool
+kasan_lock_held(thread_t thread)
+{
+ return thread && thread == kasan_lock_holder;
+}
+
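+/*
+ * Return true if KASan is on, checks of type 'access' are enabled, and the
+ * check is not suppressed by the blacklist.
+ */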
+static inline bool
+kasan_check_enabled(access_t access)
+{
+ return kasan_enabled && (enabled_checks & access) && !kasan_is_blacklisted(access);
+}
+
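+/* Return true if poisoning with the given shadow 'flags' is currently enabled */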
+static inline bool
+kasan_poison_active(uint8_t flags)
+{
+ switch (flags) {
+ case ASAN_GLOBAL_RZ:
+ return kasan_check_enabled(TYPE_POISON_GLOBAL);
+ case ASAN_HEAP_RZ:
+ case ASAN_HEAP_LEFT_RZ:
+ case ASAN_HEAP_RIGHT_RZ:
+ case ASAN_HEAP_FREED:
+ return kasan_check_enabled(TYPE_POISON_HEAP);
+ default:
+ return true;
+ };
+}
+
/*
* poison redzones in the shadow map
*/
assert((leftrz & 0x07) == 0);
assert((total & 0x07) == 0);
- if (!kasan_enabled || !kasan_initialized) {
+ if (!kasan_enabled || !kasan_poison_active(flags)) {
return;
}
shadow[i] = l_flags;
}
for (; i < leftrz + size; i++) {
- shadow[i] = ASAN_VALID; /* not strictly necessary */
+ shadow[i] = ASAN_VALID; /* XXX: should not be necessary */
}
if (partial && (i < total)) {
shadow[i] = partial;
for (; i < total; i++) {
shadow[i] = r_flags;
}
-
- asm volatile("" ::: "memory"); /* compiler barrier XXX: is this needed? */
}
void
{
assert(base);
assert(size);
+
+ /* align base and size to 8 bytes */
+ vm_offset_t align = base & 0x7;
+ base -= align;
+ size += align;
+ size = (size + 7) & ~0x7;
+
kasan_unpoison((void *)base, size);
}
/*
* write junk into the redzones
-*/
+ */
static void NOINLINE
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
#endif
}
-void NOINLINE
-kasan_check_range(const void *x, size_t sz, unsigned access_type)
+/*
+ * Report a violation that may be disabled and/or blacklisted. This can only be
+ * called for dynamic checks (i.e. where the fault is recoverable). Use
+ * kasan_crash_report() for static (unrecoverable) violations.
+ *
+ * access: what we were trying to do when the violation occurred
+ * reason: what failed about the access
+ */
+static void
+kasan_violation(uintptr_t addr, size_t size, access_t access, violation_t reason)
{
- vm_offset_t invalid;
-
- if (kasan_in_unsafe()) {
+ assert(__builtin_popcount(access) == 1);
+ if (!kasan_check_enabled(access)) {
+ if (report_ignored) {
+ kasan_log_report(addr, size, access, reason);
+ }
return;
}
+ kasan_crash_report(addr, size, access, reason);
+}
- if (kasan_range_poisoned((vm_offset_t)x, sz, &invalid)) {
- if (kasan_is_blacklisted(access_type)) {
- return;
- }
- kasan_crash_report(invalid, sz, access_type);
- /* NOTREACHED */
+void NOINLINE
+kasan_check_range(const void *x, size_t sz, access_t access)
+{
+ uintptr_t invalid;
+ uintptr_t ptr = (uintptr_t)x;
+ if (kasan_range_poisoned(ptr, sz, &invalid)) {
+ size_t remaining = sz - (invalid - ptr);
+ kasan_violation(invalid, remaining, access, 0);
}
}
/*
- * Check that [base, base+sz) has shadow value `shadow'
- * If not, report a KASan-violation on `addr'
+ * Return true if [base, base+sz) is unpoisoned or has the given shadow value.
*/
-static void
-kasan_assert_shadow(vm_address_t base, vm_size_t sz, vm_address_t addr, uint8_t shadow)
+bool
+kasan_check_shadow(vm_address_t base, vm_size_t sz, uint8_t shadow)
{
sz -= 8 - (base % 8);
base += 8 - (base % 8);
while (base < end) {
uint8_t *sh = SHADOW_FOR_ADDRESS(base);
- if (*sh != shadow) {
- __asan_report_load1(addr);
+ if (*sh && *sh != shadow) {
+ return false;
}
base += 8;
}
+ return true;
+}
+
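+/*
+ * Report a run of possibly uninitialized bytes: treat it as a violation if it
+ * exceeds the fatal threshold, otherwise emit the leak_detected DTrace probe,
+ * including the allocation backtrace for non-stack buffers.
+ */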
+static void
+kasan_report_leak(vm_address_t base, vm_size_t sz, vm_offset_t offset, vm_size_t leak_sz)
+{
+ if (leak_fatal_threshold > leak_threshold && leak_sz >= leak_fatal_threshold) {
+ kasan_violation(base + offset, leak_sz, TYPE_LEAK, REASON_UNINITIALIZED);
+ }
+
+ char string_rep[BACKTRACE_MAXFRAMES * 20] = {};
+ vm_offset_t stack_base = dtrace_get_kernel_stack(current_thread());
+ bool is_stack = (base >= stack_base && base < (stack_base + kernel_stack_size));
+
+ if (!is_stack) {
+ uintptr_t alloc_bt[BACKTRACE_MAXFRAMES] = {};
+ vm_size_t num_frames = 0;
+ size_t l = 0;
+ num_frames = kasan_alloc_retrieve_bt(base, alloc_bt);
+ for (vm_size_t i = 0; i < num_frames; i++) {
+ l += scnprintf(string_rep + l, sizeof(string_rep) - l, " %lx", alloc_bt[i]);
+ }
+ }
+
+ DTRACE_KASAN5(leak_detected,
+ vm_address_t, base,
+ vm_size_t, sz,
+ vm_offset_t, offset,
+ vm_size_t, leak_sz,
+ char *, string_rep);
+}
+
+/*
+ * Check for possible uninitialized memory contained in [base, base+sz).
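+ * We look for the longest run of bytes still set to the KASAN_UNINITIALIZED_HEAP
+ * fill pattern and report a potential leak if the run reaches leak_threshold.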
+ */
+void
+kasan_check_uninitialized(vm_address_t base, vm_size_t sz)
+{
+ if (!(enabled_checks & TYPE_LEAK) || sz < leak_threshold) {
+ return;
+ }
+
+ vm_address_t cur = base;
+ vm_address_t end = base + sz;
+ vm_size_t count = 0;
+ vm_size_t max_count = 0;
+ vm_address_t leak_offset = 0;
+ uint8_t byte = 0;
+
+ while (cur < end) {
+ byte = *(uint8_t *)cur;
+ count = (byte == KASAN_UNINITIALIZED_HEAP) ? (count + 1) : 0;
+ if (count > max_count) {
+ max_count = count;
+ leak_offset = cur - (count - 1) - base;
+ }
+ cur += 1;
+ }
+
+ if (max_count >= leak_threshold) {
+ kasan_report_leak(base, sz, leak_offset, max_count);
+ }
}
/*
*/
static const char *
-access_type_str(unsigned type)
+access_str(access_t type)
{
- if (type & TYPE_LOAD_ALL) {
- return "load";
- } else if (type & TYPE_STORE_ALL) {
- return "store";
+ if (type & TYPE_READ) {
+ return "load from";
+ } else if (type & TYPE_WRITE) {
+ return "store to";
} else if (type & TYPE_FREE) {
- return "free";
+ return "free of";
+ } else if (type & TYPE_LEAK) {
+ return "leak from";
} else {
- return "access";
+ return "access of";
}
}
[ASAN_PARTIAL5] = "PARTIAL5",
[ASAN_PARTIAL6] = "PARTIAL6",
[ASAN_PARTIAL7] = "PARTIAL7",
- [ASAN_STACK_RZ] = "<invalid>",
[ASAN_STACK_LEFT_RZ] = "STACK_LEFT_RZ",
[ASAN_STACK_MID_RZ] = "STACK_MID_RZ",
[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
[ASAN_STACK_FREED] = "STACK_FREED",
+ [ASAN_STACK_OOSCOPE] = "STACK_OOSCOPE",
[ASAN_GLOBAL_RZ] = "GLOBAL_RZ",
- [ASAN_HEAP_RZ] = "<invalid>",
[ASAN_HEAP_LEFT_RZ] = "HEAP_LEFT_RZ",
[ASAN_HEAP_RIGHT_RZ] = "HEAP_RIGHT_RZ",
[ASAN_HEAP_FREED] = "HEAP_FREED",
- [0xff] = "<invalid>",
+ [0xff] = NULL
};
#define CRASH_CONTEXT_BEFORE 5
kasan_shadow_crashlog(uptr p, char *buf, size_t len)
{
int i,j;
- size_t l = 0;
+ size_t n = 0;
int before = CRASH_CONTEXT_BEFORE;
int after = CRASH_CONTEXT_AFTER;
uptr shadow = (uptr)SHADOW_FOR_ADDRESS(p);
uptr shadow_p = shadow;
+ uptr shadow_page = vm_map_round_page(shadow_p, HW_PAGE_MASK);
/* rewind to start of context block */
shadow &= ~((uptr)0xf);
shadow -= 16 * before;
+ n += scnprintf(buf+n, len-n,
+ " Shadow 0 1 2 3 4 5 6 7 8 9 a b c d e f\n");
+
for (i = 0; i < 1 + before + after; i++, shadow += 16) {
- if (vm_map_round_page(shadow, PAGE_MASK) != vm_map_round_page(shadow_p, PAGE_MASK)) {
- /* don't cross a page boundary, in case the shadow is unmapped */
- /* XXX: ideally we check instead of ignore */
+ if ((vm_map_round_page(shadow, HW_PAGE_MASK) != shadow_page) && !kasan_is_shadow_mapped(shadow)) {
+ /* avoid unmapped shadow when crossing page boundaries */
continue;
}
- l += snprintf(buf+l, len-l, " %#16lx: ", shadow);
+ n += scnprintf(buf+n, len-n, " %16lx:", shadow);
+
+ char *left = " ";
+ char *right;
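+ /* bracket the shadow byte that covers the reported address */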
for (j = 0; j < 16; j++) {
uint8_t *x = (uint8_t *)(shadow + j);
- l += snprintf(buf+l, len-l, "%02x ", (unsigned)*x);
+
+ right = " ";
+ if ((uptr)x == shadow_p) {
+ left = "[";
+ right = "]";
+ } else if ((uptr)(x + 1) == shadow_p) {
+ right = "";
+ }
+
+ n += scnprintf(buf+n, len-n, "%s%02x%s", left, (unsigned)*x, right);
+ left = "";
}
- l += snprintf(buf+l, len-l, "\n");
+ n += scnprintf(buf+n, len-n, "\n");
}
- l += snprintf(buf+l, len-l, "\n");
- return l;
+ n += scnprintf(buf+n, len-n, "\n");
+ return n;
}
-static void NOINLINE
-kasan_crash_report(uptr p, uptr width, unsigned access_type)
+static void
+kasan_report_internal(uptr p, uptr width, access_t access, violation_t reason, bool dopanic)
{
const size_t len = 4096;
static char buf[len];
- size_t l = 0;
+ size_t n = 0;
uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
uint8_t shadow_type = *shadow_ptr;
const char *shadow_str = shadow_strings[shadow_type];
+ if (!shadow_str) {
+ shadow_str = "<invalid>";
+ }
+ buf[0] = '\0';
+
+ if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
+ n += scnprintf(buf+n, len-n, "KASan: free of corrupted/invalid object %#lx\n", p);
+ } else if (reason == REASON_MOD_AFTER_FREE) {
+ n += scnprintf(buf+n, len-n, "KASan: UaF of quarantined object %#lx\n", p);
+ } else {
+ n += scnprintf(buf+n, len-n, "KASan: invalid %lu-byte %s %#lx [%s]\n",
+ width, access_str(access), p, shadow_str);
+ }
+ n += kasan_shadow_crashlog(p, buf+n, len-n);
+
+ if (dopanic) {
+ panic("%s", buf);
+ } else {
+ printf("%s", buf);
+ }
+}
+static void NOINLINE OS_NORETURN
+kasan_crash_report(uptr p, uptr width, access_t access, violation_t reason)
+{
kasan_handle_test();
+ kasan_report_internal(p, width, access, reason, true);
+ __builtin_unreachable(); /* we can't handle this returning anyway */
+}
- buf[0] = '\0';
- l += snprintf(buf+l, len-l,
- "KASan: invalid %lu-byte %s @ %#lx [%s]\n"
- "Shadow %#02x @ %#lx\n\n",
- width, access_type_str(access_type), p, shadow_str,
- (unsigned)shadow_type, (unsigned long)shadow_ptr);
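+/*
+ * Log a non-fatal report (used for disabled or blacklisted violations when
+ * report_ignored is set), followed by a compact backtrace.
+ */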
+static void
+kasan_log_report(uptr p, uptr width, access_t access, violation_t reason)
+{
+ const size_t len = 256;
+ char buf[len];
+ size_t l = 0;
+ uint32_t nframes = 14;
+ uintptr_t frames[nframes];
+ uintptr_t *bt = frames;
- l += kasan_shadow_crashlog(p, buf+l, len-l);
+ kasan_report_internal(p, width, access, reason, false);
- panic("%s", buf);
+ /*
+ * print a backtrace
+ */
+
+ nframes = backtrace_frame(bt, nframes, __builtin_frame_address(0),
+ NULL); /* ignore current frame */
+
+ buf[0] = '\0';
+ l += scnprintf(buf+l, len-l, "Backtrace: ");
+ for (uint32_t i = 0; i < nframes; i++) {
+ l += scnprintf(buf+l, len-l, "%lx,", VM_KERNEL_UNSLIDE(bt[i]));
+ }
+ l += scnprintf(buf+l, len-l, "\n");
+
+ printf("%s", buf);
}
#define REPORT_DECLARE(n) \
- void __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD); } \
- void __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE); } \
- void __asan_report_exp_load##n(uptr, int32_t); \
- void __asan_report_exp_store##n(uptr, int32_t); \
- void __asan_report_exp_load##n(uptr __unused p, int32_t __unused e) { ABI_UNSUPPORTED; } \
- void __asan_report_exp_store##n(uptr __unused p, int32_t __unused e) { ABI_UNSUPPORTED; }
+ void OS_NORETURN __asan_report_load##n(uptr p) { kasan_crash_report(p, n, TYPE_LOAD, 0); } \
+ void OS_NORETURN __asan_report_store##n(uptr p) { kasan_crash_report(p, n, TYPE_STORE, 0); } \
+ void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_load##n, uptr a, int32_t b); \
+ void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_store##n, uptr a, int32_t b);
REPORT_DECLARE(1)
REPORT_DECLARE(2)
REPORT_DECLARE(8)
REPORT_DECLARE(16)
-void __asan_report_load_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_LOAD); }
-void __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE); }
+void OS_NORETURN __asan_report_load_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_LOAD, 0); }
+void OS_NORETURN __asan_report_store_n(uptr p, unsigned long sz) { kasan_crash_report(p, sz, TYPE_STORE, 0); }
/* unpoison the current stack */
-/* XXX: as an optimization, we could unpoison only up to the current stack depth */
void NOINLINE
-kasan_unpoison_curstack(void)
+kasan_unpoison_curstack(bool whole_stack)
{
- kasan_unpoison_stack(ml_stack_base(), ml_stack_size());
+ uintptr_t base = ml_stack_base();
+ size_t sz = ml_stack_size();
+ uintptr_t cur = (uintptr_t)&base;
+
+ if (whole_stack) {
+ cur = base;
+ }
+
+ if (cur >= base && cur < base + sz) {
+ /* unpoison from current stack depth to the top */
+ size_t unused = cur - base;
+ kasan_unpoison_stack(cur, sz - unused);
+ }
}
void NOINLINE
__asan_handle_no_return(void)
{
- kasan_unpoison_curstack();
- kasan_unpoison_fakestack(current_thread());
+ kasan_unpoison_curstack(false);
+
+ /*
+ * No need to free any fakestack objects because they must stay alive until
+ * we drop the real stack, at which point we can drop the entire fakestack
+ * anyway.
+ */
}
bool NOINLINE
uint8_t *shadow;
vm_size_t i;
- if (!kasan_initialized || !kasan_enabled) {
+ if (!kasan_enabled) {
return false;
}
unsigned long sectsz;
void *sect;
+#if KASAN_DYNAMIC_BLACKLIST
+ kasan_dybl_load_kext(base, bundleid);
+#endif
+
/* find the kasan globals segment/section */
sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, §sz);
if (sect) {
kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
kexts_loaded++;
}
-
-#if KASAN_DYNAMIC_BLACKLIST
- kasan_dybl_load_kext(base, bundleid);
-#endif
}
void NOINLINE
#endif
}
+/*
+ * Turn off as much as possible for the panic path, etc. There's no way to turn
+ * it back on.
+ */
void NOINLINE
kasan_disable(void)
{
__asan_option_detect_stack_use_after_return = 0;
+ fakestack_enabled = 0;
kasan_enabled = 0;
+ quarantine_enabled = 0;
+ enabled_checks = 0;
}
static void NOINLINE
kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;
if (!header) {
- printf("KASAN: failed to find kernel mach header\n");
- printf("KASAN: redzones for globals not poisoned\n");
+ printf("KASan: failed to find kernel mach header\n");
+ printf("KASan: redzones for globals not poisoned\n");
return;
}
globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
if (!globals) {
- printf("KASAN: failed to find segment %s section %s\n", seg, sect);
- printf("KASAN: redzones for globals not poisoned\n");
+ printf("KASan: failed to find segment %s section %s\n", seg, sect);
+ printf("KASan: redzones for globals not poisoned\n");
return;
}
size = (vm_size_t)_size;
- printf("KASAN: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
- printf("KASAN: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));
+ printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
+ printf("KASan: poisoning redzone for %lu globals\n", size / sizeof(struct asan_global));
kasan_init_globals(globals, size);
}
void NOINLINE
kasan_late_init(void)
{
- kasan_init_fakestack();
- kasan_init_xnu_globals();
-
#if KASAN_DYNAMIC_BLACKLIST
kasan_init_dybl();
#endif
+
+ kasan_init_fakestack();
+ kasan_init_xnu_globals();
}
void NOINLINE
void NOINLINE
kasan_init(void)
{
+ unsigned arg;
+
simple_lock_init(&kasan_vm_lock, 0);
/* Map all of the kernel text and data */
kasan_arch_init();
- kasan_initialized = 1;
+ /*
+ * handle KASan boot-args
+ */
+
+ if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
+ enabled_checks = arg;
+ }
+
+ if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
+ if (arg & KASAN_ARGS_FAKESTACK) {
+ fakestack_enabled = 1;
+ }
+ if (arg & KASAN_ARGS_REPORTIGNORED) {
+ report_ignored = 1;
+ }
+ if (arg & KASAN_ARGS_NODYCHECKS) {
+ enabled_checks &= ~TYPE_DYNAMIC;
+ }
+ if (arg & KASAN_ARGS_NOPOISON_HEAP) {
+ enabled_checks &= ~TYPE_POISON_HEAP;
+ }
+ if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
+ enabled_checks &= ~TYPE_POISON_GLOBAL;
+ }
+ if (arg & KASAN_ARGS_CHECK_LEAKS) {
+ enabled_checks |= TYPE_LEAK;
+ }
+ }
+
+ if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
+ free_yield = arg;
+ }
+
+ if (PE_parse_boot_argn("kasan.leak_threshold", &arg, sizeof(arg))) {
+ leak_threshold = arg;
+ }
+
+ if (PE_parse_boot_argn("kasan.leak_fatal_threshold", &arg, sizeof(arg))) {
+ leak_fatal_threshold = arg;
+ }
+
+ /* kasan.bl boot-arg handled in kasan_init_dybl() */
+
+ quarantine_enabled = 1;
kasan_enabled = 1;
}
{
assert(address < VM_MAX_KERNEL_ADDRESS);
- if (!kasan_initialized || !kasan_enabled) {
+ if (!kasan_enabled) {
return;
}
*/
struct kasan_alloc_header {
- uint32_t magic;
+ uint16_t magic;
+ uint16_t crc;
uint32_t alloc_size;
uint32_t user_size;
struct {
- uint32_t left_rz : 28;
- uint32_t frames : 4;
+ uint32_t left_rz : 32 - BACKTRACE_BITS;
+ uint32_t frames : BACKTRACE_BITS;
};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");
-#define MAGIC_XOR ((uint32_t)0xA110C8ED)
-static uint32_t
-magic_for_addr(vm_offset_t addr)
+#define LIVE_XOR ((uint16_t)0x3a65)
+#define FREE_XOR ((uint16_t)0xf233)
+
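+/*
+ * Fold the allocation address down to 16 bits and tag it with LIVE_XOR or
+ * FREE_XOR, so headers and freelist entries can be cheaply validated.
+ */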
+static uint16_t
+magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
{
- return (uint32_t)addr ^ MAGIC_XOR;
+ uint16_t magic = addr & 0xFFFF;
+ magic ^= (addr >> 16) & 0xFFFF;
+ magic ^= (addr >> 32) & 0xFFFF;
+ magic ^= (addr >> 48) & 0xFFFF;
+ magic ^= magic_xor;
+ return magic;
}
static struct kasan_alloc_header *
if (frames > 0) {
frames = min(frames + skip, BACKTRACE_MAXFRAMES);
- frames = backtrace(bt, frames);
+ frames = backtrace(bt, frames, NULL);
while (frames > sz && skip > 0) {
bt++;
return frames;
}
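+/*
+ * Checksum the left and right redzones of a live allocation (with the stored
+ * crc field zeroed) so that out-of-bounds writes next to the object can be
+ * detected when it is freed.
+ */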
+/* addr: user address of allocation */
+static uint16_t
+kasan_alloc_crc(vm_offset_t addr)
+{
+ struct kasan_alloc_header *h = header_for_user_addr(addr);
+ vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
+
+ uint16_t crc_orig = h->crc;
+ h->crc = 0;
+
+ uint16_t crc = 0;
+ crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
+ crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);
+
+ h->crc = crc_orig;
+
+ return crc;
+}
+
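+/*
+ * Recover the allocation backtrace for a heap address by walking the shadow
+ * map backwards to the left redzone, then reading the frames stashed in the
+ * allocation footer (if the header magic still matches).
+ */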
+static vm_size_t
+kasan_alloc_retrieve_bt(vm_address_t addr, uintptr_t frames[static BACKTRACE_MAXFRAMES])
+{
+ vm_size_t num_frames = 0;
+ uptr shadow = (uptr)SHADOW_FOR_ADDRESS(addr);
+ uptr max_search = shadow - 4096;
+ vm_address_t alloc_base = 0;
+ size_t fsize = 0;
+
+ /* walk the shadow backwards to find the allocation base */
+ while (shadow >= max_search) {
+ if (*(uint8_t *)shadow == ASAN_HEAP_LEFT_RZ) {
+ alloc_base = ADDRESS_FOR_SHADOW(shadow) + 8;
+ break;
+ }
+ shadow--;
+ }
+
+ if (alloc_base) {
+ struct kasan_alloc_header *header = header_for_user_addr(alloc_base);
+ if (magic_for_addr(alloc_base, LIVE_XOR) == header->magic) {
+ struct kasan_alloc_footer *footer = footer_for_user_addr(alloc_base, &fsize);
+ if ((fsize/sizeof(footer->backtrace[0])) >= header->frames) {
+ num_frames = header->frames;
+ for (size_t i = 0; i < num_frames; i++) {
+ frames[i] = footer->backtrace[i] + vm_kernel_slid_base;
+ }
+ }
+ }
+ }
+
+ return num_frames;
+}
+
/*
* addr: base address of full allocation (including redzones)
* size: total size of allocation (include redzones)
addr += leftrz;
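+ /* pre-fill the user region so leaks of uninitialized memory can be detected later */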
+ if (enabled_checks & TYPE_LEAK) {
+ __nosan_memset((void *)addr, KASAN_UNINITIALIZED_HEAP, req);
+ }
+
/* stash the allocation sizes in the left redzone */
struct kasan_alloc_header *h = header_for_user_addr(addr);
- h->magic = magic_for_addr(addr);
+ h->magic = magic_for_addr(addr, LIVE_XOR);
h->left_rz = leftrz;
h->alloc_size = size;
h->user_size = req;
struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
h->frames = kasan_alloc_bt(f->backtrace, fsize, 2);
+ /* checksum the whole object, minus the user part */
+ h->crc = kasan_alloc_crc(addr);
+
return addr;
}
{
assert(size && addr);
struct kasan_alloc_header *h = header_for_user_addr(addr);
- if (h->magic != magic_for_addr(addr)) {
- /* no point blacklisting here - this is fatal */
- kasan_crash_report(addr, *size, TYPE_FREE);
- }
*size = h->alloc_size;
return addr - h->left_rz;
}
kasan_user_size(vm_offset_t addr)
{
struct kasan_alloc_header *h = header_for_user_addr(addr);
- assert(h->magic == magic_for_addr(addr));
+ assert(h->magic == magic_for_addr(addr, LIVE_XOR));
return h->user_size;
}
struct kasan_alloc_header *h = header_for_user_addr(addr);
/* map heap type to an internal access type */
- unsigned type;
- if (heap_type == KASAN_HEAP_KALLOC) {
- type = TYPE_KFREE;
- } else if (heap_type == KASAN_HEAP_ZALLOC) {
- type = TYPE_ZFREE;
- } else if (heap_type == KASAN_HEAP_FAKESTACK) {
- type = TYPE_FSFREE;
- }
-
- /* check the magic matches */
- if (h->magic != magic_for_addr(addr)) {
- if (kasan_is_blacklisted(type)) {
- return;
- }
- kasan_crash_report(addr, size, type);
+ access_t type = heap_type == KASAN_HEAP_KALLOC ? TYPE_KFREE :
+ heap_type == KASAN_HEAP_ZALLOC ? TYPE_ZFREE :
+ heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0;
+
+ /* check the magic and crc match */
+ if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
+ kasan_violation(addr, size, type, REASON_BAD_METADATA);
+ }
+ if (h->crc != kasan_alloc_crc(addr)) {
+ kasan_violation(addr, size, type, REASON_MOD_OOB);
}
/* check the freed size matches what we recorded at alloc time */
if (h->user_size != size) {
- if (kasan_is_blacklisted(type)) {
- return;
- }
- kasan_crash_report(addr, size, type);
+ kasan_violation(addr, size, type, REASON_INVALID_SIZE);
}
vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;
/* Check that the redzones are valid */
- kasan_assert_shadow(addr - h->left_rz, h->left_rz, addr, ASAN_HEAP_LEFT_RZ);
- kasan_assert_shadow(addr + h->user_size, rightrz_sz, addr, ASAN_HEAP_RIGHT_RZ);
+ if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
+ !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
+ kasan_violation(addr, size, type, REASON_BAD_METADATA);
+ }
/* Check the allocated range is not poisoned */
kasan_check_range((void *)addr, size, type);
*/
struct freelist_entry {
- uint32_t magic;
- uint32_t checksum;
+ uint16_t magic;
+ uint16_t crc;
STAILQ_ENTRY(freelist_entry) list;
union {
struct {
vm_size_t size : 28;
vm_size_t user_size : 28;
- vm_size_t frames : 4; /* number of frames in backtrace */
- vm_size_t __unused : 4;
+ vm_size_t frames : BACKTRACE_BITS; /* number of frames in backtrace */
+ vm_size_t __unused : 8 - BACKTRACE_BITS;
};
uint64_t bits;
};
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");
-#define FREELIST_MAGIC_XOR ((uint32_t)0xF23333D)
-static uint32_t
-freelist_magic(vm_offset_t addr)
-{
- return (uint32_t)addr ^ FREELIST_MAGIC_XOR;
-}
-
struct quarantine {
STAILQ_HEAD(freelist_head, freelist_entry) freelist;
unsigned long entries;
{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};
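+/* Checksum a quarantined object from the 'bits' field to the end of the allocation */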
+static uint16_t
+fle_crc(struct freelist_entry *fle)
+{
+ return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
+}
+
/*
* addr, sizep: pointer/size of full allocation including redzone
*/
/* create a new freelist entry */
fle = (struct freelist_entry *)addr;
- fle->magic = freelist_magic((vm_offset_t)fle);
+ fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
fle->size = size;
fle->user_size = user_size;
fle->frames = 0;
fle->zone = *zone;
}
if (type != KASAN_HEAP_FAKESTACK) {
+ /* don't do expensive things on the fakestack path */
fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
+ fle->crc = fle_crc(fle);
}
boolean_t flg;
size = tofree->size;
addr = (vm_offset_t)tofree;
- if (tofree->magic != freelist_magic(addr)) {
- kasan_crash_report(addr, size, TYPE_FREE);
+
+ /* check the magic and crc match */
+ if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
+ kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
+ }
+ if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
+ kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
}
/* clobber the quarantine header */
- kasan_rz_clobber(addr, 0, sizeof(struct freelist_entry), 0);
+ __nosan_bzero((void *)addr, sizeof(struct freelist_entry));
} else {
/* quarantine is not full - don't really free anything */
vm_size_t user_size, bool quarantine)
{
kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);
+
+ if (free_yield) {
+ thread_yield_internal(free_yield);
+ }
}
uptr
*shadow = ASAN_ARRAY_COOKIE;
}
-#define ACCESS_CHECK_DECLARE(type, sz, access_type) \
+/*
+ * Unpoison the C++ array cookie (if it exists). We don't know exactly where it
+ * lives relative to the start of the buffer, but it's always the word immediately
+ * before the start of the array data, so for naturally-aligned objects we need to
+ * search at most 2 shadow bytes.
+ */
+void
+kasan_unpoison_cxx_array_cookie(void *ptr)
+{
+ uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)ptr);
+ for (size_t i = 0; i < 2; i++) {
+ if (shadow[i] == ASAN_ARRAY_COOKIE) {
+ shadow[i] = ASAN_VALID;
+ return;
+ } else if (shadow[i] != ASAN_VALID) {
+ /* must have seen the cookie by now */
+ return;
+ }
+ }
+}
+
+#define ACCESS_CHECK_DECLARE(type, sz, access) \
void __asan_##type##sz(uptr addr) { \
- kasan_check_range((const void *)addr, sz, access_type); \
+ kasan_check_range((const void *)addr, sz, access); \
} \
- void __asan_exp_##type##sz(uptr, int32_t); \
- void __asan_exp_##type##sz(uptr __unused addr, int32_t __unused e) { ABI_UNSUPPORTED; }
+ void OS_NORETURN UNSUPPORTED_API(__asan_exp_##type##sz, uptr a, int32_t b);
ACCESS_CHECK_DECLARE(load, 1, TYPE_LOAD);
ACCESS_CHECK_DECLARE(load, 2, TYPE_LOAD);
kasan_check_range((const void *)addr, sz, TYPE_STORE);
}
-void __asan_exp_loadN(uptr, size_t, int32_t);
-void __asan_exp_storeN(uptr, size_t, int32_t);
-void __asan_exp_loadN(uptr __unused addr, size_t __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
-void __asan_exp_storeN(uptr __unused addr, size_t __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
-
-void __asan_report_exp_load_n(uptr, unsigned long, int32_t);
-void __asan_report_exp_store_n(uptr, unsigned long, int32_t);
-void __asan_report_exp_load_n(uptr __unused p, unsigned long __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
-void __asan_report_exp_store_n(uptr __unused p, unsigned long __unused sz, int32_t __unused e) { ABI_UNSUPPORTED; }
-
static void
kasan_set_shadow(uptr addr, size_t sz, uint8_t val)
{
SET_SHADOW_DECLARE(f5)
SET_SHADOW_DECLARE(f8)
+
/*
- * XXX: implement these
+ * Call 'cb' for each contiguous range of the shadow map. This could be more
+ * efficient by walking the page table directly.
*/
-
-void __asan_alloca_poison(uptr addr, uptr size)
-{
- (void)addr;
- (void)size;
-}
-
-void __asan_allocas_unpoison(uptr top, uptr bottom)
-{
- (void)top;
- (void)bottom;
-}
-
-void
-__sanitizer_ptr_sub(uptr a, uptr b)
-{
- (void)a;
- (void)b;
-}
-
-void
-__sanitizer_ptr_cmp(uptr a, uptr b)
+int
+kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
{
- (void)a;
- (void)b;
-}
-
-void
-__asan_poison_stack_memory(uptr addr, size_t size)
-{
- (void)addr;
- (void)size;
-}
+ uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
+ uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
+ shadow_base = vm_map_trunc_page(shadow_base, HW_PAGE_MASK);
+ shadow_top = vm_map_round_page(shadow_top, HW_PAGE_MASK);
+
+ uintptr_t start = 0, end = 0;
+
+ for (uintptr_t addr = shadow_base; addr < shadow_top; addr += HW_PAGE_SIZE) {
+ if (kasan_is_shadow_mapped(addr)) {
+ if (start == 0) {
+ start = addr;
+ }
+ end = addr + HW_PAGE_SIZE;
+ } else if (start && end) {
+ cb(start, end, ctx);
+ start = end = 0;
+ }
+ }
-void
-__asan_unpoison_stack_memory(uptr addr, size_t size)
-{
- (void)addr;
- (void)size;
-}
+ if (start && end) {
+ cb(start, end, ctx);
+ }
-void
-__sanitizer_annotate_contiguous_container(const void *beg,
- const void *end,
- const void *old_mid,
- const void *new_mid)
-{
- (void)beg;
- (void)end;
- (void)old_mid;
- (void)new_mid;
+ return 0;
}
/*
+ * XXX: implement these
*/
-void
-__asan_init(void)
-{
-}
-
-#define VERSION_DECLARE(v) \
- void __asan_version_mismatch_check_##v(void); \
- void __asan_version_mismatch_check_##v(void) {}
-
-VERSION_DECLARE(v8)
-VERSION_DECLARE(apple_802)
-VERSION_DECLARE(apple_900)
-
-void
-__asan_register_globals(uptr __unused a, uptr __unused b)
-{
- ABI_UNSUPPORTED;
-}
-
-void
-__asan_unregister_globals(uptr __unused a, uptr __unused b)
-{
- ABI_UNSUPPORTED;
-}
-
-void
-__asan_register_image_globals(uptr __unused ptr)
-{
-}
-
-void
-__asan_unregister_image_globals(uptr __unused ptr)
-{
-}
-
-void
-__asan_init_v5(void)
-{
-}
-
-void
-__asan_before_dynamic_init(uptr __unused arg)
-{
-}
+UNUSED_ABI(__asan_alloca_poison, uptr addr, uptr size);
+UNUSED_ABI(__asan_allocas_unpoison, uptr top, uptr bottom);
+UNUSED_ABI(__sanitizer_ptr_sub, uptr a, uptr b);
+UNUSED_ABI(__sanitizer_ptr_cmp, uptr a, uptr b);
+UNUSED_ABI(__sanitizer_annotate_contiguous_container, const void *a, const void *b, const void *c, const void *d);
+UNUSED_ABI(__asan_poison_stack_memory, uptr addr, size_t size);
+UNUSED_ABI(__asan_unpoison_stack_memory, uptr a, uptr b);
-void
-__asan_after_dynamic_init(void)
-{
-}
+/*
+ * Miscellaneous unimplemented asan ABI
+ */
+UNUSED_ABI(__asan_init, void);
+UNUSED_ABI(__asan_register_image_globals, uptr a);
+UNUSED_ABI(__asan_unregister_image_globals, uptr a);
+UNUSED_ABI(__asan_before_dynamic_init, uptr a);
+UNUSED_ABI(__asan_after_dynamic_init, void);
+UNUSED_ABI(__asan_version_mismatch_check_v8, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_802, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_900, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_902, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_1000, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_1001, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1100, void);
+
+void OS_NORETURN UNSUPPORTED_API(__asan_init_v5, void);
+void OS_NORETURN UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
+void OS_NORETURN UNSUPPORTED_API(__asan_unregister_globals, uptr a, uptr b);
+void OS_NORETURN UNSUPPORTED_API(__asan_register_elf_globals, uptr a, uptr b, uptr c);
+void OS_NORETURN UNSUPPORTED_API(__asan_unregister_elf_globals, uptr a, uptr b, uptr c);
+
+void OS_NORETURN UNSUPPORTED_API(__asan_exp_loadN, uptr addr, size_t sz, int32_t e);
+void OS_NORETURN UNSUPPORTED_API(__asan_exp_storeN, uptr addr, size_t sz, int32_t e);
+void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_load_n, uptr addr, unsigned long b, int32_t c);
+void OS_NORETURN UNSUPPORTED_API(__asan_report_exp_store_n, uptr addr, unsigned long b, int32_t c);
/*
*
return err;
}
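+/*
+ * Toggle detection of stack use-after-return (the fakestack) at runtime via
+ * the kern.kasan.fakestack sysctl.
+ */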
+static int
+sysctl_fakestack_enable(__unused struct sysctl_oid *oidp, __unused void *arg1, int __unused arg2, struct sysctl_req *req)
+{
+ int ch, err, val;
+
+ err = sysctl_io_number(req, fakestack_enabled, sizeof(fakestack_enabled), &val, &ch);
+ if (err == 0 && ch) {
+ fakestack_enabled = !!val;
+ __asan_option_detect_stack_use_after_return = !!val;
+ }
+
+ return err;
+}
+
SYSCTL_DECL(kasan);
SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
-SYSCTL_INT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
-SYSCTL_INT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
-SYSCTL_LONG(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, "");
-SYSCTL_LONG(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, "");
-SYSCTL_LONG(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, "");
-
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, fakestack, CTLFLAG_RD, NULL, FAKESTACK, "");
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");
-SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, memintrinsics, CTLFLAG_RD, NULL, MEMINTRINSICS, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, checks, CTLFLAG_RW, &enabled_checks, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, report_ignored, CTLFLAG_RW, &report_ignored, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, free_yield_ms, CTLFLAG_RW, &free_yield, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, leak_threshold, CTLFLAG_RW, &leak_threshold, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, leak_fatal_threshold, CTLFLAG_RW, &leak_fatal_threshold, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, "");
+SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, "");
+SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
+SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
+SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
+SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");
+
+SYSCTL_PROC(_kern_kasan, OID_AUTO, fakestack,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ 0, 0, sysctl_fakestack_enable, "I", "");
SYSCTL_PROC(_kern_kasan, OID_AUTO, test,
CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,