X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/a39ff7e25e19b3a8c3020042a3872ca9ec9659f1..4ba76501152d51ccb5647018f3192c6096367d48:/san/kasan-fakestack.c?ds=sidebyside

diff --git a/san/kasan-fakestack.c b/san/kasan-fakestack.c
index b023ded1c..add9941a9 100644
--- a/san/kasan-fakestack.c
+++ b/san/kasan-fakestack.c
@@ -47,8 +47,9 @@ int fakestack_enabled = 0;
 #define FAKESTACK_HEADER_SZ 64
 #define FAKESTACK_NUM_SZCLASS 7
 
-#define FAKESTACK_FREED     0 /* forced by clang */
+#define FAKESTACK_UNUSED    0 /* waiting to be collected at next gc - forced by clang */
 #define FAKESTACK_ALLOCATED 1
+#define FAKESTACK_FREED     2
 
 #if FAKESTACK
 
@@ -120,29 +121,38 @@ ptr_is_on_stack(uptr ptr)
 }
 
 /* free all unused fakestack entries */
-static void NOINLINE
+void
 kasan_fakestack_gc(thread_t thread)
 {
 	struct fakestack_header *cur, *tmp;
 	LIST_HEAD(, fakestack_header) tofree = LIST_HEAD_INITIALIZER(tofree);
 
-	/* move all the freed elements off the main list */
+	boolean_t flags;
+	if (!thread_enter_fakestack(&flags)) {
+		panic("expected success entering fakestack\n");
+	}
+
+	/* move the unused objects off the per-thread list... */
 	struct fakestack_header_list *head = &kasan_get_thread_data(thread)->fakestack_head;
 	LIST_FOREACH_SAFE(cur, head, list, tmp) {
-		if (cur->flag == FAKESTACK_FREED) {
+		if (cur->flag == FAKESTACK_UNUSED) {
 			LIST_REMOVE(cur, list);
 			LIST_INSERT_HEAD(&tofree, cur, list);
+			cur->flag = FAKESTACK_FREED;
 		}
 	}
 
+	kasan_unlock(flags);
+	/* ... then actually free them */
 	LIST_FOREACH_SAFE(cur, &tofree, list, tmp) {
+		LIST_REMOVE(cur, list);
+
 		zone_t zone = fakestack_zones[cur->sz_class];
 		size_t sz = (fakestack_min << cur->sz_class) + FAKESTACK_HEADER_SZ;
-		LIST_REMOVE(cur, list);
 
 		void *ptr = (void *)cur;
-		kasan_free_internal(&ptr, &sz, KASAN_HEAP_FAKESTACK, &zone, cur->realsz, 1, FAKESTACK_QUARANTINE);
+		kasan_free_internal(&ptr, &sz, KASAN_HEAP_FAKESTACK, &zone, cur->realsz, 0, FAKESTACK_QUARANTINE);
 		if (ptr) {
 			zfree(zone, ptr);
 		}
@@ -179,8 +189,6 @@ kasan_fakestack_alloc(int sz_class, size_t realsz)
 		return 0;
 	}
 
-	kasan_fakestack_gc(current_thread()); /* XXX: optimal? */
-
 	ret = (uptr)zget(zone);
 
 	if (ret) {
@@ -234,14 +242,14 @@ kasan_fakestack_free(int sz_class, uptr dst, size_t realsz)
 	kasan_free_internal((void **)&dst, &sz, KASAN_HEAP_FAKESTACK, &zone, realsz, 1, FAKESTACK_QUARANTINE);
 
 	if (dst) {
-		zfree(zone, (void *)dst);
+		zfree(zone, dst);
 	}
 
 	kasan_unlock(flags);
 }
 
 void NOINLINE
-kasan_unpoison_fakestack(thread_t thread)
+kasan_fakestack_drop(thread_t thread)
 {
 	boolean_t flags;
 	if (!thread_enter_fakestack(&flags)) {
@@ -252,11 +260,10 @@ kasan_unpoison_fakestack(thread_t thread)
 	struct fakestack_header *cur;
 	LIST_FOREACH(cur, head, list) {
 		if (cur->flag == FAKESTACK_ALLOCATED) {
-			cur->flag = FAKESTACK_FREED;
+			cur->flag = FAKESTACK_UNUSED;
 		}
 	}
 
-	kasan_fakestack_gc(thread);
 	kasan_unlock(flags);
 }
 
@@ -279,7 +286,7 @@ kasan_init_fakestack(void)
 		assert(z);
 		zone_change(z, Z_NOCALLOUT, TRUE);
 		zone_change(z, Z_EXHAUST, TRUE);
-		zone_change(z, Z_EXPAND,  FALSE);
+		zone_change(z, Z_EXPAND, FALSE);
 		zone_change(z, Z_COLLECT, FALSE);
 		zone_change(z, Z_KASAN_QUARANTINE, FALSE);
 		zfill(z, maxsz / sz);
@@ -322,7 +329,8 @@ kasan_fakestack_free(int __unused sz_class, uptr __unused dst, size_t __unused r
 
 #endif
 
-void kasan_init_thread(struct kasan_thread_data *td)
+void
+kasan_init_thread(struct kasan_thread_data *td)
 {
 	LIST_INIT(&td->fakestack_head);
 }