apple/xnu (xnu-6153.61.1): san/kasan-fakestack.c

diff --git a/san/kasan-fakestack.c b/san/kasan-fakestack.c
index 12869e512298d2ecf2c7cc0818e8037e31b6b57f..add9941a9cb431e3a63fb7d0d95dd807a1f7cb03 100644
--- a/san/kasan-fakestack.c
+++ b/san/kasan-fakestack.c
 #include <kasan_internal.h>
 
 int __asan_option_detect_stack_use_after_return = 0;
+int fakestack_enabled = 0;
 
 #define FAKESTACK_HEADER_SZ 64
 #define FAKESTACK_NUM_SZCLASS 7
 
-#define FAKESTACK_FREED     0 /* forced by clang */
+#define FAKESTACK_UNUSED    0 /* waiting to be collected at next gc - forced by clang */
 #define FAKESTACK_ALLOCATED 1
+#define FAKESTACK_FREED     2
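
For context on the three states: these headers sit in front of frames that clang's use-after-return instrumentation pulls from the fakestack zones in place of ordinary stack storage. The sketch below shows roughly how instrumented code reaches the hooks in this file; the __asan_stack_malloc_0()/__asan_stack_free_0() entry points and the alloca fallback are assumptions based on the standard ASan runtime interface rather than anything shown in this diff, and encoding "unused" as 0 is, per the comment above, forced by clang.

/* Sketch only: roughly what the compiler emits for a 64-byte frame. */
void
example_instrumented_function(void)
{
        uptr frame = 0;
        if (__asan_option_detect_stack_use_after_return) {
                frame = __asan_stack_malloc_0(64);      /* size class 0 */
        }
        if (frame == 0) {
                frame = (uptr)__builtin_alloca(64);     /* fall back to the real stack */
        }
        /* ... addressable locals live inside `frame` ... */
        /* On return the frame is retired: either __asan_stack_free_0() is called,
         * or the flag word is cleared to 0 (FAKESTACK_UNUSED) for a later gc pass. */
}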
 
 #if FAKESTACK
 
@@ -69,28 +71,40 @@ static const unsigned long fakestack_min = 1 << 6;
 static const unsigned long __unused fakestack_max = 1 << 16;
 
 /*
- * Mark the current thread as being in a fakestack operation, to avoid reentrancy
- * issues. If set, disable fakestack allocation.
+ * Enter a fakestack critical section in a reentrant-safe fashion. Returns true on
+ * success with the kasan lock held.
  */
-static boolean_t
-thread_enter_fakestack(void)
+static bool
+thread_enter_fakestack(boolean_t *flags)
 {
-       thread_t thread = current_thread();
-       if (thread) {
-               return OSIncrementAtomic(&kasan_get_thread_data(current_thread())->in_fakestack);
-       } else {
-               return 0;
+       thread_t cur = current_thread();
+       if (cur && kasan_lock_held(cur)) {
+               /* current thread is already in kasan - fail */
+               return false;
        }
+       kasan_lock(flags);
+       return true;
 }
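
The reworked helper couples the reentrancy check with lock acquisition: on success the caller holds the kasan lock and must drop it with kasan_unlock(). A minimal sketch of that calling convention (the function name is illustrative; the real callers are kasan_fakestack_gc(), kasan_fakestack_alloc() and kasan_fakestack_drop() below):

static void
example_fakestack_operation(void)
{
        boolean_t flags;

        if (!thread_enter_fakestack(&flags)) {
                /* this thread is already inside kasan; skip the operation */
                return;
        }
        /* ... inspect or modify the per-thread fakestack list ... */
        kasan_unlock(flags);
}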
 
-static boolean_t
-thread_exit_fakestack(void)
+static volatile long suspend_count;
+static const long suspend_threshold = 20;
+
+void
+kasan_fakestack_suspend(void)
 {
-       thread_t thread = current_thread();
-       if (thread) {
-               return OSDecrementAtomic(&kasan_get_thread_data(current_thread())->in_fakestack);
-       } else {
-               return 0;
+       if (OSIncrementAtomicLong(&suspend_count) == suspend_threshold) {
+               __asan_option_detect_stack_use_after_return = 0;
+       }
+}
+
+void
+kasan_fakestack_resume(void)
+{
+       long orig = OSDecrementAtomicLong(&suspend_count);
+       assert(orig >= 0);
+
+       if (fakestack_enabled && orig == suspend_threshold) {
+               __asan_option_detect_stack_use_after_return = 1;
        }
 }
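
The new suspend/resume pair lets other kasan code temporarily veto fakestack allocations: use-after-return detection is switched off once more than suspend_threshold suspenders are outstanding, and switched back on (only if fakestack is enabled at all) once the count drops back below the threshold. A sketch of the intended bracketing, with a hypothetical caller name:

static void
example_region_without_fakestack(void)
{
        kasan_fakestack_suspend();
        /* ... work during which no new fakestack frames may be handed out ... */
        kasan_fakestack_resume();
}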
 
@@ -107,29 +121,38 @@ ptr_is_on_stack(uptr ptr)
 }
 
 /* free all unused fakestack entries */
-static void NOINLINE
+void
 kasan_fakestack_gc(thread_t thread)
 {
        struct fakestack_header *cur, *tmp;
        LIST_HEAD(, fakestack_header) tofree = LIST_HEAD_INITIALIZER(tofree);
 
-       /* move all the freed elements off the main list */
+       boolean_t flags;
+       if (!thread_enter_fakestack(&flags)) {
+               panic("expected success entering fakestack\n");
+       }
+
+       /* move the unused objects off the per-thread list... */
        struct fakestack_header_list *head = &kasan_get_thread_data(thread)->fakestack_head;
        LIST_FOREACH_SAFE(cur, head, list, tmp) {
-               if (cur->flag == FAKESTACK_FREED) {
+               if (cur->flag == FAKESTACK_UNUSED) {
                        LIST_REMOVE(cur, list);
                        LIST_INSERT_HEAD(&tofree, cur, list);
+                       cur->flag = FAKESTACK_FREED;
                }
        }
 
+       kasan_unlock(flags);
+
        /* ... then actually free them */
        LIST_FOREACH_SAFE(cur, &tofree, list, tmp) {
+               LIST_REMOVE(cur, list);
+
                zone_t zone = fakestack_zones[cur->sz_class];
                size_t sz = (fakestack_min << cur->sz_class) + FAKESTACK_HEADER_SZ;
-               LIST_REMOVE(cur, list);
 
                void *ptr = (void *)cur;
-               kasan_free_internal(&ptr, &sz, KASAN_HEAP_FAKESTACK, &zone, cur->realsz, 1, FAKESTACK_QUARANTINE);
+               kasan_free_internal(&ptr, &sz, KASAN_HEAP_FAKESTACK, &zone, cur->realsz, 0, FAKESTACK_QUARANTINE);
                if (ptr) {
                        zfree(zone, ptr);
                }
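
The sz value freed above comes straight from the size-class scheme at the top of the file. A worked example of that arithmetic, using only constants visible in this file (the function itself is illustrative, not part of the diff):

static void
example_dump_size_classes(void)
{
        for (int c = 0; c < FAKESTACK_NUM_SZCLASS; c++) {
                size_t frame_sz = fakestack_min << c;             /* 64, 128, ..., 4096 */
                size_t elem_sz = frame_sz + FAKESTACK_HEADER_SZ;  /* size of each zone element */
                printf("class %d: frame %lu bytes, element %lu bytes\n",
                    c, (unsigned long)frame_sz, (unsigned long)elem_sz);
        }
}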
@@ -155,24 +178,19 @@ kasan_fakestack_alloc(int sz_class, size_t realsz)
                return 0;
        }
 
-       boolean_t flags;
        uptr ret = 0;
        size_t sz = fakestack_min << sz_class;
        assert(realsz <= sz);
        assert(sz <= fakestack_max);
        zone_t zone = fakestack_zones[sz_class];
 
-       if (thread_enter_fakestack()) {
+       boolean_t flags;
+       if (!thread_enter_fakestack(&flags)) {
                return 0;
        }
 
-       kasan_lock(&flags);
-       kasan_fakestack_gc(current_thread()); /* XXX: optimal? */
-
        ret = (uptr)zget(zone);
 
-       thread_exit_fakestack();
-
        if (ret) {
                size_t leftrz = 32 + FAKESTACK_HEADER_SZ;
                size_t validsz = realsz - 32 - 16; /* remove redzones */
@@ -204,7 +222,6 @@ kasan_fakestack_free(int sz_class, uptr dst, size_t realsz)
        }
 
        assert(realsz <= (fakestack_min << sz_class));
-       assert(__asan_option_detect_stack_use_after_return);
 
        vm_size_t sz = fakestack_min << sz_class;
        zone_t zone = fakestack_zones[sz_class];
@@ -225,34 +242,28 @@ kasan_fakestack_free(int sz_class, uptr dst, size_t realsz)
 
        kasan_free_internal((void **)&dst, &sz, KASAN_HEAP_FAKESTACK, &zone, realsz, 1, FAKESTACK_QUARANTINE);
        if (dst) {
-               zfree(zone, (void *)dst);
+               zfree(zone, dst);
        }
 
        kasan_unlock(flags);
 }
 
 void NOINLINE
-kasan_unpoison_fakestack(thread_t thread)
+kasan_fakestack_drop(thread_t thread)
 {
-       if (!__asan_option_detect_stack_use_after_return) {
-               return;
-       }
-
        boolean_t flags;
-       kasan_lock(&flags);
-
-       thread_enter_fakestack();
+       if (!thread_enter_fakestack(&flags)) {
+               panic("expected success entering fakestack\n");
+       }
 
        struct fakestack_header_list *head = &kasan_get_thread_data(thread)->fakestack_head;
        struct fakestack_header *cur;
        LIST_FOREACH(cur, head, list) {
                if (cur->flag == FAKESTACK_ALLOCATED) {
-                       cur->flag = FAKESTACK_FREED;
+                       cur->flag = FAKESTACK_UNUSED;
                }
        }
 
-       kasan_fakestack_gc(thread);
-       thread_exit_fakestack();
        kasan_unlock(flags);
 }
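
The rename from kasan_unpoison_fakestack() to kasan_fakestack_drop() also splits the work: dropping now only retires a thread's ALLOCATED objects to UNUSED, while the now externally callable kasan_fakestack_gc() actually unlinks and frees them. A sketch of the resulting two-step teardown; the caller shown is hypothetical, as the diff does not show where these are invoked:

static void
example_thread_stack_teardown(thread_t thread)
{
        kasan_fakestack_drop(thread);   /* ALLOCATED -> UNUSED */
        kasan_fakestack_gc(thread);     /* unlink UNUSED entries and free them */
}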
 
@@ -275,7 +286,7 @@ kasan_init_fakestack(void)
                assert(z);
                zone_change(z, Z_NOCALLOUT, TRUE);
                zone_change(z, Z_EXHAUST, TRUE);
-               zone_change(z, Z_EXPAND,  FALSE);
+               zone_change(z, Z_EXPAND, FALSE);
                zone_change(z, Z_COLLECT, FALSE);
                zone_change(z, Z_KASAN_QUARANTINE, FALSE);
                zfill(z, maxsz / sz);
@@ -283,7 +294,9 @@ kasan_init_fakestack(void)
        }
 
        /* globally enable */
-       __asan_option_detect_stack_use_after_return = 1;
+       if (fakestack_enabled) {
+               __asan_option_detect_stack_use_after_return = 1;
+       }
 }
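
With the new gate, kasan_init_fakestack() only flips the clang-visible global when fakestack_enabled was set beforehand. A minimal sketch of how that variable might be driven, assuming a boot argument parsed before this function runs; the "kasan.fakestack" name is an assumption and not taken from this diff (PE_parse_boot_argn() is xnu's standard boot-argument accessor):

unsigned int arg = 0;
if (PE_parse_boot_argn("kasan.fakestack", &arg, sizeof(arg))) {
        fakestack_enabled = arg ? 1 : 0;        /* hypothetical wiring, for illustration */
}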
 
 #else /* FAKESTACK */
@@ -316,9 +329,9 @@ kasan_fakestack_free(int __unused sz_class, uptr __unused dst, size_t __unused r
 
 #endif
 
-void kasan_init_thread(struct kasan_thread_data *td)
+void
+kasan_init_thread(struct kasan_thread_data *td)
 {
-       td->in_fakestack = 0;
        LIST_INIT(&td->fakestack_head);
 }