#include <kasan_internal.h>
/*
 * Set by clang-instrumented code: when non-zero, function prologues divert
 * local variables onto fakestack allocations so use-after-return can be
 * detected.
 */
int __asan_option_detect_stack_use_after_return = 0;

/* Non-zero once fakestack use has been requested/enabled for this boot. */
int fakestack_enabled = 0;

#define FAKESTACK_HEADER_SZ   64
#define FAKESTACK_NUM_SZCLASS 7

/* States for fakestack_header::flag. The zero value is forced by clang. */
#define FAKESTACK_UNUSED    0 /* waiting to be collected at next gc - forced by clang */
#define FAKESTACK_ALLOCATED 1
#define FAKESTACK_FREED     2

#if FAKESTACK

/* Largest fakestack size class, in bytes (checked against in the allocator). */
static const unsigned long __unused fakestack_max = 1 << 16;
/*
- * Mark the current thread as being in a fakestack operation, to avoid reentrancy
- * issues. If set, disable fakestack allocation.
+ * Enter a fakestack critical section in a reentrant-safe fashion. Returns true on
+ * success with the kasan lock held.
*/
-static boolean_t
-thread_enter_fakestack(void)
+static bool
+thread_enter_fakestack(boolean_t *flags)
{
- thread_t thread = current_thread();
- if (thread) {
- return OSIncrementAtomic(&kasan_get_thread_data(current_thread())->in_fakestack);
- } else {
- return 0;
+ thread_t cur = current_thread();
+ if (cur && kasan_lock_held(cur)) {
+ /* current thread is already in kasan - fail */
+ return false;
}
+ kasan_lock(flags);
+ return true;
}
-static boolean_t
-thread_exit_fakestack(void)
+static volatile long suspend_count;
+static const long suspend_threshold = 20;
+
+void
+kasan_fakestack_suspend(void)
{
- thread_t thread = current_thread();
- if (thread) {
- return OSDecrementAtomic(&kasan_get_thread_data(current_thread())->in_fakestack);
- } else {
- return 0;
+ if (OSIncrementAtomicLong(&suspend_count) == suspend_threshold) {
+ __asan_option_detect_stack_use_after_return = 0;
+ }
+}
+
+void
+kasan_fakestack_resume(void)
+{
+ long orig = OSDecrementAtomicLong(&suspend_count);
+ assert(orig >= 0);
+
+ if (fakestack_enabled && orig == suspend_threshold) {
+ __asan_option_detect_stack_use_after_return = 1;
}
}
}
/* free all unused fakestack entries */
-static void NOINLINE
+void
kasan_fakestack_gc(thread_t thread)
{
struct fakestack_header *cur, *tmp;
LIST_HEAD(, fakestack_header) tofree = LIST_HEAD_INITIALIZER(tofree);
- /* move all the freed elements off the main list */
+ boolean_t flags;
+ if (!thread_enter_fakestack(&flags)) {
+ panic("expected success entering fakestack\n");
+ }
+
+ /* move the unused objects off the per-thread list... */
struct fakestack_header_list *head = &kasan_get_thread_data(thread)->fakestack_head;
LIST_FOREACH_SAFE(cur, head, list, tmp) {
- if (cur->flag == FAKESTACK_FREED) {
+ if (cur->flag == FAKESTACK_UNUSED) {
LIST_REMOVE(cur, list);
LIST_INSERT_HEAD(&tofree, cur, list);
+ cur->flag = FAKESTACK_FREED;
}
}
+ kasan_unlock(flags);
+
/* ... then actually free them */
LIST_FOREACH_SAFE(cur, &tofree, list, tmp) {
+ LIST_REMOVE(cur, list);
+
zone_t zone = fakestack_zones[cur->sz_class];
size_t sz = (fakestack_min << cur->sz_class) + FAKESTACK_HEADER_SZ;
- LIST_REMOVE(cur, list);
void *ptr = (void *)cur;
- kasan_free_internal(&ptr, &sz, KASAN_HEAP_FAKESTACK, &zone, cur->realsz, 1, FAKESTACK_QUARANTINE);
+ kasan_free_internal(&ptr, &sz, KASAN_HEAP_FAKESTACK, &zone, cur->realsz, 0, FAKESTACK_QUARANTINE);
if (ptr) {
zfree(zone, ptr);
}
return 0;
}
- boolean_t flags;
uptr ret = 0;
size_t sz = fakestack_min << sz_class;
assert(realsz <= sz);
assert(sz <= fakestack_max);
zone_t zone = fakestack_zones[sz_class];
- if (thread_enter_fakestack()) {
+ boolean_t flags;
+ if (!thread_enter_fakestack(&flags)) {
return 0;
}
- kasan_lock(&flags);
- kasan_fakestack_gc(current_thread()); /* XXX: optimal? */
-
ret = (uptr)zget(zone);
- thread_exit_fakestack();
-
if (ret) {
size_t leftrz = 32 + FAKESTACK_HEADER_SZ;
size_t validsz = realsz - 32 - 16; /* remove redzones */
}
assert(realsz <= (fakestack_min << sz_class));
- assert(__asan_option_detect_stack_use_after_return);
vm_size_t sz = fakestack_min << sz_class;
zone_t zone = fakestack_zones[sz_class];
kasan_free_internal((void **)&dst, &sz, KASAN_HEAP_FAKESTACK, &zone, realsz, 1, FAKESTACK_QUARANTINE);
if (dst) {
- zfree(zone, (void *)dst);
+ zfree(zone, dst);
}
kasan_unlock(flags);
}
void NOINLINE
-kasan_unpoison_fakestack(thread_t thread)
+kasan_fakestack_drop(thread_t thread)
{
- if (!__asan_option_detect_stack_use_after_return) {
- return;
- }
-
boolean_t flags;
- kasan_lock(&flags);
-
- thread_enter_fakestack();
+ if (!thread_enter_fakestack(&flags)) {
+ panic("expected success entering fakestack\n");
+ }
struct fakestack_header_list *head = &kasan_get_thread_data(thread)->fakestack_head;
struct fakestack_header *cur;
LIST_FOREACH(cur, head, list) {
if (cur->flag == FAKESTACK_ALLOCATED) {
- cur->flag = FAKESTACK_FREED;
+ cur->flag = FAKESTACK_UNUSED;
}
}
- kasan_fakestack_gc(thread);
- thread_exit_fakestack();
kasan_unlock(flags);
}
assert(z);
zone_change(z, Z_NOCALLOUT, TRUE);
zone_change(z, Z_EXHAUST, TRUE);
- zone_change(z, Z_EXPAND, FALSE);
+ zone_change(z, Z_EXPAND, FALSE);
zone_change(z, Z_COLLECT, FALSE);
zone_change(z, Z_KASAN_QUARANTINE, FALSE);
zfill(z, maxsz / sz);
}
/* globally enable */
- __asan_option_detect_stack_use_after_return = 1;
+ if (fakestack_enabled) {
+ __asan_option_detect_stack_use_after_return = 1;
+ }
}
#else /* FAKESTACK */
#endif
-void kasan_init_thread(struct kasan_thread_data *td)
+void
+kasan_init_thread(struct kasan_thread_data *td)
{
- td->in_fakestack = 0;
LIST_INIT(&td->fakestack_head);
}