X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/cb3231590a3c94ab4375e2228bd5e86b0cf1ad7e..c3c9b80d004dbbfdf763edeb97968c6997e3b45b:/tests/backtracing.c

diff --git a/tests/backtracing.c b/tests/backtracing.c
index f0af5447d..a56dc9dce 100644
--- a/tests/backtracing.c
+++ b/tests/backtracing.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, 2019 Apple Computer, Inc. All rights reserved. */
+// Copyright (c) 2016-2020 Apple Computer, Inc. All rights reserved.
 
 #include <CoreSymbolication/CoreSymbolication.h>
 #include <darwintest.h>
@@ -12,47 +12,65 @@ T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));
 
 #define USER_FRAMES (12)
-
-#define NON_RECURSE_FRAMES (4)
+#define MAX_SYSCALL_SETUP_FRAMES (2)
+#define NON_RECURSE_FRAMES (2)
 
 static const char *user_bt[USER_FRAMES] = {
-	NULL,
 	"backtrace_thread", "recurse_a", "recurse_b", "recurse_a", "recurse_b",
 	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
-	"expect_stack", NULL
+	"recurse_a", "recurse_b", "expect_callstack",
 };
+
+struct callstack_exp {
+	bool in_syscall_setup;
+	unsigned int syscall_frames;
+	const char **callstack;
+	size_t callstack_len;
+	unsigned int nchecked;
+};
 
 static void
-expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol,
-    unsigned long addr, unsigned int bt_idx, unsigned int max_frames)
+expect_frame(struct callstack_exp *cs, CSSymbolRef symbol,
+    unsigned long addr, unsigned int bt_idx)
 {
-	const char *name;
-	unsigned int frame_idx = max_frames - bt_idx - 1;
-
 	if (CSIsNull(symbol)) {
-		T_FAIL("invalid symbol for address %#lx at frame %d", addr,
-		    frame_idx);
-		return;
-	}
-
-	if (bt[frame_idx] == NULL) {
-		T_LOG("frame %2u: skipping system frame %s", frame_idx,
-		    CSSymbolGetName(symbol));
+		if (!cs->in_syscall_setup) {
+			T_FAIL("invalid symbol for address %#lx at frame %d", addr,
+			    bt_idx);
+		}
 		return;
 	}
 
-	if (frame_idx >= bt_len) {
-		T_FAIL("unexpected frame '%s' (%#lx) at index %u",
-		    CSSymbolGetName(symbol), addr, frame_idx);
-		return;
+	const char *name = CSSymbolGetName(symbol);
+	if (name) {
+		if (cs->in_syscall_setup) {
+			if (strcmp(name, cs->callstack[cs->callstack_len - 1]) == 0) {
+				cs->in_syscall_setup = false;
+				cs->syscall_frames = bt_idx;
+				T_LOG("found start of controlled stack at frame %u, expected "
+				    "index %zu", cs->syscall_frames, cs->callstack_len - 1);
+			} else {
+				T_LOG("found syscall setup symbol %s at frame %u", name,
+				    bt_idx);
+			}
+		}
+		if (!cs->in_syscall_setup) {
+			if (cs->nchecked >= cs->callstack_len) {
+				T_LOG("frame %2u: skipping system frame %s", bt_idx, name);
+			} else {
+				size_t frame_idx = cs->callstack_len - cs->nchecked - 1;
+				T_EXPECT_EQ_STR(name, cs->callstack[frame_idx],
+				    "frame %2zu: saw '%s', expected '%s'",
+				    frame_idx, name, cs->callstack[frame_idx]);
+			}
+			cs->nchecked++;
+		}
+	} else {
+		if (!cs->in_syscall_setup) {
+			T_ASSERT_NOTNULL(name, NULL, "symbol should not be NULL");
+		}
 	}
-
-	name = CSSymbolGetName(symbol);
-	T_QUIET; T_ASSERT_NOTNULL(name, NULL);
-	T_EXPECT_EQ_STR(name, bt[frame_idx],
-	    "frame %2u: saw '%s', expected '%s'",
-	    frame_idx, name, bt[frame_idx]);
 }
 
 static bool
@@ -77,14 +95,21 @@ is_kernel_64_bit(void)
 	return k64;
 }
 
+// Use an extra, non-inlineable function so that any frames after expect_stack
+// can be safely ignored. This insulates the test from changes in how syscalls
+// are called by Libc and the kernel.
+static int __attribute__((noinline, not_tail_called))
+backtrace_current_thread_wrapper(uint64_t *bt, size_t *bt_filled)
+{
+	int ret = sysctlbyname("kern.backtrace.user", bt, bt_filled, NULL, 0);
+	getpid(); // Really prevent tail calls.
+	return ret;
+}
+
 static void __attribute__((noinline, not_tail_called))
-expect_stack(void)
+expect_callstack(void)
 {
-	uint64_t bt[USER_FRAMES] = { 0 };
-	unsigned int bt_len = USER_FRAMES;
-	int err;
-	size_t bt_filled;
-	bool k64;
+	uint64_t bt[USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES] = { 0 };
 
 	static CSSymbolicatorRef user_symb;
 	static dispatch_once_t expect_stack_once;
@@ -94,38 +119,49 @@ expect_stack(void)
 		T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
 	});
 
-	k64 = is_kernel_64_bit();
-	bt_filled = USER_FRAMES;
-	err = sysctlbyname("kern.backtrace.user", bt, &bt_filled, NULL, 0);
-	if (err == ENOENT) {
+	size_t bt_filled = USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES;
+	int ret = backtrace_current_thread_wrapper(bt, &bt_filled);
+	if (ret == -1 && errno == ENOENT) {
 		T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT");
 	}
-	T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(\"kern.backtrace.user\")");
-
-	bt_len = (unsigned int)bt_filled;
-	T_EXPECT_EQ(bt_len, (unsigned int)USER_FRAMES,
-	    "%u frames should be present in backtrace", (unsigned int)USER_FRAMES);
-
+	T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"kern.backtrace.user\")");
+	T_LOG("kernel returned %zu frame backtrace", bt_filled);
+
+	unsigned int bt_len = (unsigned int)bt_filled;
+	T_EXPECT_GE(bt_len, (unsigned int)USER_FRAMES,
+	    "at least %u frames should be present in backtrace", USER_FRAMES);
+	T_EXPECT_LE(bt_len, (unsigned int)USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES,
+	    "at most %u frames should be present in backtrace",
+	    USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES);
+
+	struct callstack_exp callstack = {
+		.in_syscall_setup = true,
+		.syscall_frames = 0,
+		.callstack = user_bt,
+		.callstack_len = USER_FRAMES,
+		.nchecked = 0,
+	};
 	for (unsigned int i = 0; i < bt_len; i++) {
 		uintptr_t addr;
 #if !defined(__LP64__)
-		/*
-		 * Backtrace frames come out as kernel words; convert them back to user
-		 * uintptr_t for 32-bit processes.
-		 */
-		if (k64) {
+		// Backtrace frames come out as kernel words; convert them back to user
+		// uintptr_t for 32-bit processes.
+		if (is_kernel_64_bit()) {
 			addr = (uintptr_t)(bt[i]);
 		} else {
 			addr = (uintptr_t)(((uint32_t *)bt)[i]);
 		}
-#else /* defined(__LP32__) */
+#else // defined(__LP32__)
 		addr = (uintptr_t)bt[i];
-#endif /* defined(__LP32__) */
+#endif // defined(__LP32__)
 
 		CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
 			user_symb, addr, kCSNow);
-		expect_frame(user_bt, USER_FRAMES, symbol, addr, i, bt_len);
+		expect_frame(&callstack, symbol, addr, i);
 	}
+
+	T_EXPECT_GE(callstack.nchecked, USER_FRAMES,
+	    "checked enough frames for correct symbols");
 }
 
@@ -137,8 +173,8 @@ static int __attribute__((noinline, not_tail_called))
 recurse_a(unsigned int frames)
 {
 	if (frames == 1) {
-		expect_stack();
-		getpid();
+		expect_callstack();
+		getpid(); // Really prevent tail calls.
 		return 0;
 	}
 
@@ -149,8 +185,8 @@ static int __attribute__((noinline, not_tail_called))
 recurse_b(unsigned int frames)
 {
 	if (frames == 1) {
-		expect_stack();
-		getpid();
+		expect_callstack();
+		getpid(); // Really prevent tail calls.
 		return 0;
 	}
 
@@ -163,11 +199,9 @@ backtrace_thread(void *arg)
 #pragma unused(arg)
 	unsigned int calls;
 
-	/*
-	 * backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname
-	 *
-	 * Always make one less call for this frame (backtrace_thread).
-	 */
+	// backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname
+	//
+	// Always make one less call for this frame (backtrace_thread).
 	calls = USER_FRAMES - NON_RECURSE_FRAMES;
 	T_LOG("backtrace thread calling into %d frames (already at %d frames)",
@@ -181,6 +215,8 @@ T_DECL(backtrace_user, "test that the kernel can backtrace user stacks",
 {
 	pthread_t thread;
 
+	// Run the test from a different thread to insulate it from libdarwintest
+	// setup.
 	T_QUIET;
 	T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread,
 	    NULL), "create additional thread to backtrace");
@@ -198,26 +234,18 @@ T_DECL(backtrace_user_bounds,
 	void *guard_page = NULL;
 	void *bt_start = NULL;
 
-	/*
-	 * The backtrace addresses come back as kernel words.
-	 */
+	// The backtrace addresses come back as kernel words.
 	size_t kword_size = is_kernel_64_bit() ? 8 : 4;
 
-	/*
-	 * Get an idea of how many frames to expect.
-	 */
-	error = sysctlbyname("kern.backtrace.user", bt_init, &bt_filled, NULL,
-	    0);
-	if (error == ENOENT) {
+	// Get an idea of how many frames to expect.
+	int ret = sysctlbyname("kern.backtrace.user", bt_init, &bt_filled, NULL, 0);
+	if (ret == -1 && errno == ENOENT) {
 		T_SKIP("release kernel: kern.backtrace.user missing");
 	}
 	T_ASSERT_POSIX_SUCCESS(error, "sysctlbyname(\"kern.backtrace.user\")");
 
-	/*
-	 * Allocate two pages -- a first one that's valid and a second that
-	 * will be non-writeable to catch a copyout that's too large.
-	 */
-
+	// Allocate two pages -- a first one that's valid and a second that
+	// will be non-writeable to catch a copyout that's too large.
 	bt_page = mmap(NULL, vm_page_size * 2, PROT_READ | PROT_WRITE,
 	    MAP_ANON | MAP_PRIVATE, -1, 0);
 	T_WITH_ERRNO;
@@ -227,23 +255,16 @@ T_DECL(backtrace_user_bounds,
 	error = mprotect(guard_page, vm_page_size, PROT_READ);
 	T_ASSERT_POSIX_SUCCESS(error, "mprotect(..., PROT_READ) guard page");
 
-	/*
-	 * Ensure the pages are set up as expected.
-	 */
-
+	// Ensure the pages are set up as expected.
 	kr = vm_write(mach_task_self(), (vm_address_t)bt_page,
 	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
 	T_ASSERT_MACH_SUCCESS(kr, "should succeed in writing to backtrace page");
-
 	kr = vm_write(mach_task_self(), (vm_address_t)guard_page,
 	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
 	T_ASSERT_NE(kr, KERN_SUCCESS, "should fail to write to guard page");
 
-	/*
-	 * Ask the kernel to write the backtrace just before the guard page.
-	 */
-
+	// Ask the kernel to write the backtrace just before the guard page.
 	bt_start = (char *)guard_page - (kword_size * bt_filled);
 	bt_filled_after = bt_filled;
@@ -255,10 +276,7 @@ T_DECL(backtrace_user_bounds,
 	    "both calls to backtrace should have filled in the same number of "
 	    "frames");
 
-	/*
-	 * Expect the kernel to fault when writing too far.
-	 */
-
+	// Expect the kernel to fault when writing too far.
 	bt_start = (char *)bt_start + 1;
 	bt_filled_after = bt_filled;
 	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
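
For context, a minimal standalone sketch of the interface this test exercises;
it is not part of the commit above. The 32-frame capacity, the use of
getpagesize() in place of the test's vm_page_size, and the assumption of 8-byte
kernel words (the test derives kword_size from is_kernel_64_bit()) are all
illustrative choices. As in the test, the length passed through sysctlbyname()
is counted in frames rather than bytes, and on release kernels the sysctl is
absent, so the call fails with ENOENT.

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/sysctl.h>
	#include <unistd.h>

	int
	main(void)
	{
		// Ask the kernel for the current thread's user backtrace. The
		// length is counted in frames, not bytes, and frames come back
		// as kernel words.
		uint64_t bt[32] = { 0 };   // 32 is an arbitrary capacity
		size_t bt_filled = 32;     // in: capacity; out: frames written

		if (sysctlbyname("kern.backtrace.user", bt, &bt_filled, NULL, 0) == -1) {
			if (errno == ENOENT) {
				// Release kernel: the sysctl is compiled out.
				fprintf(stderr, "kern.backtrace.user missing\n");
				return 0;
			}
			perror("sysctlbyname");
			return 1;
		}
		for (size_t i = 0; i < bt_filled; i++) {
			printf("frame %2zu: %#llx\n", i, (unsigned long long)bt[i]);
		}

		// The bounds test places the output buffer flush against a
		// PROT_READ guard page so that a copyout past the requested
		// length faults instead of silently scribbling on memory.
		// Sketch of that setup, assuming 8-byte kernel words:
		size_t page = (size_t)getpagesize();
		char *pages = mmap(NULL, page * 2, PROT_READ | PROT_WRITE,
		    MAP_ANON | MAP_PRIVATE, -1, 0);
		if (pages == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		if (mprotect(pages + page, page, PROT_READ) == -1) {
			perror("mprotect");
			return 1;
		}

		// End the buffer exactly at the guard page; the kernel should
		// fill the same number of frames as before without faulting.
		size_t len = bt_filled;
		uint64_t *bounded = (uint64_t *)(void *)(pages + page) - len;
		if (sysctlbyname("kern.backtrace.user", bounded, &len, NULL, 0) == -1) {
			perror("bounded sysctlbyname");
			return 1;
		}
		printf("bounded call filled %zu frames\n", len);
		return 0;
	}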