// Copyright (c) 2016-2020 Apple Computer, Inc. All rights reserved.

#include <CoreSymbolication/CoreSymbolication.h>
#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <errno.h>
#include <execinfo.h>
#include <mach/mach.h>
#include <pthread.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <unistd.h>

T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));

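// Expected frame counts: USER_FRAMES matches the length of user_bt below;
// MAX_SYSCALL_SETUP_FRAMES allows for the Libc and syscall-entry frames that
// may appear above the controlled stack; NON_RECURSE_FRAMES counts the two
// non-recursive frames (backtrace_thread and expect_callstack) bracketing the
// recursion.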
#define USER_FRAMES (12)
#define MAX_SYSCALL_SETUP_FRAMES (2)
#define NON_RECURSE_FRAMES (2)

static const char *user_bt[USER_FRAMES] = {
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "expect_callstack",
};

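// State for checking a backtrace against user_bt: frames are treated as
// syscall setup until the innermost expected symbol (the last entry of the
// callstack array) is seen, after which each frame is compared against the
// expected names in reverse order, from innermost to outermost.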
struct callstack_exp {
	bool in_syscall_setup;
	unsigned int syscall_frames;
	const char **callstack;
	size_t callstack_len;
	unsigned int nchecked;
};

static void
expect_frame(struct callstack_exp *cs, CSSymbolRef symbol,
    unsigned long addr, unsigned int bt_idx)
{
	if (CSIsNull(symbol)) {
		if (!cs->in_syscall_setup) {
			T_FAIL("invalid symbol for address %#lx at frame %u", addr,
			    bt_idx);
		}
		return;
	}

	const char *name = CSSymbolGetName(symbol);
	if (name) {
		if (cs->in_syscall_setup) {
			if (strcmp(name, cs->callstack[cs->callstack_len - 1]) == 0) {
				cs->in_syscall_setup = false;
				cs->syscall_frames = bt_idx;
				T_LOG("found start of controlled stack at frame %u, expected "
				    "index %zu", cs->syscall_frames, cs->callstack_len - 1);
			} else {
				T_LOG("found syscall setup symbol %s at frame %u", name,
				    bt_idx);
			}
		}
		if (!cs->in_syscall_setup) {
			if (cs->nchecked >= cs->callstack_len) {
				T_LOG("frame %2u: skipping system frame %s", bt_idx, name);
			} else {
				size_t frame_idx = cs->callstack_len - cs->nchecked - 1;
				T_EXPECT_EQ_STR(name, cs->callstack[frame_idx],
				    "frame %2zu: saw '%s', expected '%s'",
				    frame_idx, name, cs->callstack[frame_idx]);
			}
			cs->nchecked++;
		}
	} else {
		if (!cs->in_syscall_setup) {
			T_ASSERT_NOTNULL(name, "symbol should not be NULL");
		}
	}
}

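// The kern.backtrace.user sysctl copies frames out as kernel-sized words, so
// the test needs the kernel's bitness to interpret them. Querying
// KERN_PROC_PID with pid 0 returns the kernel's own proc entry, whose p_flag
// indicates whether it is running as LP64.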
static bool
is_kernel_64_bit(void)
{
	static dispatch_once_t k64_once;
	static bool k64 = false;
	dispatch_once(&k64_once, ^{
		int errb;
		int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 /* kernproc */ };

		struct kinfo_proc kp;
		size_t len = sizeof(kp);

		errb = sysctl(mib, sizeof(mib) / sizeof(mib[0]), &kp, &len, NULL, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(errb,
		    "sysctl({ CTL_KERN, KERN_PROC, KERN_PROC_PID, 0})");

		k64 = kp.kp_proc.p_flag & P_LP64;
		T_LOG("executing with a %s-bit kernel", k64 ? "64" : "32");
	});
	return k64;
}

// Use an extra, non-inlineable function so that any frames after
// expect_callstack can be safely ignored. This insulates the test from
// changes in how syscalls are called by Libc and the kernel.
static int __attribute__((noinline, not_tail_called))
backtrace_current_thread_wrapper(uint64_t *bt, size_t *bt_filled)
{
	int ret = sysctlbyname("kern.backtrace.user", bt, bt_filled, NULL, 0);
	getpid(); // Really prevent tail calls.
	return ret;
}

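// Capture a backtrace of the current thread via kern.backtrace.user,
// symbolicate each returned address with CoreSymbolication, and check the
// symbol names against the expected user_bt callstack.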
static void __attribute__((noinline, not_tail_called))
expect_callstack(void)
{
	uint64_t bt[USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES] = { 0 };

	static CSSymbolicatorRef user_symb;
	static dispatch_once_t expect_stack_once;
	dispatch_once(&expect_stack_once, ^{
		user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
		T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
		T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
	});

	size_t bt_filled = USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES;
	int ret = backtrace_current_thread_wrapper(bt, &bt_filled);
	if (ret == -1 && errno == ENOENT) {
		T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT");
	}
	T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"kern.backtrace.user\")");
	T_LOG("kernel returned %zu frame backtrace", bt_filled);

	unsigned int bt_len = (unsigned int)bt_filled;
	T_EXPECT_GE(bt_len, (unsigned int)USER_FRAMES,
	    "at least %u frames should be present in backtrace", USER_FRAMES);
	T_EXPECT_LE(bt_len, (unsigned int)USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES,
	    "at most %u frames should be present in backtrace",
	    USER_FRAMES + MAX_SYSCALL_SETUP_FRAMES);

	struct callstack_exp callstack = {
		.in_syscall_setup = true,
		.syscall_frames = 0,
		.callstack = user_bt,
		.callstack_len = USER_FRAMES,
		.nchecked = 0,
	};
	for (unsigned int i = 0; i < bt_len; i++) {
		uintptr_t addr;
#if !defined(__LP64__)
		// Backtrace frames come out as kernel words; convert them back to
		// user uintptr_t for 32-bit processes.
		if (is_kernel_64_bit()) {
			addr = (uintptr_t)(bt[i]);
		} else {
			addr = (uintptr_t)(((uint32_t *)bt)[i]);
		}
#else // defined(__LP64__)
		addr = (uintptr_t)bt[i];
#endif // !defined(__LP64__)

		CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
		    user_symb, addr, kCSNow);
		expect_frame(&callstack, symbol, addr, i);
	}

	T_EXPECT_GE(callstack.nchecked, (unsigned int)USER_FRAMES,
	    "checked enough frames for correct symbols");
}

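// recurse_a and recurse_b call each other to build a deep stack of
// alternating, predictable symbols; both are noinline and not_tail_called so
// the compiler cannot merge or eliminate the frames the test expects to see.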
static int __attribute__((noinline, not_tail_called))
recurse_a(unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(unsigned int frames);

static int __attribute__((noinline, not_tail_called))
recurse_a(unsigned int frames)
{
	if (frames == 1) {
		expect_callstack();
		getpid(); // Really prevent tail calls.
		return 0;
	}

	return recurse_b(frames - 1) + 1;
}

static int __attribute__((noinline, not_tail_called))
recurse_b(unsigned int frames)
{
	if (frames == 1) {
		expect_callstack();
		getpid(); // Really prevent tail calls.
		return 0;
	}

	return recurse_a(frames - 1) + 1;
}

static void *
backtrace_thread(void *arg)
{
#pragma unused(arg)
	unsigned int calls;

	// backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname
	//
	// Subtract the non-recursive frames (backtrace_thread and
	// expect_callstack), which are not created by the calls below.
	calls = USER_FRAMES - NON_RECURSE_FRAMES;

	T_LOG("backtrace thread calling into %u frames (already at %d frames)",
	    calls, NON_RECURSE_FRAMES);
	(void)recurse_a(calls);
	return NULL;
}

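// End-to-end check: build the expected stack on a new thread and have the
// kernel backtrace it.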
T_DECL(backtrace_user, "test that the kernel can backtrace user stacks",
    T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true))
{
	pthread_t thread;

	// Run the test from a different thread to insulate it from libdarwintest
	// setup.
	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread,
	    NULL), "create additional thread to backtrace");

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL);
}

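// Bounds-checking strategy: map a writeable page immediately followed by a
// read-only guard page, ask the kernel to copy the backtrace out flush
// against the page boundary (which should succeed), then shift the
// destination one byte further so the copyout must touch the guard page and
// fault with EFAULT.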
T_DECL(backtrace_user_bounds,
    "test that the kernel doesn't write frames out of expected bounds")
{
	uint64_t bt_init[USER_FRAMES] = {};
	size_t bt_filled = USER_FRAMES, bt_filled_after = 0;
	int error = 0;
	kern_return_t kr = KERN_FAILURE;
	void *bt_page = NULL;
	void *guard_page = NULL;
	void *bt_start = NULL;

	// The backtrace addresses come back as kernel words.
	size_t kword_size = is_kernel_64_bit() ? 8 : 4;

	// Get an idea of how many frames to expect.
	int ret = sysctlbyname("kern.backtrace.user", bt_init, &bt_filled, NULL, 0);
	if (ret == -1 && errno == ENOENT) {
		T_SKIP("release kernel: kern.backtrace.user missing");
	}
	T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"kern.backtrace.user\")");

	// Allocate two pages -- a first one that's valid and a second that
	// will be non-writeable to catch a copyout that's too large.
	bt_page = mmap(NULL, vm_page_size * 2, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	T_WITH_ERRNO;
	T_ASSERT_NE(bt_page, MAP_FAILED, "allocated backtrace pages");
	guard_page = (char *)bt_page + vm_page_size;

	error = mprotect(guard_page, vm_page_size, PROT_READ);
	T_ASSERT_POSIX_SUCCESS(error, "mprotect(..., PROT_READ) guard page");

	// Ensure the pages are set up as expected.
	kr = vm_write(mach_task_self(), (vm_address_t)bt_page,
	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
	T_ASSERT_MACH_SUCCESS(kr,
	    "should succeed in writing to backtrace page");
	kr = vm_write(mach_task_self(), (vm_address_t)guard_page,
	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
	T_ASSERT_NE(kr, KERN_SUCCESS, "should fail to write to guard page");

	// Ask the kernel to write the backtrace just before the guard page.
	bt_start = (char *)guard_page - (kword_size * bt_filled);
	bt_filled_after = bt_filled;

	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
	    NULL, 0);
	T_EXPECT_POSIX_SUCCESS(error,
	    "sysctlbyname(\"kern.backtrace.user\") just before guard page");
	T_EXPECT_EQ(bt_filled, bt_filled_after,
	    "both calls to backtrace should have filled in the same number of "
	    "frames");

	// Expect the kernel to fault when writing too far.
	bt_start = (char *)bt_start + 1;
	bt_filled_after = bt_filled;
	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
	    NULL, 0);
	T_EXPECT_POSIX_FAILURE(error, EFAULT,
	    "sysctlbyname(\"kern.backtrace.user\") should fault one byte into "
	    "guard page");
}