]> git.saurik.com Git - apple/xnu.git/blob - tests/backtracing.c
379960766a58e8c49ce727b4ab6fdb439a29c4ce
[apple/xnu.git] / tests / backtracing.c
1 /* Copyright (c) 2016, 2019 Apple Computer, Inc. All rights reserved. */
2
3 #include <CoreSymbolication/CoreSymbolication.h>
4 #include <darwintest.h>
5 #include <dispatch/dispatch.h>
6 #include <execinfo.h>
7 #include <pthread.h>
8 #include <mach/mach.h>
9 #include <sys/mman.h>
10 #include <sys/sysctl.h>
11
/* Total number of frames expected in the kernel-produced backtrace. */
#define USER_FRAMES (12)

/* Frames on the stack before the recursion starts (thread entry, etc.). */
#define NON_RECURSE_FRAMES (5)

/*
 * Expected symbol name for each backtrace frame, ordered from the outermost
 * frame (index 0) to the innermost.  NULL entries are system frames (library
 * entry points whose names vary across OS versions); expect_frame() only
 * logs and skips those.
 */
static const char *user_bt[USER_FRAMES] = {
	NULL, NULL,
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a",
	"expect_stack", NULL
};
23
24 static void
25 expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol,
26 unsigned long addr, unsigned int bt_idx, unsigned int max_frames)
27 {
28 const char *name;
29 unsigned int frame_idx = max_frames - bt_idx - 1;
30
31 if (bt[frame_idx] == NULL) {
32 T_LOG("frame %2u: skipping system frame", frame_idx);
33 return;
34 }
35
36 if (CSIsNull(symbol)) {
37 T_FAIL("invalid symbol for address %#lx at frame %d", addr, frame_idx);
38 return;
39 }
40
41 if (frame_idx >= bt_len) {
42 T_FAIL("unexpected frame '%s' (%#lx) at index %u",
43 CSSymbolGetName(symbol), addr, frame_idx);
44 return;
45 }
46
47 name = CSSymbolGetName(symbol);
48 T_QUIET; T_ASSERT_NOTNULL(name, NULL);
49 T_EXPECT_EQ_STR(name, bt[frame_idx],
50 "frame %2u: saw '%s', expected '%s'",
51 frame_idx, name, bt[frame_idx]);
52 }
53
54 static bool
55 is_kernel_64_bit(void)
56 {
57 static dispatch_once_t k64_once;
58 static bool k64 = false;
59 dispatch_once(&k64_once, ^{
60 int errb;
61 int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 /* kernproc */ };
62
63 struct kinfo_proc kp;
64 size_t len = sizeof(kp);
65
66 errb = sysctl(mib, sizeof(mib) / sizeof(mib[0]), &kp, &len, NULL, 0);
67 T_QUIET; T_ASSERT_POSIX_SUCCESS(errb,
68 "sysctl({ CTL_KERN, KERN_PROC, KERN_PROC_PID, 0})");
69
70 k64 = kp.kp_proc.p_flag & P_LP64;
71 T_LOG("executing with a %s-bit kernel", k64 ? "64" : "32");
72 });
73 return k64;
74 }
75
/*
 * Ask the kernel to backtrace the calling thread's user stack via the
 * kern.backtrace.user sysctl, then symbolicate each returned address and
 * compare it against the expected names in user_bt.  Skips the test on
 * release kernels, where the debug-only sysctl does not exist (ENOENT).
 */
static void __attribute__((noinline, not_tail_called))
expect_stack(void)
{
	uint64_t bt[USER_FRAMES] = { 0 };
	unsigned int bt_len = USER_FRAMES;
	int err;
	size_t bt_filled;
	bool k64;

	/* Create the symbolicator for this task once; reused across calls. */
	static CSSymbolicatorRef user_symb;
	static dispatch_once_t expect_stack_once;
	dispatch_once(&expect_stack_once, ^{
		user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
		T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
		T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
	});

	k64 = is_kernel_64_bit();
	bt_filled = USER_FRAMES;
	/* On success, bt_filled is updated to the number of frames written. */
	err = sysctlbyname("kern.backtrace.user", bt, &bt_filled, NULL, 0);
	if (err == ENOENT) {
		T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT");
	}
	T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(\"kern.backtrace.user\")");

	bt_len = (unsigned int)bt_filled;
	T_EXPECT_EQ(bt_len, (unsigned int)USER_FRAMES,
	    "%u frames should be present in backtrace", (unsigned int)USER_FRAMES);

	for (unsigned int i = 0; i < bt_len; i++) {
		uintptr_t addr;
#if !defined(__LP64__)
		/*
		 * Backtrace frames come out as kernel words; convert them back to user
		 * uintptr_t for 32-bit processes.
		 */
		if (k64) {
			addr = (uintptr_t)(bt[i]);
		} else {
			addr = (uintptr_t)(((uint32_t *)bt)[i]);
		}
#else /* defined(__LP64__) */
		addr = (uintptr_t)bt[i];
#endif /* defined(__LP64__) */

		CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
			user_symb, addr, kCSNow);
		expect_frame(user_bt, USER_FRAMES, symbol, addr, i, bt_len);
	}
}
126
/*
 * recurse_a and recurse_b call each other to build a stack of a known number
 * of frames, then trigger the backtrace check via expect_stack().  Forward
 * declarations are required for the mutual recursion.
 */
static int __attribute__((noinline, not_tail_called))
recurse_a(unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(unsigned int frames);
131
/*
 * Add one frame to the stack, alternating with recurse_b, and run the
 * backtrace check when only one requested frame remains.
 */
static int __attribute__((noinline, not_tail_called))
recurse_a(unsigned int frames)
{
	if (frames != 1) {
		return recurse_b(frames - 1) + 1;
	}

	expect_stack();
	/* A trailing call keeps expect_stack() from being last in this frame. */
	getpid();
	return 0;
}
143
/*
 * Add one frame to the stack, alternating with recurse_a, and run the
 * backtrace check when only one requested frame remains.
 */
static int __attribute__((noinline, not_tail_called))
recurse_b(unsigned int frames)
{
	if (frames != 1) {
		return recurse_a(frames - 1) + 1;
	}

	expect_stack();
	/* A trailing call keeps expect_stack() from being last in this frame. */
	getpid();
	return 0;
}
155
156 static void *
157 backtrace_thread(void *arg)
158 {
159 #pragma unused(arg)
160 unsigned int calls;
161
162 /*
163 * backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname
164 *
165 * Always make one less call for this frame (backtrace_thread).
166 */
167 calls = USER_FRAMES - NON_RECURSE_FRAMES;
168
169 T_LOG("backtrace thread calling into %d frames (already at %d frames)",
170 calls, NON_RECURSE_FRAMES);
171 (void)recurse_a(calls);
172 return NULL;
173 }
174
175 T_DECL(backtrace_user, "test that the kernel can backtrace user stacks",
176 T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true))
177 {
178 pthread_t thread;
179
180 T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread,
181 NULL), "create additional thread to backtrace");
182
183 T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL);
184 }
185
T_DECL(backtrace_user_bounds,
    "test that the kernel doesn't write frames out of expected bounds")
{
	uint64_t bt_init[USER_FRAMES] = {};
	size_t bt_filled = USER_FRAMES, bt_filled_after = 0;
	int error = 0;
	kern_return_t kr = KERN_FAILURE;
	void *bt_page = NULL;
	void *guard_page = NULL;
	void *bt_start = NULL;

	/*
	 * The backtrace addresses come back as kernel words.
	 */
	size_t kword_size = is_kernel_64_bit() ? 8 : 4;

	/*
	 * Get an idea of how many frames to expect.
	 */
	error = sysctlbyname("kern.backtrace.user", bt_init, &bt_filled, NULL,
	    0);
	if (error == ENOENT) {
		/* The debug-only sysctl doesn't exist on release kernels. */
		T_SKIP("release kernel: kern.backtrace.user missing");
	}
	T_ASSERT_POSIX_SUCCESS(error, "sysctlbyname(\"kern.backtrace.user\")");

	/*
	 * Allocate two pages -- a first one that's valid and a second that
	 * will be non-writeable to catch a copyout that's too large.
	 */

	bt_page = mmap(NULL, vm_page_size * 2, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	T_WITH_ERRNO;
	T_ASSERT_NE(bt_page, MAP_FAILED, "allocated backtrace pages");
	guard_page = (char *)bt_page + vm_page_size;

	/* Make the second page read-only so any write to it faults. */
	error = mprotect(guard_page, vm_page_size, PROT_READ);
	T_ASSERT_POSIX_SUCCESS(error, "mprotect(..., PROT_READ) guard page");

	/*
	 * Ensure the pages are set up as expected.
	 */

	kr = vm_write(mach_task_self(), (vm_address_t)bt_page,
	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
	T_ASSERT_MACH_SUCCESS(kr,
	    "should succeed in writing to backtrace page");

	kr = vm_write(mach_task_self(), (vm_address_t)guard_page,
	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
	T_ASSERT_NE(kr, KERN_SUCCESS, "should fail to write to guard page");

	/*
	 * Ask the kernel to write the backtrace just before the guard page.
	 */

	/* Place the buffer so its last frame ends exactly at the guard page. */
	bt_start = (char *)guard_page - (kword_size * bt_filled);
	bt_filled_after = bt_filled;

	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
	    NULL, 0);
	T_EXPECT_POSIX_SUCCESS(error,
	    "sysctlbyname(\"kern.backtrace.user\") just before guard page");
	T_EXPECT_EQ(bt_filled, bt_filled_after,
	    "both calls to backtrace should have filled in the same number of "
	    "frames");

	/*
	 * Expect the kernel to fault when writing too far.
	 */

	/* Shift by one byte so the copyout must spill into the guard page. */
	bt_start = (char *)bt_start + 1;
	bt_filled_after = bt_filled;
	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
	    NULL, 0);
	T_EXPECT_POSIX_FAILURE(error, EFAULT,
	    "sysctlbyname(\"kern.backtrace.user\") should fault one byte into "
	    "guard page");
}