/* apple/xnu (xnu-6153.141.1) — tests/backtracing.c */
/* Copyright (c) 2016, 2019 Apple Computer, Inc. All rights reserved. */
2
3 #include <CoreSymbolication/CoreSymbolication.h>
4 #include <darwintest.h>
5 #include <dispatch/dispatch.h>
6 #include <execinfo.h>
7 #include <pthread.h>
8 #include <mach/mach.h>
9 #include <sys/mman.h>
10 #include <sys/sysctl.h>
11
T_GLOBAL_META(T_META_RUN_CONCURRENTLY(true));

/* Total number of user frames expected in the captured backtrace. */
#define USER_FRAMES (12)

/* Frames in the expected stack that are not recurse_a/recurse_b calls. */
#define NON_RECURSE_FRAMES (4)

/*
 * Expected symbol names for each frame, ordered oldest-call first.
 * NULL entries mark frames this test does not control (system library
 * frames); expect_frame() logs and skips them instead of comparing names.
 */
static const char *user_bt[USER_FRAMES] = {
	NULL,
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"expect_stack", NULL
};
25
26 static void
27 expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol,
28 unsigned long addr, unsigned int bt_idx, unsigned int max_frames)
29 {
30 const char *name;
31 unsigned int frame_idx = max_frames - bt_idx - 1;
32
33 if (CSIsNull(symbol)) {
34 T_FAIL("invalid symbol for address %#lx at frame %d", addr,
35 frame_idx);
36 return;
37 }
38
39 if (bt[frame_idx] == NULL) {
40 T_LOG("frame %2u: skipping system frame %s", frame_idx,
41 CSSymbolGetName(symbol));
42 return;
43 }
44
45 if (frame_idx >= bt_len) {
46 T_FAIL("unexpected frame '%s' (%#lx) at index %u",
47 CSSymbolGetName(symbol), addr, frame_idx);
48 return;
49 }
50
51 name = CSSymbolGetName(symbol);
52 T_QUIET; T_ASSERT_NOTNULL(name, NULL);
53 T_EXPECT_EQ_STR(name, bt[frame_idx],
54 "frame %2u: saw '%s', expected '%s'",
55 frame_idx, name, bt[frame_idx]);
56 }
57
/*
 * Report whether the running kernel is 64-bit, by checking the LP64 flag
 * on the kernel's own proc entry (pid 0).  The sysctl is issued only once;
 * subsequent calls return the cached answer.
 */
static bool
is_kernel_64_bit(void)
{
	static dispatch_once_t once;
	static bool result = false;
	dispatch_once(&once, ^{
		int name[] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, 0 /* kernproc */ };
		struct kinfo_proc info;
		size_t info_size = sizeof(info);

		int ret = sysctl(name, sizeof(name) / sizeof(name[0]), &info,
		    &info_size, NULL, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret,
		    "sysctl({ CTL_KERN, KERN_PROC, KERN_PROC_PID, 0})");

		result = (info.kp_proc.p_flag & P_LP64) != 0;
		T_LOG("executing with a %s-bit kernel", result ? "64" : "32");
	});
	return result;
}
79
/*
 * Ask the kernel to backtrace this thread's user stack (via the
 * kern.backtrace.user sysctl) and validate each returned frame against the
 * user_bt table of expected symbol names.  Skips the test on kernels where
 * the sysctl is absent (release kernels).
 */
static void __attribute__((noinline, not_tail_called))
expect_stack(void)
{
	uint64_t bt[USER_FRAMES] = { 0 };
	unsigned int bt_len = USER_FRAMES;
	int err;
	size_t bt_filled;
	bool k64;

	/* Create the task symbolicator once and reuse it on every call. */
	static CSSymbolicatorRef user_symb;
	static dispatch_once_t expect_stack_once;
	dispatch_once(&expect_stack_once, ^{
		user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
		T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
		T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
	});

	k64 = is_kernel_64_bit();
	bt_filled = USER_FRAMES;
	err = sysctlbyname("kern.backtrace.user", bt, &bt_filled, NULL, 0);
	if (err == ENOENT) {
		T_SKIP("release kernel: kern.backtrace.user sysctl returned ENOENT");
	}
	T_ASSERT_POSIX_SUCCESS(err, "sysctlbyname(\"kern.backtrace.user\")");

	bt_len = (unsigned int)bt_filled;
	T_EXPECT_EQ(bt_len, (unsigned int)USER_FRAMES,
	    "%u frames should be present in backtrace", (unsigned int)USER_FRAMES);

	for (unsigned int i = 0; i < bt_len; i++) {
		uintptr_t addr;
#if !defined(__LP64__)
		/*
		 * Backtrace frames come out as kernel words; convert them back to user
		 * uintptr_t for 32-bit processes.
		 */
		if (k64) {
			addr = (uintptr_t)(bt[i]);
		} else {
			addr = (uintptr_t)(((uint32_t *)bt)[i]);
		}
#else /* !defined(__LP64__) */
		addr = (uintptr_t)bt[i];
#endif /* !defined(__LP64__) */

		CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
		    user_symb, addr, kCSNow);
		expect_frame(user_bt, USER_FRAMES, symbol, addr, i, bt_len);
	}
}
130
/*
 * Mutually-recursive helpers that build a predictable stack of alternating
 * recurse_a/recurse_b frames for the backtrace to find.  Both are noinline
 * and not_tail_called so every call contributes a distinct stack frame.
 */
static int __attribute__((noinline, not_tail_called))
recurse_a(unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(unsigned int frames);
135
/*
 * Add one stack frame and recurse into recurse_b until the requested depth
 * is reached; at depth 1, capture and validate the backtrace instead.
 */
static int __attribute__((noinline, not_tail_called))
recurse_a(unsigned int frames)
{
	if (frames != 1) {
		return recurse_b(frames - 1) + 1;
	}

	expect_stack();
	/* Keep this frame live past expect_stack() with a harmless syscall. */
	getpid();
	return 0;
}
147
/*
 * Add one stack frame and recurse into recurse_a until the requested depth
 * is reached; at depth 1, capture and validate the backtrace instead.
 */
static int __attribute__((noinline, not_tail_called))
recurse_b(unsigned int frames)
{
	if (frames != 1) {
		return recurse_a(frames - 1) + 1;
	}

	expect_stack();
	/* Keep this frame live past expect_stack() with a harmless syscall. */
	getpid();
	return 0;
}
159
160 static void *
161 backtrace_thread(void *arg)
162 {
163 #pragma unused(arg)
164 unsigned int calls;
165
166 /*
167 * backtrace_thread, recurse_a, recurse_b, ..., __sysctlbyname
168 *
169 * Always make one less call for this frame (backtrace_thread).
170 */
171 calls = USER_FRAMES - NON_RECURSE_FRAMES;
172
173 T_LOG("backtrace thread calling into %d frames (already at %d frames)",
174 calls, NON_RECURSE_FRAMES);
175 (void)recurse_a(calls);
176 return NULL;
177 }
178
179 T_DECL(backtrace_user, "test that the kernel can backtrace user stacks",
180 T_META_CHECK_LEAKS(false), T_META_ALL_VALID_ARCHS(true))
181 {
182 pthread_t thread;
183
184 T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread,
185 NULL), "create additional thread to backtrace");
186
187 T_QUIET; T_ASSERT_POSIX_ZERO(pthread_join(thread, NULL), NULL);
188 }
189
/*
 * Verify that the kernel's user-backtrace copyout stays within the buffer
 * the caller provides: a backtrace ending exactly at a read-only guard page
 * must succeed, and one that would spill a single byte into the guard page
 * must fail with EFAULT rather than silently writing past the buffer.
 */
T_DECL(backtrace_user_bounds,
    "test that the kernel doesn't write frames out of expected bounds")
{
	uint64_t bt_init[USER_FRAMES] = {};
	size_t bt_filled = USER_FRAMES, bt_filled_after = 0;
	int error = 0;
	kern_return_t kr = KERN_FAILURE;
	void *bt_page = NULL;
	void *guard_page = NULL;
	void *bt_start = NULL;

	/*
	 * The backtrace addresses come back as kernel words.
	 */
	size_t kword_size = is_kernel_64_bit() ? 8 : 4;

	/*
	 * Get an idea of how many frames to expect.
	 */
	error = sysctlbyname("kern.backtrace.user", bt_init, &bt_filled, NULL,
	    0);
	if (error == ENOENT) {
		/* The sysctl is only present on development kernels. */
		T_SKIP("release kernel: kern.backtrace.user missing");
	}
	T_ASSERT_POSIX_SUCCESS(error, "sysctlbyname(\"kern.backtrace.user\")");

	/*
	 * Allocate two pages -- a first one that's valid and a second that
	 * will be non-writeable to catch a copyout that's too large.
	 */

	bt_page = mmap(NULL, vm_page_size * 2, PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_PRIVATE, -1, 0);
	T_WITH_ERRNO;
	T_ASSERT_NE(bt_page, MAP_FAILED, "allocated backtrace pages");
	guard_page = (char *)bt_page + vm_page_size;

	/* Drop write permission on the second page so stray writes fault. */
	error = mprotect(guard_page, vm_page_size, PROT_READ);
	T_ASSERT_POSIX_SUCCESS(error, "mprotect(..., PROT_READ) guard page");

	/*
	 * Ensure the pages are set up as expected: writable first page,
	 * write-protected guard page.
	 */

	kr = vm_write(mach_task_self(), (vm_address_t)bt_page,
	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
	T_ASSERT_MACH_SUCCESS(kr,
	    "should succeed in writing to backtrace page");

	kr = vm_write(mach_task_self(), (vm_address_t)guard_page,
	    (vm_offset_t)&(int){ 12345 }, sizeof(int));
	T_ASSERT_NE(kr, KERN_SUCCESS, "should fail to write to guard page");

	/*
	 * Ask the kernel to write the backtrace just before the guard page.
	 */

	bt_start = (char *)guard_page - (kword_size * bt_filled);
	bt_filled_after = bt_filled;

	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
	    NULL, 0);
	T_EXPECT_POSIX_SUCCESS(error,
	    "sysctlbyname(\"kern.backtrace.user\") just before guard page");
	T_EXPECT_EQ(bt_filled, bt_filled_after,
	    "both calls to backtrace should have filled in the same number of "
	    "frames");

	/*
	 * Expect the kernel to fault when writing too far: shift the buffer
	 * one byte forward so the last kernel word overlaps the guard page.
	 */

	bt_start = (char *)bt_start + 1;
	bt_filled_after = bt_filled;
	error = sysctlbyname("kern.backtrace.user", bt_start, &bt_filled_after,
	    NULL, 0);
	T_EXPECT_POSIX_FAILURE(error, EFAULT,
	    "sysctlbyname(\"kern.backtrace.user\") should fault one byte into "
	    "guard page");
}