/* tests/kperf_backtracing.c — from apple/xnu (release xnu-4903.270.47) */
1 #ifdef T_NAMESPACE
2 #undef T_NAMESPACE
3 #endif
4
5 #include <CoreSymbolication/CoreSymbolication.h>
6 #include <darwintest.h>
7 #include <dispatch/dispatch.h>
8 #include <kperf/kperf.h>
9 #include <ktrace/session.h>
10 #include <System/sys/kdebug.h>
11 #include <pthread.h>
12
13 #include "kperf_helpers.h"
14
/*
 * kdebug debugids emitted by kperf for stack samples: a header event
 * (frame count + flags) followed by data events (batches of addresses),
 * for kernel (K) and user (U) stacks respectively.
 */
#define PERF_STK_KHDR  UINT32_C(0x25020014)
#define PERF_STK_UHDR  UINT32_C(0x25020018)
#define PERF_STK_KDATA UINT32_C(0x2502000c)
#define PERF_STK_UDATA UINT32_C(0x25020010)
19
/* Namespace all tests in this file and disable the harness's leak checks. */
T_GLOBAL_META(
    T_META_NAMESPACE("xnu.kperf"),
    T_META_CHECK_LEAKS(false));
23
24 static void
25 expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol,
26 unsigned long addr, unsigned int bt_idx, unsigned int max_frames)
27 {
28 const char *name;
29 unsigned int frame_idx = max_frames - bt_idx - 1;
30
31 if (!bt[frame_idx]) {
32 T_LOG("frame %2u: skipping system frame", frame_idx);
33 return;
34 }
35
36 if (CSIsNull(symbol)) {
37 T_FAIL("invalid symbol for address %#lx at frame %d", addr, frame_idx);
38 return;
39 }
40
41 if (frame_idx >= bt_len) {
42 T_FAIL("unexpected frame '%s' (%#lx) at index %u",
43 CSSymbolGetName(symbol), addr, frame_idx);
44 return;
45 }
46
47 name = CSSymbolGetName(symbol);
48 T_QUIET; T_ASSERT_NOTNULL(name, NULL);
49 T_EXPECT_EQ_STR(name, bt[frame_idx],
50 "frame %2u: saw '%s', expected '%s'",
51 frame_idx, name, bt[frame_idx]);
52 }
53
/*
 * Expect to see either user or kernel stacks on thread with ID `tid` with a
 * signature of `bt` of length `bt_len`. Updates `stacks_seen` when stack
 * is found.
 *
 * Can also allow stacks to be larger than the signature -- additional frames
 * near the current PC will be ignored. This allows stacks to potentially be
 * in the middle of a signalling system call (which signals that it is safe to
 * start sampling).
 *
 * A tid of 0 matches any thread.  Registers two handlers on the ktrace
 * session: one for the stack header event and one for the data events
 * that carry the addresses.
 */
static void
expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen,
    bool kern, const char **bt, unsigned int bt_len, unsigned int allow_larger_by)
{
	CSSymbolicatorRef symb;
	uint32_t hdr_debugid;
	uint32_t data_debugid;
	/* state shared between the header and data handlers below */
	__block unsigned int stacks = 0;      /* header events matched so far */
	__block unsigned int frames = 0;      /* frames consumed from data events */
	__block unsigned int hdr_frames = 0;  /* frame count promised by the header */
	__block unsigned int allow_larger = allow_larger_by;

	if (kern) {
		/* kernel symbolicator is created once and shared */
		static CSSymbolicatorRef kern_symb;
		static dispatch_once_t kern_symb_once;

		hdr_debugid = PERF_STK_KHDR;
		data_debugid = PERF_STK_KDATA;

		dispatch_once(&kern_symb_once, ^(void) {
			kern_symb = CSSymbolicatorCreateWithMachKernel();
			T_QUIET; T_ASSERT_FALSE(CSIsNull(kern_symb), NULL);
		});
		symb = kern_symb;
	} else {
		/* user addresses are symbolicated against this test's task */
		static CSSymbolicatorRef user_symb;
		static dispatch_once_t user_symb_once;

		hdr_debugid = PERF_STK_UHDR;
		data_debugid = PERF_STK_UDATA;

		dispatch_once(&user_symb_once, ^(void) {
			user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
			T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
			T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
		});
		symb = user_symb;
	}

	/* header event: announces a stack and how many frames will follow */
	ktrace_events_single(s, hdr_debugid, ^(struct trace_point *tp) {
		if (tid != 0 && tid != tp->threadid) {
			return;
		}

		T_LOG("found stack from thread %#lx", tp->threadid);
		stacks++;
		/* bit 0 of arg1 marks the stack as valid */
		if (!(tp->arg1 & 1)) {
			T_FAIL("invalid %s stack on thread %#lx", kern ? "kernel" : "user",
			    tp->threadid);
			return;
		}

		hdr_frames = (unsigned int)tp->arg2;
		/* ignore extra link register or value pointed to by stack pointer */
		hdr_frames -= 1;

		T_QUIET; T_EXPECT_GE(hdr_frames, bt_len,
		    "number of frames in header");
		T_QUIET; T_EXPECT_LE(hdr_frames, bt_len + allow_larger,
		    "number of frames in header");
		if (hdr_frames > bt_len && allow_larger > 0) {
			/*
			 * Clamp to the signature length and remember how many
			 * PC-side frames the data handler must skip.
			 */
			allow_larger = hdr_frames - bt_len;
			hdr_frames = bt_len;
		}

		T_LOG("%s stack seen", kern ? "kernel" : "user");
		frames = 0;
	});

	/* data event: carries up to 4 return addresses in arg1..arg4 */
	ktrace_events_single(s, data_debugid, ^(struct trace_point *tp) {
		if (tid != 0 && tid != tp->threadid) {
			return;
		}

		int i = 0;

		if (frames == 0 && hdr_frames > bt_len) {
			/* skip frames near the PC */
			i = (int)allow_larger;
			allow_larger -= 4;
		}

		for (; i < 4 && frames < hdr_frames; i++, frames++) {
			unsigned long addr = (&tp->arg1)[i];
			CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
			    symb, addr, kCSNow);

			expect_frame(bt, bt_len, symbol, addr, frames, hdr_frames);
		}

		/* saw the end of the user stack */
		if (hdr_frames == frames) {
			*stacks_seen += 1;
			if (!kern) {
				/* the user stack is the last thing waited on */
				ktrace_end(s, 1);
			}
		}
	});
}
163
/* arbitrary debugid used as the kperf kdebug-filter trigger */
#define TRIGGERING_DEBUGID (0xfeff0f00)

/*
 * These functions must return an int to avoid the function prologue being
 * hoisted out of the path to the spin (breaking being able to get a good
 * backtrace).
 *
 * They call each other alternately, so the sampled stack shows an
 * a/b/a/b... signature of a known depth.
 */
static int __attribute__((noinline, not_tail_called))
recurse_a(dispatch_semaphore_t spinning, unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(dispatch_semaphore_t spinning, unsigned int frames);
175
176 static int __attribute__((noinline, not_tail_called))
177 recurse_a(dispatch_semaphore_t spinning, unsigned int frames)
178 {
179 if (frames == 0) {
180 if (spinning) {
181 dispatch_semaphore_signal(spinning);
182 for (;;) {
183 ;
184 }
185 } else {
186 kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
187 return 0;
188 }
189 }
190
191 return recurse_b(spinning, frames - 1) + 1;
192 }
193
194 static int __attribute__((noinline, not_tail_called))
195 recurse_b(dispatch_semaphore_t spinning, unsigned int frames)
196 {
197 if (frames == 0) {
198 if (spinning) {
199 dispatch_semaphore_signal(spinning);
200 for (;;) {
201 ;
202 }
203 } else {
204 kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
205 return 0;
206 }
207 }
208
209 return recurse_a(spinning, frames - 1) + 1;
210 }
211
/* total number of frames expected in the user portion of the stack */
#define USER_FRAMES (12)

/* frames already on the stack before recurse_a is first entered */
#if defined(__x86_64__)
#define RECURSE_START_OFFSET (4)
#else /* defined(__x86_64__) */
#define RECURSE_START_OFFSET (3)
#endif /* defined(__x86_64__) */

/*
 * Expected user stack signature, outermost frame first.  NULL entries are
 * system frames whose names are not checked (expect_frame skips them).
 */
static const char *user_bt[USER_FRAMES] = {
#if defined(__x86_64__)
	NULL,
#endif /* defined(__x86_64__) */
	NULL, NULL,
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a",
#if !defined(__x86_64__)
	"recurse_b",
#endif /* !defined(__x86_64__) */
	NULL
};
233
/*
 * Expected kernel stack signature underneath the kdebug_trace64 system
 * call; the entry path (and therefore frame count) differs per
 * architecture.
 */
#if defined(__arm__)

#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"unix_syscall", "kdebug_trace64"
};

#elif defined(__arm64__)

/* arm64 also shows the fleh_/sleh_ synchronous exception handler frames */
#define KERNEL_FRAMES (4)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"fleh_synchronous", "sleh_synchronous", "unix_syscall", "kdebug_trace64"
};

#elif defined(__x86_64__)

#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"unix_syscall64", "kdebug_trace64"
};

#else
#error "architecture unsupported"
#endif /* defined(__arm__) */
258
static dispatch_once_t backtrace_once;         /* guards one-time semaphore setup */
static dispatch_semaphore_t backtrace_started; /* signalled once the thread is running */
static dispatch_semaphore_t backtrace_go;      /* released to let the thread recurse */
262
263 /*
264 * Another thread to run with a known backtrace.
265 *
266 * Take a semaphore that will be signalled when the thread is spinning at the
267 * correct frame. If the semaphore is NULL, don't spin and instead make a
268 * kdebug_trace system call, which can trigger a deterministic backtrace itself.
269 */
270 static void *
271 backtrace_thread(void *arg)
272 {
273 dispatch_semaphore_t notify_spinning;
274 unsigned int calls;
275
276 notify_spinning = (dispatch_semaphore_t)arg;
277
278 dispatch_semaphore_signal(backtrace_started);
279 if (!notify_spinning) {
280 dispatch_semaphore_wait(backtrace_go, DISPATCH_TIME_FOREVER);
281 }
282
283 /*
284 * backtrace_thread, recurse_a, recurse_b, ...[, __kdebug_trace64]
285 *
286 * Always make one less call for this frame (backtrace_thread).
287 */
288 calls = USER_FRAMES - RECURSE_START_OFFSET - 1 /* backtrace_thread */;
289 if (notify_spinning) {
290 /*
291 * Spinning doesn't end up calling __kdebug_trace64.
292 */
293 calls -= 1;
294 }
295
296 T_LOG("backtrace thread calling into %d frames (already at %d frames)",
297 calls, RECURSE_START_OFFSET);
298 (void)recurse_a(notify_spinning, calls);
299 return NULL;
300 }
301
302 static uint64_t
303 create_backtrace_thread(dispatch_semaphore_t notify_spinning)
304 {
305 pthread_t thread = NULL;
306 uint64_t tid;
307
308 dispatch_once(&backtrace_once, ^{
309 backtrace_started = dispatch_semaphore_create(0);
310 T_QUIET; T_ASSERT_NOTNULL(backtrace_started, NULL);
311
312 if (!notify_spinning) {
313 backtrace_go = dispatch_semaphore_create(0);
314 T_QUIET; T_ASSERT_NOTNULL(backtrace_go, NULL);
315 }
316 });
317
318 T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread,
319 (void *)notify_spinning), NULL);
320 T_QUIET; T_ASSERT_NOTNULL(thread, "backtrace thread created");
321 dispatch_semaphore_wait(backtrace_started, DISPATCH_TIME_FOREVER);
322
323 T_QUIET; T_ASSERT_POSIX_ZERO(pthread_threadid_np(thread, &tid), NULL);
324 T_QUIET; T_ASSERT_NE(tid, UINT64_C(0),
325 "backtrace thread created does not have ID 0");
326
327 T_LOG("starting thread with ID 0x%" PRIx64, tid);
328
329 return tid;
330 }
331
332 static void
333 start_backtrace_thread(void)
334 {
335 T_QUIET; T_ASSERT_NOTNULL(backtrace_go,
336 "thread to backtrace created before starting it");
337 dispatch_semaphore_signal(backtrace_go);
338 }
339
/* fail-safe test timeout; watchOS gets a longer budget */
#if TARGET_OS_WATCH
#define TEST_TIMEOUT_NS (30 * NSEC_PER_SEC)
#else /* TARGET_OS_WATCH */
#define TEST_TIMEOUT_NS (5 * NSEC_PER_SEC)
#endif /* !TARGET_OS_WATCH */
345
/*
 * Configure kperf to sample user and kernel stacks when TRIGGERING_DEBUGID
 * is traced, have the helper thread emit that event from a known call
 * stack, and check both sampled stacks against their signatures.
 */
T_DECL(backtraces_kdebug_trigger,
    "test that backtraces from kdebug trigger are correct",
    T_META_ASROOT(true))
{
	static unsigned int stacks_seen = 0;
	ktrace_session_t s;
	kperf_kdebug_filter_t filter;
	uint64_t tid;

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace session was created");

	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	/* create the thread first so its tid can be matched in the handlers */
	tid = create_backtrace_thread(NULL);
	expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES, 0);
	expect_backtrace(s, tid, &stacks_seen, true, kernel_bt, KERNEL_FRAMES, 0);

	/*
	 * The triggering event must be traced (and thus registered with libktrace)
	 * to get backtraces.
	 */
	ktrace_events_single(s, TRIGGERING_DEBUGID,
	    ^(__unused struct trace_point *tp){ });

	/* runs when the session ends — either via the stack handlers or the
	 * timeout below */
	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_GE(stacks_seen, 2U, "saw both kernel and user stacks");
		ktrace_session_destroy(s);
		kperf_reset();
		T_END;
	});

	/* kperf setup: action 1 samples both stacks on the filtered debugid */
	filter = kperf_kdebug_filter_create();
	T_ASSERT_NOTNULL(filter, "kperf kdebug filter was created");

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_debugid(filter,
	    TRIGGERING_DEBUGID), NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
	(void)kperf_action_count_set(1);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
	kperf_kdebug_filter_destroy(filter);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* release the helper thread so it recurses and fires the trigger */
	start_backtrace_thread();

	/* fail-safe: end the session if the stacks never show up */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}
405
406 T_DECL(backtraces_user_timer,
407 "test that user backtraces on a timer are correct",
408 T_META_ASROOT(true))
409 {
410 static unsigned int stacks_seen = 0;
411 ktrace_session_t s;
412 uint64_t tid;
413 dispatch_semaphore_t wait_for_spinning = dispatch_semaphore_create(0);
414
415 s = ktrace_session_create();
416 T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");
417
418 ktrace_filter_pid(s, getpid());
419
420 configure_kperf_stacks_timer(getpid(), 10);
421
422 tid = create_backtrace_thread(wait_for_spinning);
423 /* potentially calling dispatch function and system call */
424 expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES - 1, 2);
425
426 ktrace_set_completion_handler(s, ^(void) {
427 T_EXPECT_GE(stacks_seen, 1U, "saw at least one stack");
428 ktrace_session_destroy(s);
429 kperf_reset();
430 T_END;
431 });
432
433 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);
434
435 /* wait until the thread that will be backtraced is spinning */
436 dispatch_semaphore_wait(wait_for_spinning, DISPATCH_TIME_FOREVER);
437
438 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
439
440 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
441 dispatch_get_main_queue(), ^(void)
442 {
443 T_LOG("ending test after timeout");
444 ktrace_end(s, 0);
445 });
446
447 dispatch_main();
448 }
449
450 /* TODO test kernel stacks in all modes */
451 /* TODO legacy PET mode backtracing */
452 /* TODO test deep stacks, further than 128 frames, make sure they are truncated */
453 /* TODO test constrained stacks */