/* Copyright (c) 2018-2019 Apple Inc. All rights reserved. */

#include <CoreSymbolication/CoreSymbolication.h>
#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <kperf/kperf.h>
#include <ktrace/session.h>
#include <ktrace/private.h>
#include <System/sys/kdebug.h>
#include <pthread.h>

#include "kperf_helpers.h"
#include "ktrace_helpers.h"

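/*
 * Flag bits that the tests below check in arg1 of the PERF_STK_UHDR stack
 * header events.
 */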
#define CALLSTACK_VALID 0x1
#define CALLSTACK_TRUNCATED 0x10

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.ktrace"),
	T_META_CHECK_LEAKS(false));

static void
expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol,
    uint64_t addr, unsigned int bt_idx, unsigned int max_frames)
{
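	/*
	 * kperf reports stack frames starting from the one nearest the current
	 * PC, while `bt` lists the expected symbols from the root of the stack,
	 * so the index into `bt` runs in reverse.  NULL entries in `bt` denote
	 * system frames whose names are not checked.
	 */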
	const char *name;
	unsigned int frame_idx = max_frames - bt_idx - 1;

	if (!bt[frame_idx]) {
		T_LOG("frame %2u: skipping system frame '%s'", frame_idx,
		    CSSymbolGetName(symbol));
		return;
	}

	T_LOG("checking frame %d: %llx", bt_idx, addr);
	T_ASSERT_FALSE(CSIsNull(symbol), "invalid symbol for return address");

	if (frame_idx >= bt_len) {
		T_FAIL("unexpected frame '%s' (%#" PRIx64 ") at index %u",
		    CSSymbolGetName(symbol), addr, frame_idx);
		return;
	}

	name = CSSymbolGetName(symbol);
	T_QUIET; T_ASSERT_NOTNULL(name, NULL);
	T_EXPECT_EQ_STR(name, bt[frame_idx],
	    "frame %2u: saw '%s', expected '%s'",
	    frame_idx, name, bt[frame_idx]);
}

/*
 * Expect to see either user or kernel stacks on the thread with ID `tid`,
 * matching a signature `bt` of length `bt_len`.  Updates `stacks_seen` when a
 * stack is found.
 *
 * Can also allow stacks to be larger than the signature -- additional frames
 * near the current PC will be ignored.  This allows the stack to be captured
 * in the middle of a signalling system call (which indicates that it is safe
 * to start sampling).
 */
static void
expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen,
    bool kern, const char **bt, unsigned int bt_len, unsigned int allow_larger_by)
{
	CSSymbolicatorRef symb;
	uint32_t hdr_debugid;
	uint32_t data_debugid;
	__block unsigned int stacks = 0;
	__block unsigned int frames = 0;
	__block unsigned int hdr_frames = 0;
	__block unsigned int allow_larger = allow_larger_by;

	T_SETUPBEGIN;

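	/*
	 * Symbolicate kernel addresses against the running kernel and user
	 * addresses against this task; the symbolicators are created once and
	 * cached across calls.
	 */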
	if (kern) {
		static CSSymbolicatorRef kern_symb;
		static dispatch_once_t kern_symb_once;

		hdr_debugid = PERF_STK_KHDR;
		data_debugid = PERF_STK_KDATA;

		dispatch_once(&kern_symb_once, ^(void) {
			kern_symb = CSSymbolicatorCreateWithMachKernel();
			T_QUIET; T_ASSERT_FALSE(CSIsNull(kern_symb), NULL);
		});
		symb = kern_symb;
	} else {
		static CSSymbolicatorRef user_symb;
		static dispatch_once_t user_symb_once;

		hdr_debugid = PERF_STK_UHDR;
		data_debugid = PERF_STK_UDATA;

		dispatch_once(&user_symb_once, ^(void) {
			user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
			T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
			T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
		});
		symb = user_symb;
	}

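	/*
	 * The stack header event reports the callstack flags in arg1 and the
	 * number of frames recorded in arg2.
	 */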
	ktrace_events_single(s, hdr_debugid, ^(struct trace_point *tp) {
		if (tid != 0 && tid != tp->threadid) {
			return;
		}

		T_LOG("found %s stack from thread %#" PRIx64, kern ? "kernel" : "user",
		    tp->threadid);
		stacks++;
		if (!(tp->arg1 & 1)) {
			T_FAIL("invalid %s stack on thread %#" PRIx64,
			    kern ? "kernel" : "user", tp->threadid);
			return;
		}

		hdr_frames = (unsigned int)tp->arg2;
		/* ignore extra link register or value pointed to by stack pointer */
		hdr_frames -= 1;

		T_QUIET; T_EXPECT_GE(hdr_frames, bt_len,
		    "number of frames in header");
		T_QUIET; T_EXPECT_LE(hdr_frames, bt_len + allow_larger,
		    "number of frames in header");
		if (hdr_frames > bt_len && allow_larger > 0) {
			allow_larger = hdr_frames - bt_len;
			hdr_frames = bt_len;
		}

		T_LOG("%s stack seen", kern ? "kernel" : "user");
		frames = 0;
	});

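	/*
	 * Each stack data event carries up to four return addresses in arg1
	 * through arg4.
	 */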
	ktrace_events_single(s, data_debugid, ^(struct trace_point *tp) {
		if (tid != 0 && tid != tp->threadid) {
			return;
		}

		int i = 0;

		if (frames == 0 && hdr_frames > bt_len) {
			/* skip frames near the PC */
			i = (int)allow_larger;
			allow_larger -= 4;
		}

		for (; i < 4 && frames < hdr_frames; i++, frames++) {
			uint64_t addr = (&tp->arg1)[i];
			CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
				symb, addr, kCSNow);

			expect_frame(bt, bt_len, symbol, addr, frames, hdr_frames);
		}

		/* saw the end of the user stack */
		if (hdr_frames == frames) {
			*stacks_seen += 1;
			if (!kern) {
				ktrace_end(s, 1);
			}
		}
	});

	T_SETUPEND;
}

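/*
 * An arbitrary debugid, emitted by the recursing thread and used both to fire
 * the kperf kdebug action and to be traced by the tests below.
 */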
#define TRIGGERING_DEBUGID (0xfeff0f00)

/*
 * These functions must return an int to keep the compiler from hoisting the
 * function prologue out of the path to the spin, which would prevent getting
 * a good backtrace.
 */
static int __attribute__((noinline, not_tail_called))
recurse_a(dispatch_semaphore_t spinning, unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(dispatch_semaphore_t spinning, unsigned int frames);

static int __attribute__((noinline, not_tail_called))
recurse_a(dispatch_semaphore_t spinning, unsigned int frames)
{
	if (frames == 0) {
		if (spinning) {
			dispatch_semaphore_signal(spinning);
			for (;;) {
				;
			}
		} else {
			kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
			return 0;
		}
	}

	return recurse_b(spinning, frames - 1) + 1;
}

static int __attribute__((noinline, not_tail_called))
recurse_b(dispatch_semaphore_t spinning, unsigned int frames)
{
	if (frames == 0) {
		if (spinning) {
			dispatch_semaphore_signal(spinning);
			for (;;) {
				;
			}
		} else {
			kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
			return 0;
		}
	}

	return recurse_a(spinning, frames - 1) + 1;
}

#define USER_FRAMES (12)

#if defined(__x86_64__)

#define RECURSE_START_OFFSET (3)

#else /* defined(__x86_64__) */

#define RECURSE_START_OFFSET (2)

#endif /* !defined(__x86_64__) */

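/*
 * The expected user stack signature, ordered from the root of the stack to
 * the frame nearest the PC.  NULL entries stand for system frames (such as
 * thread_start and the __kdebug_trace64 wrapper) whose names are not checked.
 */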
static const char *user_bt[USER_FRAMES] = {
#if defined(__x86_64__)
	/*
	 * x86_64 has an extra "thread_start" frame here.
	 */
	NULL,
#endif /* defined(__x86_64__) */
	NULL,
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
#if !defined(__x86_64__)
	/*
	 * Pick up the slack to make the number of frames constant.
	 */
	"recurse_a",
#endif /* !defined(__x86_64__) */
	NULL,
};

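/*
 * The expected kernel stack signatures for the kdebug_trace64 system call,
 * which differ by architecture.
 */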
#if defined(__arm__)

#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"unix_syscall", "kdebug_trace64"
};

#elif defined(__arm64__)

#define KERNEL_FRAMES (4)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"fleh_synchronous", "sleh_synchronous", "unix_syscall", "kdebug_trace64"
};

#elif defined(__x86_64__)

#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"unix_syscall64", "kdebug_trace64"
};

#else
#error "architecture unsupported"
#endif /* defined(__arm__) */

static dispatch_once_t backtrace_once;
static dispatch_semaphore_t backtrace_started;
static dispatch_semaphore_t backtrace_go;

/*
 * Another thread to run with a known backtrace.
 *
 * Takes a semaphore that will be signalled when the thread is spinning at the
 * correct frame.  If the semaphore is NULL, don't spin and instead make a
 * kdebug_trace system call, which can itself trigger a deterministic
 * backtrace.
 */
static void *
backtrace_thread(void *arg)
{
	dispatch_semaphore_t notify_spinning;
	unsigned int calls;

	notify_spinning = (dispatch_semaphore_t)arg;

	dispatch_semaphore_signal(backtrace_started);
	if (!notify_spinning) {
		dispatch_semaphore_wait(backtrace_go, DISPATCH_TIME_FOREVER);
	}

	/*
	 * backtrace_thread, recurse_a, recurse_b, ...[, __kdebug_trace64]
	 *
	 * Always make one less call for this frame (backtrace_thread).
	 */
	calls = USER_FRAMES - RECURSE_START_OFFSET - 1 /* backtrace_thread */;
	if (notify_spinning) {
		/*
		 * Spinning doesn't end up calling __kdebug_trace64.
		 */
		calls -= 1;
	}

	T_LOG("backtrace thread calling into %d frames (already at %d frames)",
	    calls, RECURSE_START_OFFSET);
	(void)recurse_a(notify_spinning, calls);
	return NULL;
}

static uint64_t
create_backtrace_thread(void *(*thread_fn)(void *),
    dispatch_semaphore_t notify_spinning)
{
	pthread_t thread = NULL;
	uint64_t tid;

	dispatch_once(&backtrace_once, ^{
		backtrace_started = dispatch_semaphore_create(0);
		T_QUIET; T_ASSERT_NOTNULL(backtrace_started, NULL);

		if (!notify_spinning) {
			backtrace_go = dispatch_semaphore_create(0);
			T_QUIET; T_ASSERT_NOTNULL(backtrace_go, NULL);
		}
	});

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, thread_fn,
	    (void *)notify_spinning), NULL);
	T_QUIET; T_ASSERT_NOTNULL(thread, "backtrace thread created");
	dispatch_semaphore_wait(backtrace_started, DISPATCH_TIME_FOREVER);

	T_QUIET; T_ASSERT_POSIX_ZERO(pthread_threadid_np(thread, &tid), NULL);
	T_QUIET; T_ASSERT_NE(tid, UINT64_C(0),
	    "backtrace thread created does not have ID 0");

	T_LOG("starting thread with ID 0x%" PRIx64, tid);

	return tid;
}

static void
start_backtrace_thread(void)
{
	T_QUIET; T_ASSERT_NOTNULL(backtrace_go,
	    "thread to backtrace created before starting it");
	dispatch_semaphore_signal(backtrace_go);
}

#if TARGET_OS_WATCH
#define TEST_TIMEOUT_NS (30 * NSEC_PER_SEC)
#else /* TARGET_OS_WATCH */
#define TEST_TIMEOUT_NS (5 * NSEC_PER_SEC)
#endif /* !TARGET_OS_WATCH */

T_DECL(kperf_stacks_kdebug_trig,
    "test that backtraces from kdebug trigger are correct",
    T_META_ASROOT(true))
{
	static unsigned int stacks_seen = 0;
	ktrace_session_t s;
	kperf_kdebug_filter_t filter;
	uint64_t tid;

	start_controlling_ktrace();

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);

	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	tid = create_backtrace_thread(backtrace_thread, NULL);
	expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES, 0);
	expect_backtrace(s, tid, &stacks_seen, true, kernel_bt, KERNEL_FRAMES, 0);

	/*
	 * The triggering event must be traced (and thus registered with libktrace)
	 * to get backtraces.
	 */
	ktrace_events_single(s, TRIGGERING_DEBUGID,
	    ^(__unused struct trace_point *tp){ });

	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_GE(stacks_seen, 2U, "saw both kernel and user stacks");
		ktrace_session_destroy(s);
		kperf_reset();
		T_END;
	});

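	/*
	 * Configure kperf action 1 to sample user and kernel stacks whenever the
	 * triggering debugid is traced.
	 */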
	filter = kperf_kdebug_filter_create();
	T_ASSERT_NOTNULL(filter, "kperf kdebug filter was created");

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_debugid(filter,
	    TRIGGERING_DEBUGID), NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
	(void)kperf_action_count_set(1);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
	kperf_kdebug_filter_destroy(filter);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	start_backtrace_thread();

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}

T_DECL(kperf_ustack_timer,
    "test that user backtraces on a timer are correct",
    T_META_ASROOT(true))
{
	static unsigned int stacks_seen = 0;
	ktrace_session_t s;
	uint64_t tid;
	dispatch_semaphore_t wait_for_spinning = dispatch_semaphore_create(0);

	start_controlling_ktrace();

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	ktrace_set_collection_interval(s, 100);

	ktrace_filter_pid(s, getpid());

	configure_kperf_stacks_timer(getpid(), 10, false);

	tid = create_backtrace_thread(backtrace_thread, wait_for_spinning);
	/* potentially calling dispatch function and system call */
	expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES - 1, 2);

	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_GE(stacks_seen, 1U, "saw at least one stack");
		ktrace_session_destroy(s);
		kperf_reset();
		T_END;
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	/* wait until the thread that will be backtraced is spinning */
	dispatch_semaphore_wait(wait_for_spinning, DISPATCH_TIME_FOREVER);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}

static volatile bool spin = true;

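/*
 * Recurse deeply enough to build a stack of more than 256 frames and then
 * spin, so a timer sample observes a long (truncated) user stack.
 */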
__attribute__((noinline, not_tail_called))
static void
recurse_spin(dispatch_semaphore_t notify_sema, int depth)
{
	if (depth > 0) {
		recurse_spin(notify_sema, depth - 1);
	} else {
		dispatch_semaphore_signal(notify_sema);
		while (spin);
	}
}

static void *
spin_thread(void *arg)
{
	dispatch_semaphore_t notify_sema = arg;
	dispatch_semaphore_signal(backtrace_started);
	recurse_spin(notify_sema, 257);
	return NULL;
}

T_DECL(kperf_ustack_trunc, "ensure stacks are marked as truncated")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);

	T_QUIET;
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	configure_kperf_stacks_timer(getpid(), 10, false);

	__block bool saw_stack = false;
	ktrace_set_completion_handler(s, ^{
		T_EXPECT_TRUE(saw_stack, "saw the user stack");
		T_END;
	});

	dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
	uint64_t tid = create_backtrace_thread(spin_thread, notify_sema);

	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		if (tp->threadid != tid) {
			return;
		}
		T_LOG("found %llu frame stack", tp->arg2);
		T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_VALID,
		    "found valid callstack");
		T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_TRUNCATED,
		    "found truncated callstack");
		saw_stack = true;
		ktrace_end(s, 1);
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "start tracing");

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}

T_DECL(kperf_ustack_maxlen, "ensure stacks up to 256 frames can be captured")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace session was created");

	ktrace_set_collection_interval(s, 100);

	T_QUIET;
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	configure_kperf_stacks_timer(getpid(), 10, false);

	__block bool saw_stack = false;
	__block bool saw_stack_data = false;
	__block uint64_t nevents = 0;
	ktrace_set_completion_handler(s, ^{
		T_EXPECT_TRUE(saw_stack, "saw the user stack");
		T_LOG("saw %" PRIu64 " stack data events", nevents);
		T_EXPECT_TRUE(saw_stack_data, "saw all frames of the user stack");
		T_END;
	});

	dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
	uint64_t tid = create_backtrace_thread(spin_thread, notify_sema);

	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		if (tp->threadid != tid) {
			return;
		}
		T_LOG("found %llu frame stack", tp->arg2);
		T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_VALID,
		    "found valid callstack");
		T_EXPECT_EQ(tp->arg2, UINT64_C(256),
		    "found the correct number of frames");
		saw_stack = true;
	});

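	/*
	 * With four return addresses per data event, a 256 frame stack should
	 * arrive as 256 / 4 data events.
	 */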
	ktrace_events_single(s, PERF_STK_UDATA, ^(struct trace_point *tp) {
		if (tp->threadid != tid && !saw_stack) {
			return;
		}
		nevents++;
		if (nevents == 256 / 4) {
			ktrace_end(s, 1);
		}
		saw_stack_data = true;
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "start tracing");

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}

/* TODO test kernel stacks in all modes */
/* TODO legacy PET mode backtracing */