]> git.saurik.com Git - apple/xnu.git/blame - tests/kperf_backtracing.c
xnu-6153.81.5.tar.gz
[apple/xnu.git] / tests / kperf_backtracing.c
CommitLineData
5ba3f43e
A
1#ifdef T_NAMESPACE
2#undef T_NAMESPACE
3#endif
4
39037602
A
5#include <CoreSymbolication/CoreSymbolication.h>
6#include <darwintest.h>
7#include <dispatch/dispatch.h>
8#include <kperf/kperf.h>
5ba3f43e 9#include <ktrace/session.h>
cb323159 10#include <ktrace/private.h>
5ba3f43e 11#include <System/sys/kdebug.h>
39037602
A
12#include <pthread.h>
13
14#include "kperf_helpers.h"
cb323159 15#include "ktrace_helpers.h"
39037602
A
16
/*
 * kdebug debug IDs emitted by kperf for call stack samples: a header event
 * (KHDR/UHDR) followed by data events (KDATA/UDATA) carrying the frames.
 */
#define PERF_STK_KHDR UINT32_C(0x25020014)
#define PERF_STK_UHDR UINT32_C(0x25020018)
#define PERF_STK_KDATA UINT32_C(0x2502000c)
#define PERF_STK_UDATA UINT32_C(0x25020010)

/* flag bits reported in the header event's first argument */
#define CALLSTACK_VALID 0x1
#define CALLSTACK_TRUNCATED 0x10

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.ktrace"),
	T_META_CHECK_LEAKS(false));
5ba3f43e 28
39037602
A
29static void
30expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol,
31 unsigned long addr, unsigned int bt_idx, unsigned int max_frames)
32{
0a7de745
A
33 const char *name;
34 unsigned int frame_idx = max_frames - bt_idx - 1;
35
36 if (!bt[frame_idx]) {
cb323159
A
37 T_LOG("frame %2u: skipping system frame '%s'", frame_idx,
38 CSSymbolGetName(symbol));
0a7de745
A
39 return;
40 }
41
42 if (CSIsNull(symbol)) {
cb323159
A
43 T_FAIL("invalid symbol for address %#lx at frame %d", addr,
44 frame_idx);
0a7de745
A
45 return;
46 }
47
48 if (frame_idx >= bt_len) {
49 T_FAIL("unexpected frame '%s' (%#lx) at index %u",
50 CSSymbolGetName(symbol), addr, frame_idx);
51 return;
52 }
53
54 name = CSSymbolGetName(symbol);
55 T_QUIET; T_ASSERT_NOTNULL(name, NULL);
56 T_EXPECT_EQ_STR(name, bt[frame_idx],
57 "frame %2u: saw '%s', expected '%s'",
58 frame_idx, name, bt[frame_idx]);
39037602
A
59}
60
61/*
813fb2f6
A
62 * Expect to see either user or kernel stacks on thread with ID `tid` with a
63 * signature of `bt` of length `bt_len`. Updates `stacks_seen` when stack
64 * is found.
65 *
66 * Can also allow stacks to be larger than the signature -- additional frames
67 * near the current PC will be ignored. This allows stacks to potentially be
68 * in the middle of a signalling system call (which signals that it is safe to
69 * start sampling).
39037602
A
70 */
71static void
72expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen,
813fb2f6 73 bool kern, const char **bt, unsigned int bt_len, unsigned int allow_larger_by)
39037602 74{
0a7de745
A
75 CSSymbolicatorRef symb;
76 uint32_t hdr_debugid;
77 uint32_t data_debugid;
78 __block unsigned int stacks = 0;
79 __block unsigned int frames = 0;
80 __block unsigned int hdr_frames = 0;
81 __block unsigned int allow_larger = allow_larger_by;
82
83 if (kern) {
84 static CSSymbolicatorRef kern_symb;
85 static dispatch_once_t kern_symb_once;
86
87 hdr_debugid = PERF_STK_KHDR;
88 data_debugid = PERF_STK_KDATA;
89
90 dispatch_once(&kern_symb_once, ^(void) {
91 kern_symb = CSSymbolicatorCreateWithMachKernel();
92 T_QUIET; T_ASSERT_FALSE(CSIsNull(kern_symb), NULL);
93 });
94 symb = kern_symb;
95 } else {
96 static CSSymbolicatorRef user_symb;
97 static dispatch_once_t user_symb_once;
98
99 hdr_debugid = PERF_STK_UHDR;
100 data_debugid = PERF_STK_UDATA;
101
102 dispatch_once(&user_symb_once, ^(void) {
103 user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
104 T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
105 T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
106 });
107 symb = user_symb;
108 }
109
110 ktrace_events_single(s, hdr_debugid, ^(struct trace_point *tp) {
111 if (tid != 0 && tid != tp->threadid) {
112 return;
113 }
114
cb323159 115 T_LOG("found stack from thread %#" PRIx64, tp->threadid);
0a7de745
A
116 stacks++;
117 if (!(tp->arg1 & 1)) {
cb323159
A
118 T_FAIL("invalid %s stack on thread %#" PRIx64,
119 kern ? "kernel" : "user", tp->threadid);
0a7de745
A
120 return;
121 }
122
123 hdr_frames = (unsigned int)tp->arg2;
124 /* ignore extra link register or value pointed to by stack pointer */
125 hdr_frames -= 1;
126
127 T_QUIET; T_EXPECT_GE(hdr_frames, bt_len,
128 "number of frames in header");
129 T_QUIET; T_EXPECT_LE(hdr_frames, bt_len + allow_larger,
130 "number of frames in header");
131 if (hdr_frames > bt_len && allow_larger > 0) {
132 allow_larger = hdr_frames - bt_len;
133 hdr_frames = bt_len;
134 }
135
136 T_LOG("%s stack seen", kern ? "kernel" : "user");
137 frames = 0;
138 });
139
140 ktrace_events_single(s, data_debugid, ^(struct trace_point *tp) {
141 if (tid != 0 && tid != tp->threadid) {
142 return;
143 }
144
145 int i = 0;
146
147 if (frames == 0 && hdr_frames > bt_len) {
148 /* skip frames near the PC */
149 i = (int)allow_larger;
150 allow_larger -= 4;
151 }
152
153 for (; i < 4 && frames < hdr_frames; i++, frames++) {
154 unsigned long addr = (&tp->arg1)[i];
155 CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
156 symb, addr, kCSNow);
157
158 expect_frame(bt, bt_len, symbol, addr, frames, hdr_frames);
159 }
160
161 /* saw the end of the user stack */
162 if (hdr_frames == frames) {
163 *stacks_seen += 1;
164 if (!kern) {
165 ktrace_end(s, 1);
166 }
167 }
168 });
39037602
A
169}
170
171#define TRIGGERING_DEBUGID (0xfeff0f00)
172
173/*
174 * These functions must return an int to avoid the function prologue being
175 * hoisted out of the path to the spin (breaking being able to get a good
176 * backtrace).
177 */
0a7de745 178static int __attribute__((noinline, not_tail_called))
813fb2f6 179recurse_a(dispatch_semaphore_t spinning, unsigned int frames);
0a7de745 180static int __attribute__((noinline, not_tail_called))
813fb2f6 181recurse_b(dispatch_semaphore_t spinning, unsigned int frames);
39037602 182
0a7de745 183static int __attribute__((noinline, not_tail_called))
813fb2f6 184recurse_a(dispatch_semaphore_t spinning, unsigned int frames)
39037602 185{
0a7de745
A
186 if (frames == 0) {
187 if (spinning) {
188 dispatch_semaphore_signal(spinning);
189 for (;;) {
190 ;
191 }
192 } else {
193 kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
194 return 0;
195 }
196 }
197
198 return recurse_b(spinning, frames - 1) + 1;
39037602
A
199}
200
0a7de745 201static int __attribute__((noinline, not_tail_called))
813fb2f6 202recurse_b(dispatch_semaphore_t spinning, unsigned int frames)
39037602 203{
0a7de745
A
204 if (frames == 0) {
205 if (spinning) {
206 dispatch_semaphore_signal(spinning);
207 for (;;) {
208 ;
209 }
210 } else {
211 kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
212 return 0;
213 }
214 }
215
216 return recurse_a(spinning, frames - 1) + 1;
39037602
A
217}
218
/* total number of frames expected in the user stack signature below */
#define USER_FRAMES (12)

#if defined(__x86_64__)

#define RECURSE_START_OFFSET (3)

#else /* defined(__x86_64__) */

#define RECURSE_START_OFFSET (2)

#endif /* !defined(__x86_64__) */

/*
 * Expected user stack signature, bottom-up.  NULL entries are system frames
 * whose names are not checked (see expect_frame).
 */
static const char *user_bt[USER_FRAMES] = {
#if defined(__x86_64__)
	/*
	 * x86_64 has an extra "thread_start" frame here.
	 */
	NULL,
#endif /* defined(__x86_64__) */
	NULL,
	"backtrace_thread",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
	"recurse_a", "recurse_b", "recurse_a", "recurse_b",
#if !defined(__x86_64__)
	/*
	 * Pick up the slack to make the number of frames constant.
	 */
	"recurse_a",
#endif /* !defined(__x86_64__) */
	NULL,
};
250
5ba3f43e
A
/*
 * Expected kernel stack signature for the kdebug_trace64 system call, per
 * architecture.
 */
#if defined(__arm__)

#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"unix_syscall", "kdebug_trace64"
};

#elif defined(__arm64__)

/* arm64 includes the synchronous-exception entry frames above the syscall */
#define KERNEL_FRAMES (4)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"fleh_synchronous", "sleh_synchronous", "unix_syscall", "kdebug_trace64"
};

#elif defined(__x86_64__)

#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
	"unix_syscall64", "kdebug_trace64"
};

#else
#error "architecture unsupported"
#endif /* defined(__arm__) */
275
813fb2f6
A
276static dispatch_once_t backtrace_once;
277static dispatch_semaphore_t backtrace_started;
278static dispatch_semaphore_t backtrace_go;
39037602 279
813fb2f6
A
280/*
281 * Another thread to run with a known backtrace.
282 *
283 * Take a semaphore that will be signalled when the thread is spinning at the
284 * correct frame. If the semaphore is NULL, don't spin and instead make a
285 * kdebug_trace system call, which can trigger a deterministic backtrace itself.
286 */
39037602
A
287static void *
288backtrace_thread(void *arg)
289{
0a7de745
A
290 dispatch_semaphore_t notify_spinning;
291 unsigned int calls;
292
293 notify_spinning = (dispatch_semaphore_t)arg;
294
295 dispatch_semaphore_signal(backtrace_started);
296 if (!notify_spinning) {
297 dispatch_semaphore_wait(backtrace_go, DISPATCH_TIME_FOREVER);
298 }
299
300 /*
301 * backtrace_thread, recurse_a, recurse_b, ...[, __kdebug_trace64]
302 *
303 * Always make one less call for this frame (backtrace_thread).
304 */
305 calls = USER_FRAMES - RECURSE_START_OFFSET - 1 /* backtrace_thread */;
306 if (notify_spinning) {
307 /*
308 * Spinning doesn't end up calling __kdebug_trace64.
309 */
310 calls -= 1;
311 }
312
313 T_LOG("backtrace thread calling into %d frames (already at %d frames)",
314 calls, RECURSE_START_OFFSET);
315 (void)recurse_a(notify_spinning, calls);
316 return NULL;
39037602
A
317}
318
319static uint64_t
cb323159
A
320create_backtrace_thread(void *(*thread_fn)(void *),
321 dispatch_semaphore_t notify_spinning)
39037602 322{
0a7de745
A
323 pthread_t thread = NULL;
324 uint64_t tid;
39037602 325
0a7de745
A
326 dispatch_once(&backtrace_once, ^{
327 backtrace_started = dispatch_semaphore_create(0);
328 T_QUIET; T_ASSERT_NOTNULL(backtrace_started, NULL);
813fb2f6 329
0a7de745
A
330 if (!notify_spinning) {
331 backtrace_go = dispatch_semaphore_create(0);
332 T_QUIET; T_ASSERT_NOTNULL(backtrace_go, NULL);
333 }
334 });
39037602 335
cb323159 336 T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, thread_fn,
0a7de745
A
337 (void *)notify_spinning), NULL);
338 T_QUIET; T_ASSERT_NOTNULL(thread, "backtrace thread created");
339 dispatch_semaphore_wait(backtrace_started, DISPATCH_TIME_FOREVER);
813fb2f6 340
0a7de745
A
341 T_QUIET; T_ASSERT_POSIX_ZERO(pthread_threadid_np(thread, &tid), NULL);
342 T_QUIET; T_ASSERT_NE(tid, UINT64_C(0),
343 "backtrace thread created does not have ID 0");
813fb2f6 344
0a7de745 345 T_LOG("starting thread with ID 0x%" PRIx64, tid);
39037602 346
0a7de745 347 return tid;
39037602
A
348}
349
/*
 * Release the (non-spinning) backtrace thread so it starts recursing and
 * eventually emits the triggering kdebug event.
 */
static void
start_backtrace_thread(void)
{
	T_QUIET; T_ASSERT_NOTNULL(backtrace_go,
	    "thread to backtrace created before starting it");
	dispatch_semaphore_signal(backtrace_go);
}
357
5ba3f43e
A
358#if TARGET_OS_WATCH
359#define TEST_TIMEOUT_NS (30 * NSEC_PER_SEC)
360#else /* TARGET_OS_WATCH */
39037602 361#define TEST_TIMEOUT_NS (5 * NSEC_PER_SEC)
5ba3f43e 362#endif /* !TARGET_OS_WATCH */
39037602 363
cb323159 364T_DECL(kdebug_trigger,
39037602 365 "test that backtraces from kdebug trigger are correct",
813fb2f6 366 T_META_ASROOT(true))
39037602 367{
0a7de745
A
368 static unsigned int stacks_seen = 0;
369 ktrace_session_t s;
370 kperf_kdebug_filter_t filter;
371 uint64_t tid;
372
cb323159
A
373 start_controlling_ktrace();
374
0a7de745
A
375 s = ktrace_session_create();
376 T_ASSERT_NOTNULL(s, "ktrace session was created");
377
cb323159
A
378 ktrace_set_collection_interval(s, 100);
379
0a7de745
A
380 T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);
381
cb323159 382 tid = create_backtrace_thread(backtrace_thread, NULL);
0a7de745
A
383 expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES, 0);
384 expect_backtrace(s, tid, &stacks_seen, true, kernel_bt, KERNEL_FRAMES, 0);
385
386 /*
387 * The triggering event must be traced (and thus registered with libktrace)
388 * to get backtraces.
389 */
390 ktrace_events_single(s, TRIGGERING_DEBUGID,
391 ^(__unused struct trace_point *tp){ });
392
393 ktrace_set_completion_handler(s, ^(void) {
394 T_EXPECT_GE(stacks_seen, 2U, "saw both kernel and user stacks");
395 ktrace_session_destroy(s);
396 kperf_reset();
397 T_END;
398 });
399
400 filter = kperf_kdebug_filter_create();
401 T_ASSERT_NOTNULL(filter, "kperf kdebug filter was created");
402
403 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_debugid(filter,
404 TRIGGERING_DEBUGID), NULL);
405 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
406 (void)kperf_action_count_set(1);
407 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
408 KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL);
409 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
410 kperf_kdebug_filter_destroy(filter);
411
412 T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);
413
414 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
415
416 start_backtrace_thread();
417
418 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
419 dispatch_get_main_queue(), ^(void)
420 {
421 T_LOG("ending test after timeout");
422 ktrace_end(s, 0);
423 });
424
425 dispatch_main();
39037602
A
426}
427
cb323159 428T_DECL(user_timer,
39037602 429 "test that user backtraces on a timer are correct",
813fb2f6 430 T_META_ASROOT(true))
39037602 431{
0a7de745
A
432 static unsigned int stacks_seen = 0;
433 ktrace_session_t s;
434 uint64_t tid;
435 dispatch_semaphore_t wait_for_spinning = dispatch_semaphore_create(0);
39037602 436
cb323159
A
437 start_controlling_ktrace();
438
0a7de745
A
439 s = ktrace_session_create();
440 T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");
39037602 441
cb323159
A
442 ktrace_set_collection_interval(s, 100);
443
0a7de745 444 ktrace_filter_pid(s, getpid());
39037602 445
0a7de745 446 configure_kperf_stacks_timer(getpid(), 10);
39037602 447
cb323159 448 tid = create_backtrace_thread(backtrace_thread, wait_for_spinning);
0a7de745
A
449 /* potentially calling dispatch function and system call */
450 expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES - 1, 2);
39037602 451
0a7de745
A
452 ktrace_set_completion_handler(s, ^(void) {
453 T_EXPECT_GE(stacks_seen, 1U, "saw at least one stack");
454 ktrace_session_destroy(s);
455 kperf_reset();
456 T_END;
457 });
39037602 458
0a7de745 459 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);
39037602 460
0a7de745
A
461 /* wait until the thread that will be backtraced is spinning */
462 dispatch_semaphore_wait(wait_for_spinning, DISPATCH_TIME_FOREVER);
39037602 463
0a7de745 464 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
39037602 465
0a7de745
A
466 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
467 dispatch_get_main_queue(), ^(void)
468 {
469 T_LOG("ending test after timeout");
470 ktrace_end(s, 0);
471 });
39037602 472
0a7de745 473 dispatch_main();
39037602
A
474}
475
cb323159
A
476static volatile bool spin = true;
477
478__attribute__((noinline, not_tail_called))
479static void
480recurse_spin(dispatch_semaphore_t notify_sema, int depth)
481{
482 if (depth > 0) {
483 recurse_spin(notify_sema, depth - 1);
484 } else {
485 dispatch_semaphore_signal(notify_sema);
486 while (spin);
487 }
488}
489
490static void *
491spin_thread(void *arg)
492{
493 dispatch_semaphore_t notify_sema = arg;
494 dispatch_semaphore_signal(backtrace_started);
495 recurse_spin(notify_sema, 257);
496 return NULL;
497}
498
499T_DECL(truncated_user_stacks, "ensure stacks are marked as truncated")
500{
501 start_controlling_ktrace();
502
503 ktrace_session_t s = ktrace_session_create();
504 T_ASSERT_NOTNULL(s, "ktrace session was created");
505
506 ktrace_set_collection_interval(s, 100);
507
508 T_QUIET;
509 T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);
510
511 configure_kperf_stacks_timer(getpid(), 10);
512
513 __block bool saw_stack = false;
514 ktrace_set_completion_handler(s, ^{
515 T_EXPECT_TRUE(saw_stack, "saw the user stack");
516 T_END;
517 });
518
519 dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
520 uint64_t tid = create_backtrace_thread(spin_thread, notify_sema);
521
522 ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
523 if (tp->threadid != tid) {
524 return;
525 }
526 T_LOG("found %llu frame stack", tp->arg2);
527 T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_VALID,
528 "found valid callstack");
529 T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_TRUNCATED,
530 "found truncated callstack");
531 saw_stack = true;
532 ktrace_end(s, 1);
533 });
534
535 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);
536
537 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
538 "start tracing");
539
540 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
541 dispatch_get_main_queue(), ^(void)
542 {
543 T_LOG("ending test after timeout");
544 ktrace_end(s, 0);
545 });
546
547 dispatch_main();
548}
549
550T_DECL(max_user_stacks, "ensure stacks up to 256 frames can be captured")
551{
552 start_controlling_ktrace();
553
554 ktrace_session_t s = ktrace_session_create();
555 T_ASSERT_NOTNULL(s, "ktrace session was created");
556
557 ktrace_set_collection_interval(s, 100);
558
559 T_QUIET;
560 T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);
561
562 configure_kperf_stacks_timer(getpid(), 10);
563
564 __block bool saw_stack = false;
565 __block bool saw_stack_data = false;
566 __block uint64_t nevents = 0;
567 ktrace_set_completion_handler(s, ^{
568 T_EXPECT_TRUE(saw_stack, "saw the user stack");
569 T_LOG("saw %" PRIu64 " stack data events", nevents);
570 T_EXPECT_TRUE(saw_stack_data, "saw all frames of the user stack");
571 T_END;
572 });
573
574 dispatch_semaphore_t notify_sema = dispatch_semaphore_create(0);
575 uint64_t tid = create_backtrace_thread(spin_thread, notify_sema);
576
577 ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
578 if (tp->threadid != tid) {
579 return;
580 }
581 T_LOG("found %llu frame stack", tp->arg2);
582 T_EXPECT_BITS_SET(tp->arg1, CALLSTACK_VALID,
583 "found valid callstack");
584 T_EXPECT_EQ(tp->arg2, UINT64_C(256),
585 "found the correct number of frames");
586 saw_stack = true;
587 });
588
589 ktrace_events_single(s, PERF_STK_UDATA, ^(struct trace_point *tp) {
590 if (tp->threadid != tid && !saw_stack) {
591 return;
592 }
593 nevents++;
594 if (nevents == 256 / 4) {
595 ktrace_end(s, 1);
596 }
597 saw_stack_data = true;
598 });
599
600 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);
601
602 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
603 "start tracing");
604
605 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
606 dispatch_get_main_queue(), ^(void)
607 {
608 T_LOG("ending test after timeout");
609 ktrace_end(s, 0);
610 });
611
612 dispatch_main();
613}
614
39037602 615/* TODO test kernel stacks in all modes */
813fb2f6 616/* TODO legacy PET mode backtracing */