5 #include <CoreSymbolication/CoreSymbolication.h>
6 #include <darwintest.h>
7 #include <dispatch/dispatch.h>
8 #include <kperf/kperf.h>
9 #include <ktrace/session.h>
10 #include <System/sys/kdebug.h>
13 #include "kperf_helpers.h"
15 #define PERF_STK_KHDR UINT32_C(0x25020014)
16 #define PERF_STK_UHDR UINT32_C(0x25020018)
17 #define PERF_STK_KDATA UINT32_C(0x2502000c)
18 #define PERF_STK_UDATA UINT32_C(0x25020010)
21 T_META_NAMESPACE("xnu.kperf"),
22 T_META_CHECK_LEAKS(false));
25 expect_frame(const char **bt
, unsigned int bt_len
, CSSymbolRef symbol
,
26 unsigned long addr
, unsigned int bt_idx
, unsigned int max_frames
)
29 unsigned int frame_idx
= max_frames
- bt_idx
- 1;
32 T_LOG("frame %2u: skipping system frame", frame_idx
);
36 if (CSIsNull(symbol
)) {
37 T_FAIL("invalid symbol for address %#lx at frame %d", addr
, frame_idx
);
41 if (frame_idx
>= bt_len
) {
42 T_FAIL("unexpected frame '%s' (%#lx) at index %u",
43 CSSymbolGetName(symbol
), addr
, frame_idx
);
47 name
= CSSymbolGetName(symbol
);
48 T_QUIET
; T_ASSERT_NOTNULL(name
, NULL
);
49 T_EXPECT_EQ_STR(name
, bt
[frame_idx
],
50 "frame %2u: saw '%s', expected '%s'",
51 frame_idx
, name
, bt
[frame_idx
]);
55 * Expect to see either user or kernel stacks on thread with ID `tid` with a
56 * signature of `bt` of length `bt_len`. Updates `stacks_seen` when stack
59 * Can also allow stacks to be larger than the signature -- additional frames
60 * near the current PC will be ignored. This allows stacks to potentially be
61 * in the middle of a signalling system call (which signals that it is safe to
65 expect_backtrace(ktrace_session_t s
, uint64_t tid
, unsigned int *stacks_seen
,
66 bool kern
, const char **bt
, unsigned int bt_len
, unsigned int allow_larger_by
)
68 CSSymbolicatorRef symb
;
70 uint32_t data_debugid
;
71 __block
unsigned int stacks
= 0;
72 __block
unsigned int frames
= 0;
73 __block
unsigned int hdr_frames
= 0;
74 __block
unsigned int allow_larger
= allow_larger_by
;
77 static CSSymbolicatorRef kern_symb
;
78 static dispatch_once_t kern_symb_once
;
80 hdr_debugid
= PERF_STK_KHDR
;
81 data_debugid
= PERF_STK_KDATA
;
83 dispatch_once(&kern_symb_once
, ^(void) {
84 kern_symb
= CSSymbolicatorCreateWithMachKernel();
85 T_QUIET
; T_ASSERT_FALSE(CSIsNull(kern_symb
), NULL
);
89 static CSSymbolicatorRef user_symb
;
90 static dispatch_once_t user_symb_once
;
92 hdr_debugid
= PERF_STK_UHDR
;
93 data_debugid
= PERF_STK_UDATA
;
95 dispatch_once(&user_symb_once
, ^(void) {
96 user_symb
= CSSymbolicatorCreateWithTask(mach_task_self());
97 T_QUIET
; T_ASSERT_FALSE(CSIsNull(user_symb
), NULL
);
98 T_QUIET
; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb
), NULL
);
103 ktrace_events_single(s
, hdr_debugid
, ^(struct trace_point
*tp
) {
104 if (tid
!= 0 && tid
!= tp
->threadid
) {
108 T_LOG("found stack from thread %#lx", tp
->threadid
);
110 if (!(tp
->arg1
& 1)) {
111 T_FAIL("invalid %s stack on thread %#lx", kern
? "kernel" : "user",
116 hdr_frames
= (unsigned int)tp
->arg2
;
117 /* ignore extra link register or value pointed to by stack pointer */
120 T_QUIET
; T_EXPECT_GE(hdr_frames
, bt_len
,
121 "number of frames in header");
122 T_QUIET
; T_EXPECT_LE(hdr_frames
, bt_len
+ allow_larger
,
123 "number of frames in header");
124 if (hdr_frames
> bt_len
&& allow_larger
> 0) {
125 allow_larger
= hdr_frames
- bt_len
;
129 T_LOG("%s stack seen", kern
? "kernel" : "user");
133 ktrace_events_single(s
, data_debugid
, ^(struct trace_point
*tp
) {
134 if (tid
!= 0 && tid
!= tp
->threadid
) {
140 if (frames
== 0 && hdr_frames
> bt_len
) {
141 /* skip frames near the PC */
142 i
= (int)allow_larger
;
146 for (; i
< 4 && frames
< hdr_frames
; i
++, frames
++) {
147 unsigned long addr
= (&tp
->arg1
)[i
];
148 CSSymbolRef symbol
= CSSymbolicatorGetSymbolWithAddressAtTime(
151 expect_frame(bt
, bt_len
, symbol
, addr
, frames
, hdr_frames
);
154 /* saw the end of the user stack */
155 if (hdr_frames
== frames
) {
164 #define TRIGGERING_DEBUGID (0xfeff0f00)
167 * These functions must return an int to avoid the function prologue being
168 * hoisted out of the path to the spin (breaking being able to get a good
171 static int __attribute__((noinline
, not_tail_called
))
172 recurse_a(dispatch_semaphore_t spinning
, unsigned int frames
);
173 static int __attribute__((noinline
, not_tail_called
))
174 recurse_b(dispatch_semaphore_t spinning
, unsigned int frames
);
176 static int __attribute__((noinline
, not_tail_called
))
177 recurse_a(dispatch_semaphore_t spinning
, unsigned int frames
)
181 dispatch_semaphore_signal(spinning
);
186 kdebug_trace(TRIGGERING_DEBUGID
, 0, 0, 0, 0);
191 return recurse_b(spinning
, frames
- 1) + 1;
194 static int __attribute__((noinline
, not_tail_called
))
195 recurse_b(dispatch_semaphore_t spinning
, unsigned int frames
)
199 dispatch_semaphore_signal(spinning
);
204 kdebug_trace(TRIGGERING_DEBUGID
, 0, 0, 0, 0);
209 return recurse_a(spinning
, frames
- 1) + 1;
212 #define USER_FRAMES (12)
214 #if defined(__x86_64__)
215 #define RECURSE_START_OFFSET (4)
216 #else /* defined(__x86_64__) */
217 #define RECURSE_START_OFFSET (3)
218 #endif /* defined(__x86_64__) */
220 static const char *user_bt
[USER_FRAMES
] = {
221 #if defined(__x86_64__)
223 #endif /* defined(__x86_64__) */
226 "recurse_a", "recurse_b", "recurse_a", "recurse_b",
227 "recurse_a", "recurse_b", "recurse_a",
228 #if !defined(__x86_64__)
230 #endif /* !defined(__x86_64__) */
236 #define KERNEL_FRAMES (2)
237 static const char *kernel_bt
[KERNEL_FRAMES
] = {
238 "unix_syscall", "kdebug_trace64"
241 #elif defined(__arm64__)
243 #define KERNEL_FRAMES (4)
244 static const char *kernel_bt
[KERNEL_FRAMES
] = {
245 "fleh_synchronous", "sleh_synchronous", "unix_syscall", "kdebug_trace64"
248 #elif defined(__x86_64__)
250 #define KERNEL_FRAMES (2)
251 static const char *kernel_bt
[KERNEL_FRAMES
] = {
252 "unix_syscall64", "kdebug_trace64"
256 #error "architecture unsupported"
257 #endif /* defined(__arm__) */
259 static dispatch_once_t backtrace_once
;
260 static dispatch_semaphore_t backtrace_started
;
261 static dispatch_semaphore_t backtrace_go
;
264 * Another thread to run with a known backtrace.
266 * Take a semaphore that will be signalled when the thread is spinning at the
267 * correct frame. If the semaphore is NULL, don't spin and instead make a
268 * kdebug_trace system call, which can trigger a deterministic backtrace itself.
271 backtrace_thread(void *arg
)
273 dispatch_semaphore_t notify_spinning
;
276 notify_spinning
= (dispatch_semaphore_t
)arg
;
278 dispatch_semaphore_signal(backtrace_started
);
279 if (!notify_spinning
) {
280 dispatch_semaphore_wait(backtrace_go
, DISPATCH_TIME_FOREVER
);
284 * backtrace_thread, recurse_a, recurse_b, ...[, __kdebug_trace64]
286 * Always make one less call for this frame (backtrace_thread).
288 calls
= USER_FRAMES
- RECURSE_START_OFFSET
- 1 /* backtrace_thread */;
289 if (notify_spinning
) {
291 * Spinning doesn't end up calling __kdebug_trace64.
296 T_LOG("backtrace thread calling into %d frames (already at %d frames)",
297 calls
, RECURSE_START_OFFSET
);
298 (void)recurse_a(notify_spinning
, calls
);
303 create_backtrace_thread(dispatch_semaphore_t notify_spinning
)
305 pthread_t thread
= NULL
;
308 dispatch_once(&backtrace_once
, ^{
309 backtrace_started
= dispatch_semaphore_create(0);
310 T_QUIET
; T_ASSERT_NOTNULL(backtrace_started
, NULL
);
312 if (!notify_spinning
) {
313 backtrace_go
= dispatch_semaphore_create(0);
314 T_QUIET
; T_ASSERT_NOTNULL(backtrace_go
, NULL
);
318 T_QUIET
; T_ASSERT_POSIX_ZERO(pthread_create(&thread
, NULL
, backtrace_thread
,
319 (void *)notify_spinning
), NULL
);
320 T_QUIET
; T_ASSERT_NOTNULL(thread
, "backtrace thread created");
321 dispatch_semaphore_wait(backtrace_started
, DISPATCH_TIME_FOREVER
);
323 T_QUIET
; T_ASSERT_POSIX_ZERO(pthread_threadid_np(thread
, &tid
), NULL
);
324 T_QUIET
; T_ASSERT_NE(tid
, UINT64_C(0),
325 "backtrace thread created does not have ID 0");
327 T_LOG("starting thread with ID 0x%" PRIx64
, tid
);
333 start_backtrace_thread(void)
335 T_QUIET
; T_ASSERT_NOTNULL(backtrace_go
,
336 "thread to backtrace created before starting it");
337 dispatch_semaphore_signal(backtrace_go
);
/* watches are slow; give them more time before giving up on the test */
#if TARGET_OS_WATCH
#define TEST_TIMEOUT_NS (30 * NSEC_PER_SEC)
#else /* TARGET_OS_WATCH */
#define TEST_TIMEOUT_NS (5 * NSEC_PER_SEC)
#endif /* !TARGET_OS_WATCH */
346 T_DECL(backtraces_kdebug_trigger
,
347 "test that backtraces from kdebug trigger are correct",
350 static unsigned int stacks_seen
= 0;
352 kperf_kdebug_filter_t filter
;
355 s
= ktrace_session_create();
356 T_ASSERT_NOTNULL(s
, "ktrace session was created");
358 T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s
, getpid()), NULL
);
360 tid
= create_backtrace_thread(NULL
);
361 expect_backtrace(s
, tid
, &stacks_seen
, false, user_bt
, USER_FRAMES
, 0);
362 expect_backtrace(s
, tid
, &stacks_seen
, true, kernel_bt
, KERNEL_FRAMES
, 0);
365 * The triggering event must be traced (and thus registered with libktrace)
368 ktrace_events_single(s
, TRIGGERING_DEBUGID
,
369 ^(__unused
struct trace_point
*tp
){ });
371 ktrace_set_completion_handler(s
, ^(void) {
372 T_EXPECT_GE(stacks_seen
, 2U, "saw both kernel and user stacks");
373 ktrace_session_destroy(s
);
378 filter
= kperf_kdebug_filter_create();
379 T_ASSERT_NOTNULL(filter
, "kperf kdebug filter was created");
381 T_QUIET
; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_debugid(filter
,
382 TRIGGERING_DEBUGID
), NULL
);
383 T_QUIET
; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter
), NULL
);
384 (void)kperf_action_count_set(1);
385 T_QUIET
; T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
386 KPERF_SAMPLER_USTACK
| KPERF_SAMPLER_KSTACK
), NULL
);
387 T_QUIET
; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL
);
388 kperf_kdebug_filter_destroy(filter
);
390 T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL
);
392 T_ASSERT_POSIX_ZERO(ktrace_start(s
, dispatch_get_main_queue()), NULL
);
394 start_backtrace_thread();
396 dispatch_after(dispatch_time(DISPATCH_TIME_NOW
, TEST_TIMEOUT_NS
),
397 dispatch_get_main_queue(), ^(void)
399 T_LOG("ending test after timeout");
T_DECL(backtraces_user_timer,
    "test that user backtraces on a timer are correct",
    T_META_ASROOT(true) /* NOTE(review): meta arg elided in source -- confirm */)
{
	static unsigned int stacks_seen = 0;
	ktrace_session_t s;
	uint64_t tid;
	dispatch_semaphore_t wait_for_spinning = dispatch_semaphore_create(0);

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	ktrace_filter_pid(s, getpid());

	configure_kperf_stacks_timer(getpid(), 10);

	tid = create_backtrace_thread(wait_for_spinning);
	/* potentially calling dispatch function and system call */
	expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES - 1, 2);

	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_GE(stacks_seen, 1U, "saw at least one stack");
		ktrace_session_destroy(s);
		kperf_reset();
		T_END;
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	/* wait until the thread that will be backtraced is spinning */
	dispatch_semaphore_wait(wait_for_spinning, DISPATCH_TIME_FOREVER);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* fail-safe: end tracing if no timer sample lands on the thread */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		T_LOG("ending test after timeout");
		ktrace_end(s, 0);
	});

	dispatch_main();
}
450 /* TODO test kernel stacks in all modes */
451 /* TODO legacy PET mode backtracing */
452 /* TODO test deep stacks, further than 128 frames, make sure they are truncated */
453 /* TODO test constrained stacks */