#include <CoreSymbolication/CoreSymbolication.h>
#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <kperf/kperf.h>
#include <ktrace/session.h>
#include <System/sys/kdebug.h>
#include <pthread.h>

#include "kperf_helpers.h"

#define PERF_STK_KHDR  UINT32_C(0x25020014)
#define PERF_STK_UHDR  UINT32_C(0x25020018)
#define PERF_STK_KDATA UINT32_C(0x2502000c)
#define PERF_STK_UDATA UINT32_C(0x25020010)

T_GLOBAL_META(
    T_META_NAMESPACE("xnu.kperf"),
    T_META_CHECK_LEAKS(false));

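/*
 * Check that the frame at `bt_idx` -- counting from the frame nearest the
 * current PC -- matches the expected symbol name in the `bt` signature.  NULL
 * entries in the signature mark system frames that are skipped rather than
 * matched.
 */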
static void
expect_frame(const char **bt, unsigned int bt_len, CSSymbolRef symbol,
    unsigned long addr, unsigned int bt_idx, unsigned int max_frames)
{
    const char *name;
    unsigned int frame_idx = max_frames - bt_idx - 1;

    if (!bt[frame_idx]) {
        T_LOG("frame %2u: skipping system frame", frame_idx);
        return;
    }
    if (CSIsNull(symbol)) {
        T_FAIL("invalid symbol for address %#lx at frame %d", addr, frame_idx);
        return;
    }

    if (frame_idx >= bt_len) {
        T_FAIL("unexpected frame '%s' (%#lx) at index %u",
            CSSymbolGetName(symbol), addr, frame_idx);
        return;
    }
    name = CSSymbolGetName(symbol);
    T_QUIET; T_ASSERT_NOTNULL(name, NULL);
    T_EXPECT_EQ_STR(name, bt[frame_idx],
        "frame %2u: saw '%s', expected '%s'",
        frame_idx, name, bt[frame_idx]);
}

/*
 * Expect to see either user or kernel stacks on the thread with ID `tid` with
 * a signature of `bt` of length `bt_len`.  Updates `stacks_seen` when a stack
 * is found.
 *
 * Can also allow stacks to be larger than the signature -- additional frames
 * near the current PC will be ignored.  This allows stacks to potentially be
 * in the middle of a signalling system call (which signals that it is safe to
 * start sampling).
 */
static void
expect_backtrace(ktrace_session_t s, uint64_t tid, unsigned int *stacks_seen,
    bool kern, const char **bt, unsigned int bt_len, unsigned int allow_larger_by)
{
    CSSymbolicatorRef symb;
    uint32_t hdr_debugid;
    uint32_t data_debugid;
    __block unsigned int stacks = 0;
    __block unsigned int frames = 0;
    __block unsigned int hdr_frames = 0;
    __block unsigned int allow_larger = allow_larger_by;

    if (kern) {
        static CSSymbolicatorRef kern_symb;
        static dispatch_once_t kern_symb_once;

        hdr_debugid = PERF_STK_KHDR;
        data_debugid = PERF_STK_KDATA;

        dispatch_once(&kern_symb_once, ^(void) {
            kern_symb = CSSymbolicatorCreateWithMachKernel();
            T_QUIET; T_ASSERT_FALSE(CSIsNull(kern_symb), NULL);
        });
        symb = kern_symb;
    } else {
        static CSSymbolicatorRef user_symb;
        static dispatch_once_t user_symb_once;

        hdr_debugid = PERF_STK_UHDR;
        data_debugid = PERF_STK_UDATA;

        dispatch_once(&user_symb_once, ^(void) {
            user_symb = CSSymbolicatorCreateWithTask(mach_task_self());
            T_QUIET; T_ASSERT_FALSE(CSIsNull(user_symb), NULL);
            T_QUIET; T_ASSERT_TRUE(CSSymbolicatorIsTaskValid(user_symb), NULL);
        });
        symb = user_symb;
    }

    ktrace_events_single(s, hdr_debugid, ^(struct trace_point *tp) {
        if (tid != 0 && tid != tp->threadid) {
            return;
        }

        T_LOG("found stack from thread %#lx", tp->threadid);
        stacks++;
        if (!(tp->arg1 & 1)) {
            T_FAIL("invalid %s stack on thread %#lx", kern ? "kernel" : "user",
                tp->threadid);
            return;
        }

        hdr_frames = (unsigned int)tp->arg2;
        /* ignore extra link register or value pointed to by stack pointer */
        hdr_frames -= 1;

        T_QUIET; T_EXPECT_GE(hdr_frames, bt_len,
            "number of frames in header");
        T_QUIET; T_EXPECT_LE(hdr_frames, bt_len + allow_larger,
            "number of frames in header");
        if (hdr_frames > bt_len && allow_larger > 0) {
            allow_larger = hdr_frames - bt_len;
            hdr_frames = bt_len;
        }

        T_LOG("%s stack seen", kern ? "kernel" : "user");
        frames = 0;
    });

    ktrace_events_single(s, data_debugid, ^(struct trace_point *tp) {
        if (tid != 0 && tid != tp->threadid) {
            return;
        }

        int i = 0;

        if (frames == 0 && hdr_frames > bt_len) {
            /* skip frames near the PC */
            i = (int)allow_larger;
            allow_larger = 0;
        }

        for (; i < 4 && frames < hdr_frames; i++, frames++) {
            unsigned long addr = (&tp->arg1)[i];
            CSSymbolRef symbol = CSSymbolicatorGetSymbolWithAddressAtTime(
                symb, addr, kCSNow);

            expect_frame(bt, bt_len, symbol, addr, frames, hdr_frames);
        }

        /* saw the end of the user stack */
        if (hdr_frames == frames) {
            *stacks_seen += 1;
            if (!kern) {
                ktrace_end(s, 1);
            }
        }
    });
}

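/*
 * An otherwise-unused debugid that the test both emits with kdebug_trace and
 * installs in the kperf kdebug filter below, so tracing it from a known point
 * in the recursion triggers a stack sample with a predictable signature.
 */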
#define TRIGGERING_DEBUGID (0xfeff0f00)

/*
 * These functions must return an int to avoid the function prologue being
 * hoisted out of the path to the spin (breaking being able to get a good
 * backtrace).
 */
static int __attribute__((noinline, not_tail_called))
recurse_a(dispatch_semaphore_t spinning, unsigned int frames);
static int __attribute__((noinline, not_tail_called))
recurse_b(dispatch_semaphore_t spinning, unsigned int frames);

static int __attribute__((noinline, not_tail_called))
recurse_a(dispatch_semaphore_t spinning, unsigned int frames)
{
    if (frames == 0) {
        if (spinning) {
            dispatch_semaphore_signal(spinning);
            for (;;);
        } else {
            kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
            return 0;
        }
    }

    return recurse_b(spinning, frames - 1) + 1;
}

static int __attribute__((noinline, not_tail_called))
recurse_b(dispatch_semaphore_t spinning, unsigned int frames)
{
    if (frames == 0) {
        if (spinning) {
            dispatch_semaphore_signal(spinning);
            for (;;);
        } else {
            kdebug_trace(TRIGGERING_DEBUGID, 0, 0, 0, 0);
            return 0;
        }
    }

    return recurse_a(spinning, frames - 1) + 1;
}

#define USER_FRAMES (12)

#if defined(__x86_64__)
#define RECURSE_START_OFFSET (4)
#else /* defined(__x86_64__) */
#define RECURSE_START_OFFSET (3)
#endif /* defined(__x86_64__) */

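/*
 * USER_FRAMES is the expected depth of the sampled user stack;
 * RECURSE_START_OFFSET is how many frames are already on the stack before the
 * first call into recurse_a (it differs by architecture -- see the
 * "already at %d frames" log in backtrace_thread).
 */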
static const char *user_bt[USER_FRAMES] = {
#if defined(__x86_64__)
    NULL,
#endif /* defined(__x86_64__) */
    NULL, NULL,
    "backtrace_thread",
    "recurse_a", "recurse_b", "recurse_a", "recurse_b",
    "recurse_a", "recurse_b", "recurse_a",
#if !defined(__x86_64__)
    "recurse_b",
#endif /* !defined(__x86_64__) */
    NULL
};

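/*
 * The expected kernel frames are the system call entry path leading to
 * kdebug_trace64, which differs by architecture.
 */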
#if defined(__arm__)

#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
    "unix_syscall", "kdebug_trace64"
};

#elif defined(__arm64__)

#define KERNEL_FRAMES (4)
static const char *kernel_bt[KERNEL_FRAMES] = {
    "fleh_synchronous", "sleh_synchronous", "unix_syscall", "kdebug_trace64"
};

#elif defined(__x86_64__)

#define KERNEL_FRAMES (2)
static const char *kernel_bt[KERNEL_FRAMES] = {
    "unix_syscall64", "kdebug_trace64"
};

#else
#error "architecture unsupported"
#endif /* defined(__arm__) */

static dispatch_once_t backtrace_once;
static dispatch_semaphore_t backtrace_started;
static dispatch_semaphore_t backtrace_go;

/*
 * Another thread to run with a known backtrace.
 *
 * Take a semaphore that will be signalled when the thread is spinning at the
 * correct frame.  If the semaphore is NULL, don't spin and instead make a
 * kdebug_trace system call, which can trigger a deterministic backtrace itself.
 */
static void *
backtrace_thread(void *arg)
{
    dispatch_semaphore_t notify_spinning;
    unsigned int calls;

    notify_spinning = (dispatch_semaphore_t)arg;

    dispatch_semaphore_signal(backtrace_started);
    if (!notify_spinning) {
        dispatch_semaphore_wait(backtrace_go, DISPATCH_TIME_FOREVER);
    }

    /*
     * backtrace_thread, recurse_a, recurse_b, ...[, __kdebug_trace64]
     *
     * Always make one less call for this frame (backtrace_thread).
     */
    calls = USER_FRAMES - RECURSE_START_OFFSET - 1 /* backtrace_thread */;
    if (notify_spinning) {
        /*
         * Spinning doesn't end up calling __kdebug_trace64.
         */
        calls += 1;
    }

    T_LOG("backtrace thread calling into %d frames (already at %d frames)",
        calls, RECURSE_START_OFFSET);

    (void)recurse_a(notify_spinning, calls);
    return NULL;
}

static uint64_t
create_backtrace_thread(dispatch_semaphore_t notify_spinning)
{
    pthread_t thread = NULL;
    uint64_t tid;

    dispatch_once(&backtrace_once, ^{
        backtrace_started = dispatch_semaphore_create(0);
        T_QUIET; T_ASSERT_NOTNULL(backtrace_started, NULL);

        if (!notify_spinning) {
            backtrace_go = dispatch_semaphore_create(0);
            T_QUIET; T_ASSERT_NOTNULL(backtrace_go, NULL);
        }
    });

    T_QUIET; T_ASSERT_POSIX_ZERO(pthread_create(&thread, NULL, backtrace_thread,
        (void *)notify_spinning), NULL);
    T_QUIET; T_ASSERT_NOTNULL(thread, "backtrace thread created");
    dispatch_semaphore_wait(backtrace_started, DISPATCH_TIME_FOREVER);

    T_QUIET; T_ASSERT_POSIX_ZERO(pthread_threadid_np(thread, &tid), NULL);
    T_QUIET; T_ASSERT_NE(tid, UINT64_C(0),
        "backtrace thread created does not have ID 0");

    T_LOG("starting thread with ID 0x%" PRIx64, tid);

    return tid;
}

static void
start_backtrace_thread(void)
{
    T_QUIET; T_ASSERT_NOTNULL(backtrace_go,
        "thread to backtrace created before starting it");
    dispatch_semaphore_signal(backtrace_go);
}

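/* Watches get a longer window before the timeout handler below stops tracing. */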
#if TARGET_OS_WATCH
#define TEST_TIMEOUT_NS (30 * NSEC_PER_SEC)
#else /* TARGET_OS_WATCH */
#define TEST_TIMEOUT_NS (5 * NSEC_PER_SEC)
#endif /* TARGET_OS_WATCH */

T_DECL(backtraces_kdebug_trigger,
    "test that backtraces from kdebug trigger are correct",
    T_META_ASROOT(true))
{
    static unsigned int stacks_seen = 0;
    ktrace_session_t s;
    kperf_kdebug_filter_t filter;
    uint64_t tid;

    s = ktrace_session_create();
    T_ASSERT_NOTNULL(s, "ktrace session was created");

    T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

    tid = create_backtrace_thread(NULL);
    expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES, 0);
    expect_backtrace(s, tid, &stacks_seen, true, kernel_bt, KERNEL_FRAMES, 0);

    /*
     * The triggering event must be traced (and thus registered with libktrace)
     * to get backtraces.
     */
    ktrace_events_single(s, TRIGGERING_DEBUGID,
        ^(__unused struct trace_point *tp){ });

    ktrace_set_completion_handler(s, ^(void) {
        T_EXPECT_GE(stacks_seen, 2U, "saw both kernel and user stacks");
        ktrace_session_destroy(s);
        kperf_reset();
        T_END;
    });

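    /*
     * Configure kperf: action 1 samples both user and kernel stacks, and is
     * triggered whenever the kdebug filter below matches TRIGGERING_DEBUGID.
     */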
    filter = kperf_kdebug_filter_create();
    T_ASSERT_NOTNULL(filter, "kperf kdebug filter was created");

    T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_debugid(filter,
        TRIGGERING_DEBUGID), NULL);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
    (void)kperf_action_count_set(1);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
        KPERF_SAMPLER_USTACK | KPERF_SAMPLER_KSTACK), NULL);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
    kperf_kdebug_filter_destroy(filter);

    T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

    start_backtrace_thread();

    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
        dispatch_get_main_queue(), ^(void)
    {
        T_LOG("ending test after timeout");
        ktrace_end(s, 0);
    });

    dispatch_main();
}

T_DECL(backtraces_user_timer,
    "test that user backtraces on a timer are correct",
    T_META_ASROOT(true))
{
    static unsigned int stacks_seen = 0;
    ktrace_session_t s;
    uint64_t tid;
    dispatch_semaphore_t wait_for_spinning = dispatch_semaphore_create(0);

    s = ktrace_session_create();
    T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

    ktrace_filter_pid(s, getpid());

    configure_kperf_stacks_timer(getpid(), 10);
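    /*
     * configure_kperf_stacks_timer comes from kperf_helpers; it presumably
     * arms a kperf timer action that samples this process's stacks, with the
     * second argument assumed to be the timer period in milliseconds.
     */
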
    tid = create_backtrace_thread(wait_for_spinning);
    /* potentially calling dispatch function and system call */
    expect_backtrace(s, tid, &stacks_seen, false, user_bt, USER_FRAMES - 1, 2);

    ktrace_set_completion_handler(s, ^(void) {
        T_EXPECT_GE(stacks_seen, 1U, "saw at least one stack");
        ktrace_session_destroy(s);
        kperf_reset();
        T_END;
    });

    T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

    /* wait until the thread that will be backtraced is spinning */
    dispatch_semaphore_wait(wait_for_spinning, DISPATCH_TIME_FOREVER);

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TEST_TIMEOUT_NS),
        dispatch_get_main_queue(), ^(void)
    {
        T_LOG("ending test after timeout");
        ktrace_end(s, 0);
    });

    dispatch_main();
}

/* TODO test kernel stacks in all modes */
/* TODO legacy PET mode backtracing */
/* TODO test deep stacks, further than 128 frames, make sure they are truncated */
/* TODO test constrained stacks */