1 #include <darwintest.h>
2 #include <darwintest_utils.h>
3 #include <dispatch/dispatch.h>
5 #include <ktrace/session.h>
6 #include <ktrace/private.h>
7 #include <kperf/kperf.h>
8 #include <mach/clock_types.h>
9 #include <mach/dyld_kernel.h>
10 #include <mach/host_info.h>
11 #include <mach/mach.h>
12 #include <mach/mach_init.h>
13 #include <mach/task.h>
14 #include <os/assumes.h>
16 #include <sys/kdebug.h>
17 #include <sys/kdebug_signpost.h>
18 #include <sys/sysctl.h>
21 T_META_NAMESPACE("xnu.ktrace"),
24 #define KDBG_TEST_MACROS 1
25 #define KDBG_TEST_OLD_TIMES 2
28 assert_kdebug_test(unsigned int flavor
)
31 int mib
[] = { CTL_KERN
, KERN_KDEBUG
, KERN_KDTEST
};
32 T_ASSERT_POSIX_SUCCESS(sysctl(mib
, sizeof(mib
) / sizeof(mib
[0]), NULL
,
33 &size
, NULL
, 0), "KERN_KDTEST sysctl");
36 #pragma mark kdebug syscalls
38 #define TRACE_DEBUGID (0xfedfed00U)
/*
 * Emit one event with kdebug_trace(2) and check that it shows up in the trace
 * with all four arguments intact.
 * NOTE(review): reconstructed from fragmented source — verify against upstream.
 */
T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events")
{
	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	/* Subscribe to a busy class so the trace buffers flush promptly. */
	ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});

	__block int events_seen = 0;
	ktrace_events_single(s, TRACE_DEBUGID, ^void(struct trace_point *tp) {
		events_seen++;
		T_PASS("saw traced event");

		T_EXPECT_EQ(tp->arg1, 1UL, "argument 1 of traced event is correct");
		T_EXPECT_EQ(tp->arg2, 2UL, "argument 2 of traced event is correct");
		T_EXPECT_EQ(tp->arg3, 3UL, "argument 3 of traced event is correct");
		T_EXPECT_EQ(tp->arg4, 4UL, "argument 4 of traced event is correct");

		/* The event under test was seen -- stop tracing. */
		ktrace_end(s, 1);
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_GE(events_seen, 1, NULL);
		ktrace_session_destroy(s);
		T_END;
	});

	ktrace_filter_pid(s, getpid());

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	T_ASSERT_POSIX_SUCCESS(kdebug_trace(TRACE_DEBUGID, 1, 2, 3, 4), NULL);
	ktrace_end(s, 0);

	dispatch_main();
}
75 #define SIGNPOST_SINGLE_CODE (0x10U)
76 #define SIGNPOST_PAIRED_CODE (0x20U)
/*
 * Emit a single signpost and a start/end signpost pair with
 * kdebug_signpost(2) and check the resulting events' arguments and ordering.
 * NOTE(review): reconstructed from fragmented source — verify against upstream.
 */
T_DECL(kdebug_signpost_syscall,
    "test that kdebug_signpost(2) emits correct events")
{
	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	__block int single_seen = 0;
	__block int paired_seen = 0;

	/* make sure to get enough events for the KDBUFWAIT to trigger */
	// ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
	ktrace_events_single(s,
	    APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE),
	    ^(struct trace_point *tp) {
		single_seen++;
		T_PASS("single signpost is traced");

		T_EXPECT_EQ(tp->arg1, 1UL, "argument 1 of single signpost is correct");
		T_EXPECT_EQ(tp->arg2, 2UL, "argument 2 of single signpost is correct");
		T_EXPECT_EQ(tp->arg3, 3UL, "argument 3 of single signpost is correct");
		T_EXPECT_EQ(tp->arg4, 4UL, "argument 4 of single signpost is correct");
	});

	ktrace_events_single_paired(s,
	    APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE),
	    ^(struct trace_point *start, struct trace_point *end) {
		paired_seen++;
		T_PASS("paired signposts are traced");

		T_EXPECT_EQ(start->arg1, 5UL, "argument 1 of start signpost is correct");
		T_EXPECT_EQ(start->arg2, 6UL, "argument 2 of start signpost is correct");
		T_EXPECT_EQ(start->arg3, 7UL, "argument 3 of start signpost is correct");
		T_EXPECT_EQ(start->arg4, 8UL, "argument 4 of start signpost is correct");

		T_EXPECT_EQ(end->arg1, 9UL, "argument 1 of end signpost is correct");
		T_EXPECT_EQ(end->arg2, 10UL, "argument 2 of end signpost is correct");
		T_EXPECT_EQ(end->arg3, 11UL, "argument 3 of end signpost is correct");
		T_EXPECT_EQ(end->arg4, 12UL, "argument 4 of end signpost is correct");

		/* The single signpost was emitted first, so it must be seen first. */
		T_EXPECT_EQ(single_seen, 1,
		    "signposts are traced in the correct order");

		ktrace_end(s, 1);
	});

	ktrace_set_completion_handler(s, ^(void) {
		T_QUIET; T_EXPECT_NE(single_seen, 0,
		    "did not see single tracepoint before timeout");
		T_QUIET; T_EXPECT_NE(paired_seen, 0,
		    "did not see single tracepoint before timeout");
		ktrace_session_destroy(s);
		T_END;
	});

	ktrace_filter_pid(s, getpid());

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "started tracing");

	T_EXPECT_POSIX_SUCCESS(kdebug_signpost(SIGNPOST_SINGLE_CODE, 1, 2, 3, 4),
	    "emitted single signpost");
	T_EXPECT_POSIX_SUCCESS(
	    kdebug_signpost_start(SIGNPOST_PAIRED_CODE, 5, 6, 7, 8),
	    "emitted start signpost");
	T_EXPECT_POSIX_SUCCESS(
	    kdebug_signpost_end(SIGNPOST_PAIRED_CODE, 9, 10, 11, 12),
	    "emitted end signpost");
	ktrace_end(s, 0);

	dispatch_main();
}
150 #pragma mark kdebug behaviors
152 #define WRAPPING_EVENTS_COUNT (150000)
153 #define TRACE_ITERATIONS (5000)
154 #define WRAPPING_EVENTS_THRESHOLD (100)
157 "ensure that wrapping traces lost events and no events prior to the wrap",
158 T_META_CHECK_LEAKS(false))
162 int wait_wrapping_secs
= (WRAPPING_EVENTS_COUNT
/ TRACE_ITERATIONS
) + 5;
163 int current_secs
= wait_wrapping_secs
;
165 /* use sysctls manually to bypass libktrace assumptions */
167 mib
[0] = CTL_KERN
; mib
[1] = KERN_KDEBUG
; mib
[2] = KERN_KDSETUP
; mib
[3] = 0;
169 T_ASSERT_POSIX_SUCCESS(sysctl(mib
, 3, NULL
, &needed
, NULL
, 0),
172 mib
[2] = KERN_KDSETBUF
; mib
[3] = WRAPPING_EVENTS_COUNT
;
173 T_ASSERT_POSIX_SUCCESS(sysctl(mib
, 4, NULL
, 0, NULL
, 0), "KERN_KDSETBUF");
175 mib
[2] = KERN_KDENABLE
; mib
[3] = 1;
176 T_ASSERT_POSIX_SUCCESS(sysctl(mib
, 4, NULL
, 0, NULL
, 0), "KERN_KDENABLE");
178 /* wrapping is on by default */
180 /* wait until wrapped */
181 T_LOG("waiting for trace to wrap");
182 mib
[2] = KERN_KDGETBUF
;
183 needed
= sizeof(buf_info
);
186 for (int i
= 0; i
< TRACE_ITERATIONS
; i
++) {
188 T_ASSERT_POSIX_SUCCESS(kdebug_trace(0xfefe0000, 0, 0, 0, 0), NULL
);
191 T_ASSERT_POSIX_SUCCESS(sysctl(mib
, 3, &buf_info
, &needed
, NULL
, 0),
193 } while (!(buf_info
.flags
& KDBG_WRAPPED
) && --current_secs
> 0);
195 T_ASSERT_TRUE(buf_info
.flags
& KDBG_WRAPPED
,
196 "trace wrapped (after %d seconds within %d second timeout)",
197 wait_wrapping_secs
- current_secs
, wait_wrapping_secs
);
199 ktrace_session_t s
= ktrace_session_create();
200 T_QUIET
; T_ASSERT_NOTNULL(s
, NULL
);
201 T_QUIET
; T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(s
), NULL
);
203 __block
int events
= 0;
205 ktrace_events_all(s
, ^(struct trace_point
*tp
) {
207 T_EXPECT_EQ(tp
->debugid
, (unsigned int)TRACE_LOST_EVENTS
,
208 "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS",
210 ktrace_name_for_eventid(s
, tp
->debugid
& KDBG_EVENTID_MASK
));
213 T_EXPECT_NE(tp
->debugid
, (unsigned int)TRACE_LOST_EVENTS
,
214 "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS",
216 ktrace_name_for_eventid(s
, tp
->debugid
& KDBG_EVENTID_MASK
));
220 if (events
> WRAPPING_EVENTS_THRESHOLD
) {
225 ktrace_set_completion_handler(s
, ^{
226 ktrace_session_destroy(s
);
230 T_ASSERT_POSIX_ZERO(ktrace_start(s
, dispatch_get_main_queue()),
/*
 * Use the in-kernel test (KDBG_TEST_OLD_TIMES) to try to inject events with
 * stale timestamps and check that none appear before the tracing horizon.
 * NOTE(review): reconstructed from fragmented source — verify against upstream.
 */
T_DECL(reject_old_events,
    "ensure that kdebug rejects events from before tracing began",
    T_META_CHECK_LEAKS(false))
{
	__block uint64_t event_horizon_ts;

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	__block int events = 0;
	ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
	    KDBG_EVENTID(DBG_BSD + 1, 0, 0), ^(struct trace_point *tp) {
		events++;
		T_EXPECT_GT(tp->timestamp, event_horizon_ts,
		    "events in trace should be from after tracing began");
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_EQ(events, 2, "should see only two events");
		ktrace_session_destroy(s);
		T_END;
	});

	event_horizon_ts = mach_absolute_time();

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	/* first, try an old event at the beginning of trace */
	assert_kdebug_test(KDBG_TEST_OLD_TIMES);
	/* after a good event has been traced, old events should be rejected */
	assert_kdebug_test(KDBG_TEST_OLD_TIMES);
	ktrace_end(s, 0);

	dispatch_main();
}
271 #define ORDERING_TIMEOUT_SEC 5
/*
 * Watch every event and verify timestamps never go backwards, even after
 * asking the kernel to inject events with old timestamps.
 * NOTE(review): reconstructed from fragmented source; the per-event updates of
 * prev_ts/prev_debugid/prev_cpu were missing and are restored here so the
 * logged "previous event" values are meaningful — verify against upstream.
 */
T_DECL(ascending_time_order,
    "ensure that kdebug events are in ascending order based on time",
    T_META_CHECK_LEAKS(false))
{
	__block uint64_t prev_ts = 0;
	__block uint32_t prev_debugid = 0;
	__block unsigned int prev_cpu = 0;
	__block bool in_order = true;

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	ktrace_events_all(s, ^(struct trace_point *tp) {
		if (tp->timestamp < prev_ts) {
			in_order = false;
			/* Log the offending pair for post-mortem debugging. */
			T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
			    prev_ts, prev_debugid, prev_cpu);
			T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
			    tp->timestamp, tp->debugid, tp->cpuid);
		}

		prev_ts = tp->timestamp;
		prev_debugid = tp->debugid;
		prev_cpu = tp->cpuid;
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_EXPECT_TRUE(in_order, "event timestamps were in-order");
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
	    "started tracing");

	/* try to inject old timestamps into trace */
	assert_kdebug_test(KDBG_TEST_OLD_TIMES);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
	    ORDERING_TIMEOUT_SEC * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^{
		T_LOG("ending test after timeout");
		ktrace_end(s, 1);
	});

	dispatch_main();
}
317 #pragma mark dyld tracing
319 __attribute__((aligned(8)))
320 static const char map_uuid
[16] = "map UUID";
322 __attribute__((aligned(8)))
323 static const char unmap_uuid
[16] = "unmap UUID";
325 __attribute__((aligned(8)))
326 static const char sc_uuid
[16] = "shared UUID";
328 static fsid_t map_fsid
= { .val
= { 42, 43 } };
329 static fsid_t unmap_fsid
= { .val
= { 44, 45 } };
330 static fsid_t sc_fsid
= { .val
= { 46, 47 } };
332 static fsobj_id_t map_fsobjid
= { .fid_objno
= 42, .fid_generation
= 43 };
333 static fsobj_id_t unmap_fsobjid
= { .fid_objno
= 44, .fid_generation
= 45 };
334 static fsobj_id_t sc_fsobjid
= { .fid_objno
= 46, .fid_generation
= 47 };
336 #define MAP_LOAD_ADDR 0xabadcafe
337 #define UNMAP_LOAD_ADDR 0xfeedface
338 #define SC_LOAD_ADDR 0xfedfaced
342 expect_dyld_image_info(struct trace_point
*tp
, const uint64_t *exp_uuid
,
343 uint64_t exp_load_addr
, fsid_t
*exp_fsid
, fsobj_id_t
*exp_fsobjid
,
346 #if defined(__LP64__) || defined(__arm64__)
352 uuid
[0] = (uint64_t)tp
->arg1
;
353 uuid
[1] = (uint64_t)tp
->arg2
;
354 load_addr
= (uint64_t)tp
->arg3
;
355 fsid
.val
[0] = (int32_t)(tp
->arg4
& UINT32_MAX
);
356 fsid
.val
[1] = (int32_t)((uint64_t)tp
->arg4
>> 32);
358 T_QUIET
; T_EXPECT_EQ(uuid
[0], exp_uuid
[0], NULL
);
359 T_QUIET
; T_EXPECT_EQ(uuid
[1], exp_uuid
[1], NULL
);
360 T_QUIET
; T_EXPECT_EQ(load_addr
, exp_load_addr
, NULL
);
361 T_QUIET
; T_EXPECT_EQ(fsid
.val
[0], exp_fsid
->val
[0], NULL
);
362 T_QUIET
; T_EXPECT_EQ(fsid
.val
[1], exp_fsid
->val
[1], NULL
);
363 } else if (order
== 1) {
366 fsobjid
.fid_objno
= (uint32_t)(tp
->arg1
& UINT32_MAX
);
367 fsobjid
.fid_generation
= (uint32_t)((uint64_t)tp
->arg1
>> 32);
369 T_QUIET
; T_EXPECT_EQ(fsobjid
.fid_objno
, exp_fsobjid
->fid_objno
, NULL
);
370 T_QUIET
; T_EXPECT_EQ(fsobjid
.fid_generation
,
371 exp_fsobjid
->fid_generation
, NULL
);
373 T_ASSERT_FAIL("unrecognized order of events %d", order
);
375 #else /* defined(__LP64__) */
379 uuid
[0] = (uint32_t)tp
->arg1
;
380 uuid
[1] = (uint32_t)tp
->arg2
;
381 uuid
[2] = (uint32_t)tp
->arg3
;
382 uuid
[3] = (uint32_t)tp
->arg4
;
384 T_QUIET
; T_EXPECT_EQ(uuid
[0], (uint32_t)exp_uuid
[0], NULL
);
385 T_QUIET
; T_EXPECT_EQ(uuid
[1], (uint32_t)(exp_uuid
[0] >> 32), NULL
);
386 T_QUIET
; T_EXPECT_EQ(uuid
[2], (uint32_t)exp_uuid
[1], NULL
);
387 T_QUIET
; T_EXPECT_EQ(uuid
[3], (uint32_t)(exp_uuid
[1] >> 32), NULL
);
388 } else if (order
== 1) {
393 load_addr
= (uint32_t)tp
->arg1
;
394 fsid
.val
[0] = (int32_t)tp
->arg2
;
395 fsid
.val
[1] = (int32_t)tp
->arg3
;
396 fsobjid
.fid_objno
= (uint32_t)tp
->arg4
;
398 T_QUIET
; T_EXPECT_EQ(load_addr
, (uint32_t)exp_load_addr
, NULL
);
399 T_QUIET
; T_EXPECT_EQ(fsid
.val
[0], exp_fsid
->val
[0], NULL
);
400 T_QUIET
; T_EXPECT_EQ(fsid
.val
[1], exp_fsid
->val
[1], NULL
);
401 T_QUIET
; T_EXPECT_EQ(fsobjid
.fid_objno
, exp_fsobjid
->fid_objno
, NULL
);
402 } else if (order
== 2) {
405 fsobjid
.fid_generation
= tp
->arg1
;
407 T_QUIET
; T_EXPECT_EQ(fsobjid
.fid_generation
,
408 exp_fsobjid
->fid_generation
, NULL
);
410 T_ASSERT_FAIL("unrecognized order of events %d", order
);
412 #endif /* defined(__LP64__) */
415 #if defined(__LP64__) || defined(__arm64__)
416 #define DYLD_CODE_OFFSET (0)
417 #define DYLD_EVENTS (2)
419 #define DYLD_CODE_OFFSET (2)
420 #define DYLD_EVENTS (3)
424 expect_dyld_events(ktrace_session_t s
, const char *name
, uint32_t base_code
,
425 const char *exp_uuid
, uint64_t exp_load_addr
, fsid_t
*exp_fsid
,
426 fsobj_id_t
*exp_fsobjid
, uint8_t *saw_events
)
428 for (int i
= 0; i
< DYLD_EVENTS
; i
++) {
429 ktrace_events_single(s
, KDBG_EVENTID(DBG_DYLD
, DBG_DYLD_UUID
,
430 base_code
+ DYLD_CODE_OFFSET
+ (unsigned int)i
),
431 ^(struct trace_point
*tp
) {
432 T_LOG("checking %s event %c", name
, 'A' + i
);
433 expect_dyld_image_info(tp
, (const void *)exp_uuid
, exp_load_addr
,
434 exp_fsid
, exp_fsobjid
, i
);
435 *saw_events
|= (1U << i
);
/*
 * Register, unregister, and shared-cache-register fake dyld image infos and
 * check that the kernel emits the corresponding dyld tracepoints.
 * NOTE(review): reconstructed from fragmented source — verify against upstream.
 */
T_DECL(dyld_events, "test that dyld registering libraries emits events")
{
	dyld_kernel_image_info_t info;

	/*
	 * Use pointers instead of __block variables in order to use these variables
	 * in the completion block below _and_ pass pointers to them to the
	 * expect_dyld_events function.
	 */
	uint8_t saw_events[3] = { 0 };
	uint8_t *saw_mapping = &(saw_events[0]);
	uint8_t *saw_unmapping = &(saw_events[1]);
	uint8_t *saw_shared_cache = &(saw_events[2]);

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	T_QUIET;
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
	    "filtered to current process");

	expect_dyld_events(s, "mapping", DBG_DYLD_UUID_MAP_A, map_uuid,
	    MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping);
	expect_dyld_events(s, "unmapping", DBG_DYLD_UUID_UNMAP_A, unmap_uuid,
	    UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping);
	expect_dyld_events(s, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A,
	    sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache);

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);

		/* Every expected event in each group must have fired. */
		T_EXPECT_EQ(__builtin_popcount(*saw_mapping), DYLD_EVENTS, NULL);
		T_EXPECT_EQ(__builtin_popcount(*saw_unmapping), DYLD_EVENTS, NULL);
		T_EXPECT_EQ(__builtin_popcount(*saw_shared_cache), DYLD_EVENTS, NULL);
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	info.load_addr = MAP_LOAD_ADDR;
	memcpy(info.uuid, map_uuid, sizeof(info.uuid));
	info.fsid = map_fsid;
	info.fsobjid = map_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(),
	    &info, 1), "registered dyld image info");

	info.load_addr = UNMAP_LOAD_ADDR;
	memcpy(info.uuid, unmap_uuid, sizeof(info.uuid));
	info.fsid = unmap_fsid;
	info.fsobjid = unmap_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(),
	    &info, 1), "unregistered dyld image info");

	info.load_addr = SC_LOAD_ADDR;
	memcpy(info.uuid, sc_uuid, sizeof(info.uuid));
	info.fsid = sc_fsid;
	info.fsobjid = sc_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info(
	    mach_task_self(), info, FALSE, FALSE),
	    "registered dyld shared cache image info");

	ktrace_end(s, 0);

	dispatch_main();
}
506 #pragma mark kdebug kernel macros
508 #define EXP_KERNEL_EVENTS 5U
510 static const uint32_t dev_evts
[EXP_KERNEL_EVENTS
] = {
511 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 0),
512 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 1),
513 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 2),
514 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 3),
515 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 4),
518 static const uint32_t rel_evts
[EXP_KERNEL_EVENTS
] = {
519 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 5),
520 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 6),
521 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 7),
522 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 8),
523 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 9),
526 static const uint32_t filt_evts
[EXP_KERNEL_EVENTS
] = {
527 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 10),
528 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 11),
529 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 12),
530 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 13),
531 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 14),
534 static const uint32_t noprocfilt_evts
[EXP_KERNEL_EVENTS
] = {
535 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 15),
536 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 16),
537 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 17),
538 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 18),
539 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 19),
543 is_development_kernel(void)
545 static dispatch_once_t is_development_once
;
546 static bool is_development
;
548 dispatch_once(&is_development_once
, ^{
550 size_t dev_size
= sizeof(dev
);
553 T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.development", &dev
,
554 &dev_size
, NULL
, 0), NULL
);
555 is_development
= (dev
!= 0);
558 return is_development
;
562 expect_event(struct trace_point
*tp
, const char *name
, unsigned int *events
,
563 const uint32_t *event_ids
, size_t event_ids_len
)
565 unsigned int event_idx
= *events
;
566 bool event_found
= false;
568 for (i
= 0; i
< event_ids_len
; i
++) {
569 if (event_ids
[i
] == (tp
->debugid
& KDBG_EVENTID_MASK
)) {
570 T_LOG("found %s event 0x%x", name
, tp
->debugid
);
580 for (i
= 0; i
< event_idx
; i
++) {
581 T_QUIET
; T_EXPECT_EQ(((uint64_t *)&tp
->arg1
)[i
], (uint64_t)i
+ 1,
585 T_QUIET
; T_EXPECT_EQ(((uint64_t *)&tp
->arg1
)[i
], (uint64_t)0, NULL
);
590 expect_release_event(struct trace_point
*tp
, unsigned int *events
)
592 expect_event(tp
, "release", events
, rel_evts
,
593 sizeof(rel_evts
) / sizeof(rel_evts
[0]));
597 expect_development_event(struct trace_point
*tp
, unsigned int *events
)
599 expect_event(tp
, "dev", events
, dev_evts
, sizeof(dev_evts
) / sizeof(dev_evts
[0]));
603 expect_filtered_event(struct trace_point
*tp
, unsigned int *events
)
605 expect_event(tp
, "filtered", events
, filt_evts
,
606 sizeof(filt_evts
) / sizeof(filt_evts
[0]));
610 expect_noprocfilt_event(struct trace_point
*tp
, unsigned int *events
)
612 expect_event(tp
, "noprocfilt", events
, noprocfilt_evts
,
613 sizeof(noprocfilt_evts
) / sizeof(noprocfilt_evts
[0]));
617 expect_kdbg_test_events(ktrace_session_t s
, bool use_all_callback
,
618 void (^cb
)(unsigned int dev_seen
, unsigned int rel_seen
,
619 unsigned int filt_seen
, unsigned int noprocfilt_seen
))
621 __block
unsigned int dev_seen
= 0;
622 __block
unsigned int rel_seen
= 0;
623 __block
unsigned int filt_seen
= 0;
624 __block
unsigned int noprocfilt_seen
= 0;
626 void (^evtcb
)(struct trace_point
*tp
) = ^(struct trace_point
*tp
) {
627 expect_development_event(tp
, &dev_seen
);
628 expect_release_event(tp
, &rel_seen
);
629 expect_filtered_event(tp
, &filt_seen
);
630 expect_noprocfilt_event(tp
, &noprocfilt_seen
);
633 if (use_all_callback
) {
634 ktrace_events_all(s
, evtcb
);
636 ktrace_events_range(s
, KDBG_EVENTID(DBG_BSD
, DBG_BSD_KDEBUG_TEST
, 0),
637 KDBG_EVENTID(DBG_BSD
+ 1, 0, 0), evtcb
);
640 ktrace_set_completion_handler(s
, ^{
641 ktrace_session_destroy(s
);
642 cb(dev_seen
, rel_seen
, filt_seen
, noprocfilt_seen
);
646 T_ASSERT_POSIX_ZERO(ktrace_start(s
, dispatch_get_main_queue()), NULL
);
647 assert_kdebug_test(KDBG_TEST_MACROS
);
/*
 * Run the kernel macro test filtered to this process and check each macro
 * variant produced the expected number of events.
 * NOTE(review): reconstructed from fragmented source — verify against upstream.
 */
T_DECL(kernel_events, "ensure kernel macros work")
{
	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
	    "filtered events to current process");

	expect_kdbg_test_events(s, false,
	    ^(unsigned int dev_seen, unsigned int rel_seen,
	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
		/*
		 * Development-only events are only filtered if running on an embedded
		 * OS.
		 */
		unsigned int dev_exp;
#if TARGET_OS_EMBEDDED
		dev_exp = is_development_kernel() ? EXP_KERNEL_EVENTS : 0U;
#else
		dev_exp = EXP_KERNEL_EVENTS;
#endif

		T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS,
		    "release and development events seen");
		T_EXPECT_EQ(dev_seen, dev_exp, "development-only events %sseen",
		    dev_exp ? "" : "not ");
		T_EXPECT_EQ(filt_seen, dev_exp, "filter-only events seen");
		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
		    "process filter-agnostic events seen");
	});

	dispatch_main();
}
/*
 * Same as kernel_events, but using the events-all callback path; filter-only
 * macro events must not appear.
 * NOTE(review): reconstructed from fragmented source — verify against upstream.
 */
T_DECL(kernel_events_filtered, "ensure that the filtered kernel macros work")
{
	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
	    "filtered events to current process");

	expect_kdbg_test_events(s, true,
	    ^(unsigned int dev_seen, unsigned int rel_seen,
	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
		T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, NULL);
#if defined(__arm__) || defined(__arm64__)
		T_EXPECT_EQ(dev_seen, is_development_kernel() ? EXP_KERNEL_EVENTS : 0U,
		    NULL);
#else
		T_EXPECT_EQ(dev_seen, EXP_KERNEL_EVENTS,
		    "development-only events seen");
#endif /* defined(__arm__) || defined(__arm64__) */
		T_EXPECT_EQ(filt_seen, 0U, "no filter-only events seen");
		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
		    "process filter-agnostic events seen");
	});

	dispatch_main();
}
/*
 * Filter tracing to launchd (pid 1) but exempt the noprocfilt events from the
 * process filter; only those events should be seen from this process.
 * NOTE(review): reconstructed from fragmented source — verify against upstream.
 */
T_DECL(kernel_events_noprocfilt,
    "ensure that the no process filter kernel macros work")
{
	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	/*
	 * Only allow launchd events through.
	 */
	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, 1), "filtered events to launchd");
	for (size_t i = 0; i < sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]); i++) {
		T_QUIET;
		T_ASSERT_POSIX_ZERO(ktrace_ignore_process_filter_for_event(s,
		    noprocfilt_evts[i]),
		    "ignored process filter for noprocfilt event");
	}

	expect_kdbg_test_events(s, false,
	    ^(unsigned int dev_seen, unsigned int rel_seen,
	    unsigned int filt_seen, unsigned int noprocfilt_seen) {
		T_EXPECT_EQ(rel_seen, 0U, "release and development events not seen");
		T_EXPECT_EQ(dev_seen, 0U, "development-only events not seen");
		T_EXPECT_EQ(filt_seen, 0U, "filter-only events not seen");

		T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
		    "process filter-agnostic events seen");
	});

	dispatch_main();
}
744 static volatile bool continue_abuse
= true;
746 #define STRESS_DEBUGID (0xfeedfac0)
747 #define ABUSE_SECS (10)
748 #define TIMER_NS (100 * NSEC_PER_USEC)
750 * Use the quantum as the gap threshold.
752 #define GAP_THRESHOLD_NS (10 * NSEC_PER_MSEC)
755 kdebug_abuser_thread(void *ctx
)
757 unsigned int id
= (unsigned int)ctx
;
759 while (continue_abuse
) {
760 kdebug_trace(STRESS_DEBUGID
, id
, i
, 0, 0);
/*
 * Stress kdebug: spawn an event-emitting thread per CPU (minus one) with a
 * tiny trace buffer, write the trace to a file, then replay the file looking
 * for timestamp gaps that are not explained by lost-events markers.
 * NOTE(review): reconstructed from heavily fragmented source — verify against
 * upstream, particularly the `#if 0` around kperf_sample_set below.
 */
T_DECL(stress, "emit events on all but one CPU with a small buffer",
    T_META_CHECK_LEAKS(false))
{
	T_SETUPBEGIN;
	ktrace_session_t s = ktrace_session_create();
	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	/* Let's not waste any time with pleasantries. */
	ktrace_set_uuid_map_enabled(s, KTRACE_FEATURE_DISABLED);

	/* Ouch. */
	ktrace_events_all(s, ^(__unused struct trace_point *tp) {});
	ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
	(void)atexit_b(^{ kperf_reset(); });
	(void)kperf_action_count_set(1);
	(void)kperf_timer_count_set(1);
	int kperror = kperf_timer_period_set(0, kperf_ns_to_ticks(TIMER_NS));
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_period_set %llu ns",
	    TIMER_NS);
	kperror = kperf_timer_action_set(0, 1);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_action_set");
	kperror = kperf_action_samplers_set(1, KPERF_SAMPLER_TINFO |
	    KPERF_SAMPLER_TH_SNAPSHOT | KPERF_SAMPLER_KSTACK |
	    KPERF_SAMPLER_USTACK | KPERF_SAMPLER_MEMINFO |
	    KPERF_SAMPLER_TINFO_SCHED | KPERF_SAMPLER_TH_DISPATCH |
	    KPERF_SAMPLER_TK_SNAPSHOT | KPERF_SAMPLER_SYS_MEM |
	    KPERF_SAMPLER_TH_INSTRS_CYCLES);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_action_samplers_set");

	/* The coup-de-grace. */
	ktrace_set_buffer_size(s, 10);

	char filepath_arr[MAXPATHLEN] = "";
	strlcpy(filepath_arr, dt_tmpdir(), sizeof(filepath_arr));
	strlcat(filepath_arr, "/stress.ktrace", sizeof(filepath_arr));
	char *filepath = filepath_arr;

	int ncpus = 0;
	size_t ncpus_size = sizeof(ncpus);
	int ret = sysctlbyname("hw.logicalcpu_max", &ncpus, &ncpus_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"hw.logicalcpu_max\"");
	T_QUIET; T_ASSERT_GT(ncpus, 0, "realistic number of CPUs");

	pthread_t *threads = calloc((unsigned int)ncpus - 1, sizeof(pthread_t));
	T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
	    ncpus - 1);

	ktrace_set_completion_handler(s, ^{
		T_SETUPBEGIN;
		ktrace_session_destroy(s);

		T_LOG("trace ended, searching for gaps");

		ktrace_session_t sread = ktrace_session_create();
		T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(sread, "ktrace_session_create");

		int error = ktrace_set_file(sread, filepath);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_set_file %s", filepath);

		ktrace_file_t f = ktrace_file_open(filepath, false);
		T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(f, "ktrace_file_open %s",
		    filepath);
		uint64_t first_timestamp = 0;
		error = ktrace_file_earliest_timestamp(f, &first_timestamp);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_file_earliest_timestamp");

		uint64_t last_timestamp = 0;
		(void)ktrace_file_latest_timestamp(f, &last_timestamp);

		__block uint64_t prev_timestamp = 0;
		__block uint64_t nevents = 0;
		ktrace_events_all(sread, ^(struct trace_point *tp) {
			nevents++;
			uint64_t delta_ns = 0;
			T_QUIET; T_EXPECT_GE(tp->timestamp, prev_timestamp,
			    "timestamps are monotonically increasing");
			int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
			    tp->timestamp - prev_timestamp, &delta_ns);
			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
			if (prev_timestamp && delta_ns > GAP_THRESHOLD_NS) {
				if (tp->debugname) {
					T_LOG("gap: %gs at %llu - %llu on %d: %s (%#08x)",
					    (double)delta_ns / 1e9, prev_timestamp,
					    tp->timestamp, tp->cpuid, tp->debugname, tp->debugid);
				} else {
					T_LOG("gap: %gs at %llu - %llu on %d: %#x",
					    (double)delta_ns / 1e9, prev_timestamp,
					    tp->timestamp, tp->cpuid, tp->debugid);
				}

				/*
				 * These gaps are ok -- they appear after CPUs are brought back
				 * up.
				 */
#define INTERRUPT (0x1050000)
#define PERF_CPU_IDLE (0x27001000)
#define INTC_HANDLER (0x5000004)
#define DECR_TRAP (0x1090000)
				uint32_t eventid = tp->debugid & KDBG_EVENTID_MASK;
				if (eventid != INTERRUPT && eventid != PERF_CPU_IDLE &&
				    eventid != INTC_HANDLER && eventid != DECR_TRAP) {
					unsigned int lost_events = TRACE_LOST_EVENTS;
					T_QUIET; T_EXPECT_EQ(tp->debugid, lost_events,
					    "gaps should end with lost events");
				}
			}

			prev_timestamp = tp->timestamp;
		});
		ktrace_events_single(sread, TRACE_LOST_EVENTS, ^(struct trace_point *tp){
			T_LOG("lost: %llu on %d (%lu)", tp->timestamp, tp->cpuid, tp->arg1);
		});

		__block uint64_t last_write = 0;
		ktrace_events_single_paired(sread, TRACE_WRITING_EVENTS,
		    ^(struct trace_point *start, struct trace_point *end) {
			uint64_t delta_ns;
			int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
			    start->timestamp - last_write, &delta_ns);
			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");

			uint64_t dur_ns;
			converror = ktrace_convert_timestamp_to_nanoseconds(sread,
			    end->timestamp - start->timestamp, &dur_ns);
			T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");

			T_LOG("write: %llu (+%gs): %gus on %d: %lu events", start->timestamp,
			    (double)delta_ns / 1e9, (double)dur_ns / 1e3, end->cpuid, end->arg1);
			last_write = end->timestamp;
		});
		ktrace_set_completion_handler(sread, ^{
			uint64_t duration_ns = 0;
			if (last_timestamp) {
				int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
				    last_timestamp - first_timestamp, &duration_ns);
				T_QUIET; T_ASSERT_POSIX_ZERO(converror,
				    "convert timestamp to ns");
				T_LOG("file was %gs long, %llu events: %g events/msec/cpu",
				    (double)duration_ns / 1e9, nevents,
				    (double)nevents / ((double)duration_ns / 1e6) / ncpus);
			}
			(void)unlink(filepath);
			ktrace_session_destroy(sread);
			T_END;
		});

		int starterror = ktrace_start(sread, dispatch_get_main_queue());
		T_QUIET; T_ASSERT_POSIX_ZERO(starterror, "ktrace_start read session");

		T_SETUPEND;
	});

/* Just kidding... for now. */
#if 0
	kperror = kperf_sample_set(1);
	T_ASSERT_POSIX_SUCCESS(kperror,
	    "started kperf timer sampling every %llu ns", TIMER_NS);
#endif

	for (int i = 0; i < (ncpus - 1); i++) {
		int error = pthread_create(&threads[i], NULL, kdebug_abuser_thread,
		    (void *)(uintptr_t)i);
		T_QUIET; T_ASSERT_POSIX_ZERO(error,
		    "pthread_create abuser thread %d", i);
	}

	int error = ktrace_start_writing_file(s, filepath,
	    ktrace_compression_none, NULL, NULL);
	T_ASSERT_POSIX_ZERO(error, "started writing ktrace to %s", filepath);

	T_SETUPEND;

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ABUSE_SECS * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^{
		T_LOG("ending trace");
		ktrace_end(s, 1);

		continue_abuse = false;
		for (int i = 0; i < (ncpus - 1); i++) {
			int joinerror = pthread_join(threads[i], NULL);
			T_QUIET; T_EXPECT_POSIX_ZERO(joinerror, "pthread_join thread %d",
			    i);
		}
	});

	dispatch_main();
}
#define ROUND_TRIP_PERIOD UINT64_C(10 * 1000)
#define ROUND_TRIPS_THRESHOLD UINT64_C(25)
#define ROUND_TRIPS_TIMEOUT_SECS (2 * 60)
#define COLLECTION_INTERVAL_MS 100

/*
 * Test a sustained tracing session, involving multiple round-trips to the
 * kernel.
 *
 * Trace all events, and every `ROUND_TRIP_PERIOD` events, emit an event that's
 * unlikely to be emitted elsewhere.  Look for this event, too, and make sure we
 * see as many of them as we emitted.
 *
 * After seeing `ROUND_TRIPS_THRESHOLD` of the unlikely events, end tracing.
 * In the failure mode, we won't see any of these, so set a timeout of
 * `ROUND_TRIPS_TIMEOUT_SECS` to prevent hanging, waiting for events that we'll
 * never see.
 *
 * NOTE(review): the T_DECL header line was missing from the corrupted source;
 * the test name `round_trips` is reconstructed — verify against upstream.
 */
T_DECL(round_trips,
    "test sustained tracing with multiple round-trips through the kernel")
{
	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	/*
	 * Set a small buffer and collection interval to increase the number of
	 * round-trips.
	 */
	ktrace_set_buffer_size(s, 50);
	ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);

	__block uint64_t events = 0;
	__block uint64_t emitted = 0;
	__block uint64_t seen = 0;
	ktrace_events_all(s, ^(__unused struct trace_point *tp) {
		events++;
		if (events % ROUND_TRIP_PERIOD == 0) {
			T_LOG("emitting round-trip event %" PRIu64, emitted);
			kdebug_trace(TRACE_DEBUGID, events, 0, 0, 0);
			emitted++;
		}
	});

	ktrace_events_single(s, TRACE_DEBUGID, ^(__unused struct trace_point *tp) {
		T_LOG("saw round-trip event after %" PRIu64 " events", events);
		seen++;
		if (seen >= ROUND_TRIPS_THRESHOLD) {
			T_LOG("ending trace after seeing %" PRIu64 " events, "
			    "emitting %" PRIu64, seen, emitted);
			ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_GE(emitted, ROUND_TRIPS_THRESHOLD,
		    "emitted %" PRIu64 " round-trip events", emitted);
		T_EXPECT_GE(seen, ROUND_TRIPS_THRESHOLD,
		    "saw %" PRIu64 " round-trip events", seen);
		ktrace_session_destroy(s);
		T_END;
	});

	int error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
	    ROUND_TRIPS_TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(),
	    ^{
		T_LOG("ending trace after %d seconds", ROUND_TRIPS_TIMEOUT_SECS);
		ktrace_end(s, 0);
	});

	dispatch_main();
}
/*
 * Heartbeat parameters: a periodic timer fires every HEARTBEAT_INTERVAL_SECS
 * and the test runs for HEARTBEAT_COUNT heartbeats (see their uses below).
 */
1031 #define HEARTBEAT_INTERVAL_SECS 2
1032 #define HEARTBEAT_COUNT 20
1035 * Ensure we see events periodically, checking for recent events on a
/*
 * NOTE(review): this source text is fragmented (logical lines split apart,
 * some original lines dropped) and this T_DECL continues past the end of the
 * visible chunk; comments only have been added — code left untouched.
 */
1038 T_DECL(event_coverage
, "ensure events appear up to the end of tracing")
1040 ktrace_session_t s
= ktrace_session_create();
1041 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(s
, "created session");
/* Track the latest event timestamp and a running event count. */
1043 __block
uint64_t current_timestamp
= 0;
1044 __block
uint64_t events
= 0;
1045 ktrace_events_all(s
, ^(struct trace_point
*tp
) {
1046 current_timestamp
= tp
->timestamp
;
/* Small buffer and short collection interval force frequent collection. */
1050 ktrace_set_buffer_size(s
, 20);
1051 ktrace_set_collection_interval(s
, COLLECTION_INTERVAL_MS
);
/* Snapshot of the previous heartbeat, for progress comparisons. */
1053 __block
uint64_t last_timestamp
= 0;
1054 __block
uint64_t last_events
= 0;
1055 __block
unsigned int heartbeats
= 0;
/* On completion: destroy the session and require at least one event. */
1057 ktrace_set_completion_handler(s
, ^{
1058 ktrace_session_destroy(s
);
1059 T_QUIET
; T_EXPECT_GT(events
, 0ULL, "should have seen some events");
/* Periodic heartbeat timer on the main queue. */
1063 dispatch_source_t timer
= dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER
,
1064 0, 0, dispatch_get_main_queue());
1065 dispatch_source_set_timer(timer
, dispatch_time(DISPATCH_TIME_NOW
,
1066 HEARTBEAT_INTERVAL_SECS
* NSEC_PER_SEC
),
1067 HEARTBEAT_INTERVAL_SECS
* NSEC_PER_SEC
, 0);
1068 dispatch_source_set_cancel_handler(timer
, ^{
1069 dispatch_release(timer
);
/* Each heartbeat: log progress and require new, newer events since last. */
1072 dispatch_source_set_event_handler(timer
, ^{
1075 T_LOG("heartbeat %u at time %lld, seen %" PRIu64
" events, "
1076 "current event time %lld", heartbeats
, mach_absolute_time(),
1077 events
, current_timestamp
);
1079 if (current_timestamp
> 0) {
1080 T_EXPECT_GT(current_timestamp
, last_timestamp
,
1081 "event timestamps should be increasing");
1082 T_QUIET
; T_EXPECT_GT(events
, last_events
,
1083 "number of events should be increasing");
1086 last_timestamp
= current_timestamp
;
1087 last_events
= events
;
1089 if (heartbeats
>= HEARTBEAT_COUNT
) {
1090 T_LOG("ending trace after %u heartbeats", HEARTBEAT_COUNT
);
1095 int error
= ktrace_start(s
, dispatch_get_main_queue());
1096 T_ASSERT_POSIX_ZERO(error
, "started tracing");
1098 dispatch_activate(timer
);