1 #include <darwintest.h>
2 #include <darwintest_utils.h>
3 #include <dispatch/dispatch.h>
5 #include <ktrace/session.h>
6 #include <ktrace/private.h>
7 #include <kperf/kperf.h>
8 #include <mach/clock_types.h>
9 #include <mach/dyld_kernel.h>
10 #include <mach/host_info.h>
11 #include <mach/mach.h>
12 #include <mach/mach_init.h>
13 #include <mach/task.h>
14 #include <os/assumes.h>
16 #include <sys/kdebug.h>
17 #include <sys/kdebug_signpost.h>
18 #include <sys/sysctl.h>
21 #include "ktrace_helpers.h"
24 T_META_NAMESPACE("xnu.ktrace"),
/*
 * Flavors understood by the in-kernel kdebug test sysctl (KERN_KDTEST);
 * passed to assert_kdebug_test() below.
 */
#define KDBG_TEST_MACROS 1
#define KDBG_TEST_OLD_TIMES 2
#define KDBG_TEST_FUTURE_TIMES 3
#define KDBG_TEST_IOP_SYNC_FLUSH 4
33 assert_kdebug_test(unsigned int flavor
)
36 int mib
[] = { CTL_KERN
, KERN_KDEBUG
, KERN_KDTEST
};
37 T_ASSERT_POSIX_SUCCESS(sysctl(mib
, sizeof(mib
) / sizeof(mib
[0]), NULL
,
38 &size
, NULL
, 0), "KERN_KDTEST sysctl");
#pragma mark kdebug syscalls

/* Event ID emitted and matched by the kdebug_trace(2) round-trip tests. */
#define TRACE_DEBUGID (0xfedfed00U)
45 T_DECL(kdebug_trace_syscall
, "test that kdebug_trace(2) emits correct events")
47 start_controlling_ktrace();
49 ktrace_session_t s
= ktrace_session_create();
50 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(s
, "created session");
52 ktrace_events_class(s
, DBG_MACH
, ^(__unused
struct trace_point
*tp
){});
54 __block
int events_seen
= 0;
55 ktrace_events_single(s
, TRACE_DEBUGID
, ^void (struct trace_point
*tp
) {
57 T_PASS("saw traced event");
59 if (ktrace_is_kernel_64_bit(s
)) {
60 T_EXPECT_EQ(tp
->arg1
, UINT64_C(0xfeedfacefeedface),
61 "argument 1 of traced event is correct");
63 T_EXPECT_EQ(tp
->arg1
, UINT64_C(0xfeedface),
64 "argument 1 of traced event is correct");
66 T_EXPECT_EQ(tp
->arg2
, 2ULL, "argument 2 of traced event is correct");
67 T_EXPECT_EQ(tp
->arg3
, 3ULL, "argument 3 of traced event is correct");
68 T_EXPECT_EQ(tp
->arg4
, 4ULL, "argument 4 of traced event is correct");
73 ktrace_set_completion_handler(s
, ^{
74 T_EXPECT_GE(events_seen
, 1, NULL
);
75 ktrace_session_destroy(s
);
79 ktrace_filter_pid(s
, getpid());
81 T_ASSERT_POSIX_ZERO(ktrace_start(s
, dispatch_get_main_queue()), NULL
);
82 T_ASSERT_POSIX_SUCCESS(kdebug_trace(TRACE_DEBUGID
, 0xfeedfacefeedface, 2,
/* Signpost codes used by the kdebug_signpost(2) test below. */
#define SIGNPOST_SINGLE_CODE (0x10U)
#define SIGNPOST_PAIRED_CODE (0x20U)
92 T_DECL(kdebug_signpost_syscall
,
93 "test that kdebug_signpost(2) emits correct events")
95 start_controlling_ktrace();
97 ktrace_session_t s
= ktrace_session_create();
98 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(s
, "created session");
100 __block
int single_seen
= 0;
101 __block
int paired_seen
= 0;
103 /* make sure to get enough events for the KDBUFWAIT to trigger */
104 // ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
105 ktrace_events_single(s
,
106 APPSDBG_CODE(DBG_APP_SIGNPOST
, SIGNPOST_SINGLE_CODE
),
107 ^(struct trace_point
*tp
) {
109 T_PASS("single signpost is traced");
111 T_EXPECT_EQ(tp
->arg1
, 1ULL, "argument 1 of single signpost is correct");
112 T_EXPECT_EQ(tp
->arg2
, 2ULL, "argument 2 of single signpost is correct");
113 T_EXPECT_EQ(tp
->arg3
, 3ULL, "argument 3 of single signpost is correct");
114 T_EXPECT_EQ(tp
->arg4
, 4ULL, "argument 4 of single signpost is correct");
117 ktrace_events_single_paired(s
,
118 APPSDBG_CODE(DBG_APP_SIGNPOST
, SIGNPOST_PAIRED_CODE
),
119 ^(struct trace_point
*start
, struct trace_point
*end
) {
121 T_PASS("paired signposts are traced");
123 T_EXPECT_EQ(start
->arg1
, 5ULL, "argument 1 of start signpost is correct");
124 T_EXPECT_EQ(start
->arg2
, 6ULL, "argument 2 of start signpost is correct");
125 T_EXPECT_EQ(start
->arg3
, 7ULL, "argument 3 of start signpost is correct");
126 T_EXPECT_EQ(start
->arg4
, 8ULL, "argument 4 of start signpost is correct");
128 T_EXPECT_EQ(end
->arg1
, 9ULL, "argument 1 of end signpost is correct");
129 T_EXPECT_EQ(end
->arg2
, 10ULL, "argument 2 of end signpost is correct");
130 T_EXPECT_EQ(end
->arg3
, 11ULL, "argument 3 of end signpost is correct");
131 T_EXPECT_EQ(end
->arg4
, 12ULL, "argument 4 of end signpost is correct");
133 T_EXPECT_EQ(single_seen
, 1, "signposts are traced in the correct order");
138 ktrace_set_completion_handler(s
, ^(void) {
139 T_QUIET
; T_EXPECT_NE(single_seen
, 0,
140 "did not see single tracepoint before timeout");
141 T_QUIET
; T_EXPECT_NE(paired_seen
, 0,
142 "did not see single tracepoint before timeout");
143 ktrace_session_destroy(s
);
147 ktrace_filter_pid(s
, getpid());
149 T_ASSERT_POSIX_ZERO(ktrace_start(s
, dispatch_get_main_queue()),
152 #pragma clang diagnostic push
153 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
154 T_EXPECT_POSIX_SUCCESS(kdebug_signpost(SIGNPOST_SINGLE_CODE
, 1, 2, 3, 4),
155 "emitted single signpost");
156 T_EXPECT_POSIX_SUCCESS(
157 kdebug_signpost_start(SIGNPOST_PAIRED_CODE
, 5, 6, 7, 8),
158 "emitted start signpost");
159 T_EXPECT_POSIX_SUCCESS(
160 kdebug_signpost_end(SIGNPOST_PAIRED_CODE
, 9, 10, 11, 12),
161 "emitted end signpost");
162 #pragma clang diagnostic pop
168 T_DECL(syscall_tracing
,
169 "ensure that syscall arguments are traced propertly")
171 ktrace_session_t s
= ktrace_session_create();
172 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(s
, "created session");
174 __block
bool seen
= 0;
176 ktrace_filter_pid(s
, getpid());
178 static const int telemetry_syscall_no
= 451;
179 static const uint64_t arg1
= 0xfeedfacefeedface;
181 ktrace_events_single(s
, BSDDBG_CODE(DBG_BSD_EXCP_SC
, telemetry_syscall_no
),
182 ^(struct trace_point
*evt
){
183 if (KDBG_EXTRACT_CODE(evt
->debugid
) != telemetry_syscall_no
|| seen
) {
188 if (ktrace_is_kernel_64_bit(s
)) {
189 T_EXPECT_EQ(evt
->arg1
, arg1
,
190 "argument 1 of syscall event is correct");
192 T_EXPECT_EQ(evt
->arg1
, (uint64_t)(uint32_t)(arg1
),
193 "argument 1 of syscall event is correct");
199 ktrace_set_completion_handler(s
, ^{
201 "should have seen a syscall event for kevent_id(2)");
202 ktrace_session_destroy(s
);
206 int error
= ktrace_start(s
, dispatch_get_main_queue());
207 T_ASSERT_POSIX_ZERO(error
, "started tracing");
210 * telemetry(2) has a 64-bit argument that will definitely be traced, and
211 * is unlikely to be used elsewhere by this process.
213 extern int __telemetry(uint64_t cmd
, uint64_t deadline
, uint64_t interval
,
214 uint64_t leeway
, uint64_t arg4
, uint64_t arg5
);
215 (void)__telemetry(arg1
, 0, 0, 0, 0, 0);
217 dispatch_after(dispatch_time(DISPATCH_TIME_NOW
, 5 * NSEC_PER_SEC
),
218 dispatch_get_main_queue(), ^{
219 T_LOG("ending test due to timeout");
#pragma mark kdebug behaviors

/* Buffer size, per-second event count, and check depth for the wrap test. */
#define WRAPPING_EVENTS_COUNT (150000)
#define TRACE_ITERATIONS (5000)
#define WRAPPING_EVENTS_THRESHOLD (100)
233 "ensure that wrapping traces lost events and no events prior to the wrap",
234 T_META_CHECK_LEAKS(false))
237 int wait_wrapping_secs
= (WRAPPING_EVENTS_COUNT
/ TRACE_ITERATIONS
) + 5;
238 int current_secs
= wait_wrapping_secs
;
240 start_controlling_ktrace();
242 /* use sysctls manually to bypass libktrace assumptions */
244 int mib
[4] = { CTL_KERN
, KERN_KDEBUG
};
245 mib
[2] = KERN_KDSETBUF
; mib
[3] = WRAPPING_EVENTS_COUNT
;
246 T_ASSERT_POSIX_SUCCESS(sysctl(mib
, 4, NULL
, 0, NULL
, 0), "KERN_KDSETBUF");
248 mib
[2] = KERN_KDSETUP
; mib
[3] = 0;
250 T_ASSERT_POSIX_SUCCESS(sysctl(mib
, 3, NULL
, &needed
, NULL
, 0),
253 mib
[2] = KERN_KDENABLE
; mib
[3] = 1;
254 T_ASSERT_POSIX_SUCCESS(sysctl(mib
, 4, NULL
, 0, NULL
, 0), "KERN_KDENABLE");
256 /* wrapping is on by default */
258 /* wait until wrapped */
259 T_LOG("waiting for trace to wrap");
260 mib
[2] = KERN_KDGETBUF
;
261 needed
= sizeof(buf_info
);
264 for (int i
= 0; i
< TRACE_ITERATIONS
; i
++) {
266 T_ASSERT_POSIX_SUCCESS(kdebug_trace(0xfefe0000, 0, 0, 0, 0), NULL
);
269 T_ASSERT_POSIX_SUCCESS(sysctl(mib
, 3, &buf_info
, &needed
, NULL
, 0),
271 } while (!(buf_info
.flags
& KDBG_WRAPPED
) && --current_secs
> 0);
273 T_ASSERT_TRUE(buf_info
.flags
& KDBG_WRAPPED
,
274 "trace wrapped (after %d seconds within %d second timeout)",
275 wait_wrapping_secs
- current_secs
, wait_wrapping_secs
);
277 ktrace_session_t s
= ktrace_session_create();
278 T_QUIET
; T_ASSERT_NOTNULL(s
, NULL
);
279 T_QUIET
; T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(s
), NULL
);
281 __block
int events
= 0;
283 ktrace_events_all(s
, ^(struct trace_point
*tp
) {
285 T_EXPECT_EQ(tp
->debugid
, (unsigned int)TRACE_LOST_EVENTS
,
286 "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS",
288 ktrace_name_for_eventid(s
, tp
->debugid
& KDBG_EVENTID_MASK
));
291 T_EXPECT_NE(tp
->debugid
, (unsigned int)TRACE_LOST_EVENTS
,
292 "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS",
294 ktrace_name_for_eventid(s
, tp
->debugid
& KDBG_EVENTID_MASK
));
298 if (events
> WRAPPING_EVENTS_THRESHOLD
) {
303 ktrace_set_completion_handler(s
, ^{
304 ktrace_session_destroy(s
);
308 T_ASSERT_POSIX_ZERO(ktrace_start(s
, dispatch_get_main_queue()),
/*
 * Record a horizon timestamp, start tracing, then ask the kernel (via
 * KERN_KDTEST) to emit events stamped in the past.  Only events at or after
 * the horizon should make it into the trace.
 *
 * NOTE(review): reconstructed from a mangled extraction -- the event-counter
 * increment and the function tail (ktrace_end/dispatch_main) were lost;
 * confirm against the upstream file.
 */
T_DECL(reject_old_events,
    "ensure that kdebug rejects events from before tracing began",
    T_META_CHECK_LEAKS(false))
{
	__block uint64_t event_horizon_ts;

	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	__block int events = 0;
	ktrace_events_single(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 1),
	    ^(struct trace_point *tp) {
		events++;
		T_EXPECT_GT(tp->timestamp, event_horizon_ts,
		    "events in trace should be from after tracing began");
	});

	ktrace_set_completion_handler(s, ^{
		/* Two KDBG_TEST_OLD_TIMES runs, one valid event each. */
		T_EXPECT_EQ(events, 2, "should see only two events");
		ktrace_session_destroy(s);
		T_END;
	});

	event_horizon_ts = mach_absolute_time();

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	/* first, try an old event at the beginning of trace */
	assert_kdebug_test(KDBG_TEST_OLD_TIMES);
	/* after a good event has been traced, old events should be rejected */
	assert_kdebug_test(KDBG_TEST_OLD_TIMES);

	ktrace_end(s, 0);

	dispatch_main();
}
/* How long the time-ordering test runs before the session is ended. */
#define ORDERING_TIMEOUT_SEC 5
353 T_DECL(ascending_time_order
,
354 "ensure that kdebug events are in ascending order based on time",
355 T_META_CHECK_LEAKS(false))
357 __block
uint64_t prev_ts
= 0;
358 __block
uint32_t prev_debugid
= 0;
359 __block
unsigned int prev_cpu
= 0;
360 __block
bool in_order
= true;
362 start_controlling_ktrace();
364 ktrace_session_t s
= ktrace_session_create();
365 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(s
, "created session");
367 ktrace_events_all(s
, ^(struct trace_point
*tp
) {
368 if (tp
->timestamp
< prev_ts
) {
370 T_LOG("%" PRIu64
": %#" PRIx32
" (cpu %d)",
371 prev_ts
, prev_debugid
, prev_cpu
);
372 T_LOG("%" PRIu64
": %#" PRIx32
" (cpu %d)",
373 tp
->timestamp
, tp
->debugid
, tp
->cpuid
);
378 ktrace_set_completion_handler(s
, ^{
379 ktrace_session_destroy(s
);
380 T_EXPECT_TRUE(in_order
, "event timestamps were in-order");
384 T_ASSERT_POSIX_ZERO(ktrace_start(s
, dispatch_get_main_queue()),
387 /* try to inject old timestamps into trace */
388 assert_kdebug_test(KDBG_TEST_OLD_TIMES
);
390 dispatch_after(dispatch_time(DISPATCH_TIME_NOW
, ORDERING_TIMEOUT_SEC
* NSEC_PER_SEC
),
391 dispatch_get_main_queue(), ^{
392 T_LOG("ending test after timeout");
399 #pragma mark dyld tracing
401 __attribute__((aligned(8)))
402 static const char map_uuid
[16] = "map UUID";
404 __attribute__((aligned(8)))
405 static const char unmap_uuid
[16] = "unmap UUID";
407 __attribute__((aligned(8)))
408 static const char sc_uuid
[16] = "shared UUID";
410 static fsid_t map_fsid
= { .val
= { 42, 43 } };
411 static fsid_t unmap_fsid
= { .val
= { 44, 45 } };
412 static fsid_t sc_fsid
= { .val
= { 46, 47 } };
414 static fsobj_id_t map_fsobjid
= { .fid_objno
= 42, .fid_generation
= 43 };
415 static fsobj_id_t unmap_fsobjid
= { .fid_objno
= 44, .fid_generation
= 45 };
416 static fsobj_id_t sc_fsobjid
= { .fid_objno
= 46, .fid_generation
= 47 };
418 #define MAP_LOAD_ADDR 0xabadcafe
419 #define UNMAP_LOAD_ADDR 0xfeedface
420 #define SC_LOAD_ADDR 0xfedfaced
424 expect_dyld_image_info(struct trace_point
*tp
, const uint64_t *exp_uuid
,
425 uint64_t exp_load_addr
, fsid_t
*exp_fsid
, fsobj_id_t
*exp_fsobjid
,
428 #if defined(__LP64__) || defined(__arm64__)
434 uuid
[0] = (uint64_t)tp
->arg1
;
435 uuid
[1] = (uint64_t)tp
->arg2
;
436 load_addr
= (uint64_t)tp
->arg3
;
437 fsid
.val
[0] = (int32_t)(tp
->arg4
& UINT32_MAX
);
438 fsid
.val
[1] = (int32_t)((uint64_t)tp
->arg4
>> 32);
440 T_QUIET
; T_EXPECT_EQ(uuid
[0], exp_uuid
[0], NULL
);
441 T_QUIET
; T_EXPECT_EQ(uuid
[1], exp_uuid
[1], NULL
);
442 T_QUIET
; T_EXPECT_EQ(load_addr
, exp_load_addr
, NULL
);
443 T_QUIET
; T_EXPECT_EQ(fsid
.val
[0], exp_fsid
->val
[0], NULL
);
444 T_QUIET
; T_EXPECT_EQ(fsid
.val
[1], exp_fsid
->val
[1], NULL
);
445 } else if (order
== 1) {
448 fsobjid
.fid_objno
= (uint32_t)(tp
->arg1
& UINT32_MAX
);
449 fsobjid
.fid_generation
= (uint32_t)((uint64_t)tp
->arg1
>> 32);
451 T_QUIET
; T_EXPECT_EQ(fsobjid
.fid_objno
, exp_fsobjid
->fid_objno
, NULL
);
452 T_QUIET
; T_EXPECT_EQ(fsobjid
.fid_generation
,
453 exp_fsobjid
->fid_generation
, NULL
);
455 T_ASSERT_FAIL("unrecognized order of events %d", order
);
457 #else /* defined(__LP64__) */
461 uuid
[0] = (uint32_t)tp
->arg1
;
462 uuid
[1] = (uint32_t)tp
->arg2
;
463 uuid
[2] = (uint32_t)tp
->arg3
;
464 uuid
[3] = (uint32_t)tp
->arg4
;
466 T_QUIET
; T_EXPECT_EQ(uuid
[0], (uint32_t)exp_uuid
[0], NULL
);
467 T_QUIET
; T_EXPECT_EQ(uuid
[1], (uint32_t)(exp_uuid
[0] >> 32), NULL
);
468 T_QUIET
; T_EXPECT_EQ(uuid
[2], (uint32_t)exp_uuid
[1], NULL
);
469 T_QUIET
; T_EXPECT_EQ(uuid
[3], (uint32_t)(exp_uuid
[1] >> 32), NULL
);
470 } else if (order
== 1) {
475 load_addr
= (uint32_t)tp
->arg1
;
476 fsid
.val
[0] = (int32_t)tp
->arg2
;
477 fsid
.val
[1] = (int32_t)tp
->arg3
;
478 fsobjid
.fid_objno
= (uint32_t)tp
->arg4
;
480 T_QUIET
; T_EXPECT_EQ(load_addr
, (uint32_t)exp_load_addr
, NULL
);
481 T_QUIET
; T_EXPECT_EQ(fsid
.val
[0], exp_fsid
->val
[0], NULL
);
482 T_QUIET
; T_EXPECT_EQ(fsid
.val
[1], exp_fsid
->val
[1], NULL
);
483 T_QUIET
; T_EXPECT_EQ(fsobjid
.fid_objno
, exp_fsobjid
->fid_objno
, NULL
);
484 } else if (order
== 2) {
487 fsobjid
.fid_generation
= tp
->arg1
;
489 T_QUIET
; T_EXPECT_EQ(fsobjid
.fid_generation
,
490 exp_fsobjid
->fid_generation
, NULL
);
492 T_ASSERT_FAIL("unrecognized order of events %d", order
);
494 #endif /* defined(__LP64__) */
/*
 * 64-bit kernels pack each dyld image info into 2 events; 32-bit kernels
 * need 3 and start at a different code offset.
 * NOTE(review): the #else/#endif lines were lost in extraction and have been
 * restored to match the two visible define pairs.
 */
#if defined(__LP64__) || defined(__arm64__)
#define DYLD_CODE_OFFSET (0)
#define DYLD_EVENTS (2)
#else
#define DYLD_CODE_OFFSET (2)
#define DYLD_EVENTS (3)
#endif
506 expect_dyld_events(ktrace_session_t s
, const char *name
, uint32_t base_code
,
507 const char *exp_uuid
, uint64_t exp_load_addr
, fsid_t
*exp_fsid
,
508 fsobj_id_t
*exp_fsobjid
, uint8_t *saw_events
)
510 for (int i
= 0; i
< DYLD_EVENTS
; i
++) {
511 ktrace_events_single(s
, KDBG_EVENTID(DBG_DYLD
, DBG_DYLD_UUID
,
512 base_code
+ DYLD_CODE_OFFSET
+ (unsigned int)i
),
513 ^(struct trace_point
*tp
) {
514 T_LOG("checking %s event %c", name
, 'A' + i
);
515 expect_dyld_image_info(tp
, (const void *)exp_uuid
, exp_load_addr
,
516 exp_fsid
, exp_fsobjid
, i
);
517 *saw_events
|= (1U << i
);
522 T_DECL(dyld_events
, "test that dyld registering libraries emits events")
524 dyld_kernel_image_info_t info
;
527 * Use pointers instead of __block variables in order to use these variables
528 * in the completion block below _and_ pass pointers to them to the
529 * expect_dyld_events function.
531 uint8_t saw_events
[3] = { 0 };
532 uint8_t *saw_mapping
= &(saw_events
[0]);
533 uint8_t *saw_unmapping
= &(saw_events
[1]);
534 uint8_t *saw_shared_cache
= &(saw_events
[2]);
536 start_controlling_ktrace();
538 ktrace_session_t s
= ktrace_session_create();
539 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(s
, "created session");
542 T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s
, getpid()),
543 "filtered to current process");
545 expect_dyld_events(s
, "mapping", DBG_DYLD_UUID_MAP_A
, map_uuid
,
546 MAP_LOAD_ADDR
, &map_fsid
, &map_fsobjid
, saw_mapping
);
547 expect_dyld_events(s
, "unmapping", DBG_DYLD_UUID_UNMAP_A
, unmap_uuid
,
548 UNMAP_LOAD_ADDR
, &unmap_fsid
, &unmap_fsobjid
, saw_unmapping
);
549 expect_dyld_events(s
, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A
,
550 sc_uuid
, SC_LOAD_ADDR
, &sc_fsid
, &sc_fsobjid
, saw_shared_cache
);
552 ktrace_set_completion_handler(s
, ^{
553 ktrace_session_destroy(s
);
555 T_EXPECT_EQ(__builtin_popcount(*saw_mapping
), DYLD_EVENTS
, NULL
);
556 T_EXPECT_EQ(__builtin_popcount(*saw_unmapping
), DYLD_EVENTS
, NULL
);
557 T_EXPECT_EQ(__builtin_popcount(*saw_shared_cache
), DYLD_EVENTS
, NULL
);
561 T_ASSERT_POSIX_ZERO(ktrace_start(s
, dispatch_get_main_queue()), NULL
);
563 info
.load_addr
= MAP_LOAD_ADDR
;
564 memcpy(info
.uuid
, map_uuid
, sizeof(info
.uuid
));
565 info
.fsid
= map_fsid
;
566 info
.fsobjid
= map_fsobjid
;
567 T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(),
568 &info
, 1), "registered dyld image info");
570 info
.load_addr
= UNMAP_LOAD_ADDR
;
571 memcpy(info
.uuid
, unmap_uuid
, sizeof(info
.uuid
));
572 info
.fsid
= unmap_fsid
;
573 info
.fsobjid
= unmap_fsobjid
;
574 T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(),
575 &info
, 1), "unregistered dyld image info");
577 info
.load_addr
= SC_LOAD_ADDR
;
578 memcpy(info
.uuid
, sc_uuid
, sizeof(info
.uuid
));
580 info
.fsobjid
= sc_fsobjid
;
581 T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info(
582 mach_task_self(), info
, FALSE
, FALSE
),
583 "registered dyld shared cache image info");
590 #pragma mark kdebug kernel macros
592 #define EXP_KERNEL_EVENTS 5U
594 static const uint32_t dev_evts
[EXP_KERNEL_EVENTS
] = {
595 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 0),
596 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 1),
597 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 2),
598 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 3),
599 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 4),
602 static const uint32_t rel_evts
[EXP_KERNEL_EVENTS
] = {
603 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 5),
604 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 6),
605 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 7),
606 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 8),
607 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 9),
610 static const uint32_t filt_evts
[EXP_KERNEL_EVENTS
] = {
611 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 10),
612 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 11),
613 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 12),
614 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 13),
615 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 14),
618 static const uint32_t noprocfilt_evts
[EXP_KERNEL_EVENTS
] = {
619 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 15),
620 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 16),
621 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 17),
622 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 18),
623 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST
, 19),
627 is_development_kernel(void)
629 static dispatch_once_t is_development_once
;
630 static bool is_development
;
632 dispatch_once(&is_development_once
, ^{
634 size_t dev_size
= sizeof(dev
);
637 T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.development", &dev
,
638 &dev_size
, NULL
, 0), NULL
);
639 is_development
= (dev
!= 0);
642 return is_development
;
646 expect_event(struct trace_point
*tp
, const char *name
, unsigned int *events
,
647 const uint32_t *event_ids
, size_t event_ids_len
)
649 unsigned int event_idx
= *events
;
650 bool event_found
= false;
652 for (i
= 0; i
< event_ids_len
; i
++) {
653 if (event_ids
[i
] == (tp
->debugid
& KDBG_EVENTID_MASK
)) {
654 T_LOG("found %s event 0x%x", name
, tp
->debugid
);
664 for (i
= 0; i
< event_idx
; i
++) {
665 T_QUIET
; T_EXPECT_EQ(((uint64_t *)&tp
->arg1
)[i
], (uint64_t)i
+ 1,
669 T_QUIET
; T_EXPECT_EQ(((uint64_t *)&tp
->arg1
)[i
], (uint64_t)0, NULL
);
674 expect_release_event(struct trace_point
*tp
, unsigned int *events
)
676 expect_event(tp
, "release", events
, rel_evts
,
677 sizeof(rel_evts
) / sizeof(rel_evts
[0]));
681 expect_development_event(struct trace_point
*tp
, unsigned int *events
)
683 expect_event(tp
, "dev", events
, dev_evts
, sizeof(dev_evts
) / sizeof(dev_evts
[0]));
687 expect_filtered_event(struct trace_point
*tp
, unsigned int *events
)
689 expect_event(tp
, "filtered", events
, filt_evts
,
690 sizeof(filt_evts
) / sizeof(filt_evts
[0]));
694 expect_noprocfilt_event(struct trace_point
*tp
, unsigned int *events
)
696 expect_event(tp
, "noprocfilt", events
, noprocfilt_evts
,
697 sizeof(noprocfilt_evts
) / sizeof(noprocfilt_evts
[0]));
701 expect_kdbg_test_events(ktrace_session_t s
, bool use_all_callback
,
702 void (^cb
)(unsigned int dev_seen
, unsigned int rel_seen
,
703 unsigned int filt_seen
, unsigned int noprocfilt_seen
))
705 __block
unsigned int dev_seen
= 0;
706 __block
unsigned int rel_seen
= 0;
707 __block
unsigned int filt_seen
= 0;
708 __block
unsigned int noprocfilt_seen
= 0;
710 void (^evtcb
)(struct trace_point
*tp
) = ^(struct trace_point
*tp
) {
711 expect_development_event(tp
, &dev_seen
);
712 expect_release_event(tp
, &rel_seen
);
713 expect_filtered_event(tp
, &filt_seen
);
714 expect_noprocfilt_event(tp
, &noprocfilt_seen
);
717 if (use_all_callback
) {
718 ktrace_events_all(s
, evtcb
);
720 ktrace_events_range(s
, KDBG_EVENTID(DBG_BSD
, DBG_BSD_KDEBUG_TEST
, 0),
721 KDBG_EVENTID(DBG_BSD
+ 1, 0, 0), evtcb
);
724 ktrace_set_completion_handler(s
, ^{
725 ktrace_session_destroy(s
);
726 cb(dev_seen
, rel_seen
, filt_seen
, noprocfilt_seen
);
730 T_ASSERT_POSIX_ZERO(ktrace_start(s
, dispatch_get_main_queue()), NULL
);
731 assert_kdebug_test(KDBG_TEST_MACROS
);
736 T_DECL(kernel_events
, "ensure kernel macros work")
738 start_controlling_ktrace();
740 ktrace_session_t s
= ktrace_session_create();
741 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(s
, "created session");
743 T_QUIET
; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s
, getpid()),
744 "filtered events to current process");
746 expect_kdbg_test_events(s
, false,
747 ^(unsigned int dev_seen
, unsigned int rel_seen
,
748 unsigned int filt_seen
, unsigned int noprocfilt_seen
) {
750 * Development-only events are only filtered if running on an embedded
753 unsigned int dev_exp
;
754 #if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR)
755 dev_exp
= is_development_kernel() ? EXP_KERNEL_EVENTS
: 0U;
757 dev_exp
= EXP_KERNEL_EVENTS
;
760 T_EXPECT_EQ(rel_seen
, EXP_KERNEL_EVENTS
,
761 "release and development events seen");
762 T_EXPECT_EQ(dev_seen
, dev_exp
, "development-only events %sseen",
763 dev_exp
? "" : "not ");
764 T_EXPECT_EQ(filt_seen
, dev_exp
, "filter-only events seen");
765 T_EXPECT_EQ(noprocfilt_seen
, EXP_KERNEL_EVENTS
,
766 "process filter-agnostic events seen");
772 T_DECL(kernel_events_filtered
, "ensure that the filtered kernel macros work")
774 start_controlling_ktrace();
776 ktrace_session_t s
= ktrace_session_create();
777 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(s
, "created session");
779 T_QUIET
; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s
, getpid()),
780 "filtered events to current process");
782 expect_kdbg_test_events(s
, true,
783 ^(unsigned int dev_seen
, unsigned int rel_seen
,
784 unsigned int filt_seen
, unsigned int noprocfilt_seen
) {
785 T_EXPECT_EQ(rel_seen
, EXP_KERNEL_EVENTS
, NULL
);
786 #if defined(__arm__) || defined(__arm64__)
787 T_EXPECT_EQ(dev_seen
, is_development_kernel() ? EXP_KERNEL_EVENTS
: 0U,
790 T_EXPECT_EQ(dev_seen
, EXP_KERNEL_EVENTS
,
791 "development-only events seen");
792 #endif /* defined(__arm__) || defined(__arm64__) */
793 T_EXPECT_EQ(filt_seen
, 0U, "no filter-only events seen");
794 T_EXPECT_EQ(noprocfilt_seen
, EXP_KERNEL_EVENTS
,
795 "process filter-agnostic events seen");
801 T_DECL(kernel_events_noprocfilt
,
802 "ensure that the no process filter kernel macros work")
804 start_controlling_ktrace();
806 ktrace_session_t s
= ktrace_session_create();
807 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(s
, "created session");
810 * Only allow launchd events through.
812 T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s
, 1), "filtered events to launchd");
813 for (size_t i
= 0; i
< sizeof(noprocfilt_evts
) / sizeof(noprocfilt_evts
[0]); i
++) {
815 T_ASSERT_POSIX_ZERO(ktrace_ignore_process_filter_for_event(s
,
817 "ignored process filter for noprocfilt event");
820 expect_kdbg_test_events(s
, false,
821 ^(unsigned int dev_seen
, unsigned int rel_seen
,
822 unsigned int filt_seen
, unsigned int noprocfilt_seen
) {
823 T_EXPECT_EQ(rel_seen
, 0U, "release and development events not seen");
824 T_EXPECT_EQ(dev_seen
, 0U, "development-only events not seen");
825 T_EXPECT_EQ(filt_seen
, 0U, "filter-only events not seen");
827 T_EXPECT_EQ(noprocfilt_seen
, EXP_KERNEL_EVENTS
,
828 "process filter-agnostic events seen");
/* Cleared on the main queue to stop the abuser threads (see stress test). */
static volatile bool continue_abuse = true;

#define STRESS_DEBUGID (0xfeedfac0)
#define ABUSE_SECS (2)
#define TIMER_NS (100 * NSEC_PER_USEC)
/*
 * Use the quantum as the gap threshold.
 */
#define GAP_THRESHOLD_NS (10 * NSEC_PER_MSEC)
845 kdebug_abuser_thread(void *ctx
)
847 unsigned int id
= (unsigned int)ctx
;
849 while (continue_abuse
) {
850 kdebug_trace(STRESS_DEBUGID
, id
, i
, 0, 0);
857 T_DECL(stress
, "emit events on all but one CPU with a small buffer",
858 T_META_CHECK_LEAKS(false))
860 start_controlling_ktrace();
863 ktrace_session_t s
= ktrace_session_create();
864 T_WITH_ERRNO
; T_QUIET
; T_ASSERT_NOTNULL(s
, "ktrace_session_create");
866 /* Let's not waste any time with pleasantries. */
867 ktrace_set_uuid_map_enabled(s
, KTRACE_FEATURE_DISABLED
);
870 ktrace_events_all(s
, ^(__unused
struct trace_point
*tp
) {});
871 ktrace_set_vnode_paths_enabled(s
, KTRACE_FEATURE_ENABLED
);
872 (void)atexit_b(^{ kperf_reset(); });
873 (void)kperf_action_count_set(1);
874 (void)kperf_timer_count_set(1);
875 int kperror
= kperf_timer_period_set(0, kperf_ns_to_ticks(TIMER_NS
));
876 T_QUIET
; T_ASSERT_POSIX_SUCCESS(kperror
, "kperf_timer_period_set %llu ns",
878 kperror
= kperf_timer_action_set(0, 1);
879 T_QUIET
; T_ASSERT_POSIX_SUCCESS(kperror
, "kperf_timer_action_set");
880 kperror
= kperf_action_samplers_set(1, KPERF_SAMPLER_TINFO
|
881 KPERF_SAMPLER_TH_SNAPSHOT
| KPERF_SAMPLER_KSTACK
|
882 KPERF_SAMPLER_USTACK
| KPERF_SAMPLER_MEMINFO
|
883 KPERF_SAMPLER_TINFO_SCHED
| KPERF_SAMPLER_TH_DISPATCH
|
884 KPERF_SAMPLER_TK_SNAPSHOT
| KPERF_SAMPLER_SYS_MEM
|
885 KPERF_SAMPLER_TH_INSTRS_CYCLES
);
886 T_QUIET
; T_ASSERT_POSIX_SUCCESS(kperror
, "kperf_action_samplers_set");
889 /* The coup-de-grace. */
890 ktrace_set_buffer_size(s
, 10);
892 char filepath_arr
[MAXPATHLEN
] = "";
893 strlcpy(filepath_arr
, dt_tmpdir(), sizeof(filepath_arr
));
894 strlcat(filepath_arr
, "/stress.ktrace", sizeof(filepath_arr
));
895 char *filepath
= filepath_arr
;
898 size_t ncpus_size
= sizeof(ncpus
);
899 int ret
= sysctlbyname("hw.logicalcpu_max", &ncpus
, &ncpus_size
, NULL
, 0);
900 T_QUIET
; T_ASSERT_POSIX_SUCCESS(ret
, "sysctlbyname(\"hw.logicalcpu_max\"");
901 T_QUIET
; T_ASSERT_GT(ncpus
, 0, "realistic number of CPUs");
903 pthread_t
*threads
= calloc((unsigned int)ncpus
- 1, sizeof(pthread_t
));
904 T_WITH_ERRNO
; T_QUIET
; T_ASSERT_NOTNULL(threads
, "calloc(%d threads)",
907 ktrace_set_completion_handler(s
, ^{
909 ktrace_session_destroy(s
);
911 T_LOG("trace ended, searching for gaps");
913 ktrace_session_t sread
= ktrace_session_create();
914 T_WITH_ERRNO
; T_QUIET
; T_ASSERT_NOTNULL(sread
, "ktrace_session_create");
916 int error
= ktrace_set_file(sread
, filepath
);
917 T_QUIET
; T_ASSERT_POSIX_ZERO(error
, "ktrace_set_file %s", filepath
);
919 ktrace_file_t f
= ktrace_file_open(filepath
, false);
920 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(f
, "ktrace_file_open %s",
922 uint64_t first_timestamp
= 0;
923 error
= ktrace_file_earliest_timestamp(f
, &first_timestamp
);
924 T_QUIET
; T_ASSERT_POSIX_ZERO(error
, "ktrace_file_earliest_timestamp");
926 uint64_t last_timestamp
= 0;
927 (void)ktrace_file_latest_timestamp(f
, &last_timestamp
);
929 __block
uint64_t prev_timestamp
= 0;
930 __block
uint64_t nevents
= 0;
931 ktrace_events_all(sread
, ^(struct trace_point
*tp
) {
933 uint64_t delta_ns
= 0;
934 T_QUIET
; T_EXPECT_GE(tp
->timestamp
, prev_timestamp
,
935 "timestamps are monotonically increasing");
936 int converror
= ktrace_convert_timestamp_to_nanoseconds(sread
,
937 tp
->timestamp
- prev_timestamp
, &delta_ns
);
938 T_QUIET
; T_ASSERT_POSIX_ZERO(converror
, "convert timestamp to ns");
939 if (prev_timestamp
&& delta_ns
> GAP_THRESHOLD_NS
) {
941 T_LOG("gap: %gs at %llu - %llu on %d: %s (%#08x)",
942 (double)delta_ns
/ 1e9
, prev_timestamp
,
943 tp
->timestamp
, tp
->cpuid
, tp
->debugname
, tp
->debugid
);
945 T_LOG("gap: %gs at %llu - %llu on %d: %#x",
946 (double)delta_ns
/ 1e9
, prev_timestamp
,
947 tp
->timestamp
, tp
->cpuid
, tp
->debugid
);
951 * These gaps are ok -- they appear after CPUs are brought back
954 #define INTERRUPT (0x1050000)
955 #define PERF_CPU_IDLE (0x27001000)
956 #define INTC_HANDLER (0x5000004)
957 #define DECR_TRAP (0x1090000)
958 uint32_t eventid
= tp
->debugid
& KDBG_EVENTID_MASK
;
959 if (eventid
!= INTERRUPT
&& eventid
!= PERF_CPU_IDLE
&&
960 eventid
!= INTC_HANDLER
&& eventid
!= DECR_TRAP
) {
961 unsigned int lost_events
= TRACE_LOST_EVENTS
;
962 T_QUIET
; T_EXPECT_EQ(tp
->debugid
, lost_events
,
963 "gaps should end with lost events");
967 prev_timestamp
= tp
->timestamp
;
969 ktrace_events_single(sread
, TRACE_LOST_EVENTS
, ^(struct trace_point
*tp
){
970 T_LOG("lost: %llu on %d (%llu)", tp
->timestamp
, tp
->cpuid
, tp
->arg1
);
973 __block
uint64_t last_write
= 0;
974 ktrace_events_single_paired(sread
, TRACE_WRITING_EVENTS
,
975 ^(struct trace_point
*start
, struct trace_point
*end
) {
977 int converror
= ktrace_convert_timestamp_to_nanoseconds(sread
,
978 start
->timestamp
- last_write
, &delta_ns
);
979 T_QUIET
; T_ASSERT_POSIX_ZERO(converror
, "convert timestamp to ns");
982 converror
= ktrace_convert_timestamp_to_nanoseconds(sread
,
983 end
->timestamp
- start
->timestamp
, &dur_ns
);
984 T_QUIET
; T_ASSERT_POSIX_ZERO(converror
, "convert timestamp to ns");
986 T_LOG("write: %llu (+%gs): %gus on %d: %llu events", start
->timestamp
,
987 (double)delta_ns
/ 1e9
, (double)dur_ns
/ 1e3
, end
->cpuid
, end
->arg1
);
988 last_write
= end
->timestamp
;
990 ktrace_set_completion_handler(sread
, ^{
991 uint64_t duration_ns
= 0;
992 if (last_timestamp
) {
993 int converror
= ktrace_convert_timestamp_to_nanoseconds(sread
,
994 last_timestamp
- first_timestamp
, &duration_ns
);
995 T_QUIET
; T_ASSERT_POSIX_ZERO(converror
,
996 "convert timestamp to ns");
997 T_LOG("file was %gs long, %llu events: %g events/msec/cpu",
998 (double)duration_ns
/ 1e9
, nevents
,
999 (double)nevents
/ ((double)duration_ns
/ 1e6
) / ncpus
);
1001 (void)unlink(filepath
);
1002 ktrace_session_destroy(sread
);
1006 int starterror
= ktrace_start(sread
, dispatch_get_main_queue());
1007 T_QUIET
; T_ASSERT_POSIX_ZERO(starterror
, "ktrace_start read session");
1012 /* Just kidding... for now. */
1014 kperror
= kperf_sample_set(1);
1015 T_ASSERT_POSIX_SUCCESS(kperror
,
1016 "started kperf timer sampling every %llu ns", TIMER_NS
);
1019 for (int i
= 0; i
< (ncpus
- 1); i
++) {
1020 int error
= pthread_create(&threads
[i
], NULL
, kdebug_abuser_thread
,
1021 (void *)(uintptr_t)i
);
1022 T_QUIET
; T_ASSERT_POSIX_ZERO(error
,
1023 "pthread_create abuser thread %d", i
);
1026 int error
= ktrace_start_writing_file(s
, filepath
,
1027 ktrace_compression_none
, NULL
, NULL
);
1028 T_ASSERT_POSIX_ZERO(error
, "started writing ktrace to %s", filepath
);
1032 dispatch_after(dispatch_time(DISPATCH_TIME_NOW
, ABUSE_SECS
* NSEC_PER_SEC
),
1033 dispatch_get_main_queue(), ^{
1034 T_LOG("ending trace");
1037 continue_abuse
= false;
1038 for (int i
= 0; i
< (ncpus
- 1); i
++) {
1039 int joinerror
= pthread_join(threads
[i
], NULL
);
1040 T_QUIET
; T_EXPECT_POSIX_ZERO(joinerror
, "pthread_join thread %d",
1048 #define ROUND_TRIP_PERIOD UINT64_C(10 * 1000)
1049 #define ROUND_TRIPS_THRESHOLD UINT64_C(25)
1050 #define ROUND_TRIPS_TIMEOUT_SECS (2 * 60)
1051 #define COLLECTION_INTERVAL_MS 100
/*
 * Test a sustained tracing session, involving multiple round-trips to the
 * kernel.
 *
 * Trace all events, and every `ROUND_TRIP_PERIOD` events, emit an event that's
 * unlikely to be emitted elsewhere.  Look for this event, too, and make sure we
 * see as many of them as we emitted.
 *
 * After seeing `ROUND_TRIPS_THRESHOLD` of the unlikely events, end tracing.
 * In the failure mode, we won't see any of these, so set a timeout of
 * `ROUND_TRIPS_TIMEOUT_SECS` to prevent hanging, waiting for events that we'll
 * never see.
 */
/* NOTE(review): the T_DECL name is not visible in the mangled source;
 * "round_trips" follows the upstream naming — confirm against the original. */
T_DECL(round_trips,
    "test sustained tracing with multiple round-trips through the kernel")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	/*
	 * Set a small buffer and collection interval to increase the number of
	 * round-trips.
	 */
	ktrace_set_buffer_size(s, 50);
	ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);

	__block uint64_t events = 0;
	__block uint64_t emitted = 0;
	__block uint64_t seen = 0;

	/* Count every event; periodically emit a marker event to look for. */
	ktrace_events_all(s, ^(__unused struct trace_point *tp) {
		events++;
		if (events % ROUND_TRIP_PERIOD == 0) {
			T_LOG("emitting round-trip event %" PRIu64, emitted);
			kdebug_trace(TRACE_DEBUGID, events, 0, 0, 0);
			emitted++;
		}
	});

	/* Watch for the marker coming back out of the kernel. */
	ktrace_events_single(s, TRACE_DEBUGID, ^(__unused struct trace_point *tp) {
		T_LOG("saw round-trip event after %" PRIu64 " events", events);
		seen++;
		if (seen >= ROUND_TRIPS_THRESHOLD) {
			T_LOG("ending trace after seeing %" PRIu64 " events, "
			    "emitting %" PRIu64, seen, emitted);
			ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_GE(emitted, ROUND_TRIPS_THRESHOLD,
		    "emitted %" PRIu64 " round-trip events", emitted);
		T_EXPECT_GE(seen, ROUND_TRIPS_THRESHOLD,
		    "saw %" PRIu64 " round-trip events", seen);
		ktrace_session_destroy(s);
		T_END;
	});

	int error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	/* Failure-mode guard: end the trace if the markers never appear. */
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
	    ROUND_TRIPS_TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(),
	    ^{
		T_LOG("ending trace after %d seconds", ROUND_TRIPS_TIMEOUT_SECS);
		ktrace_end(s, 0);
	});

	dispatch_main();
}
1125 #define HEARTBEAT_INTERVAL_SECS 2
1126 #define HEARTBEAT_COUNT 20
/*
 * Ensure we see events periodically, checking for recent events on a
 * regular heartbeat.
 */
T_DECL(event_coverage, "ensure events appear up to the end of tracing")
{
	start_controlling_ktrace();

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

	/* Track the most recent event timestamp and a running event count. */
	__block uint64_t current_timestamp = 0;
	__block uint64_t events = 0;
	ktrace_events_all(s, ^(struct trace_point *tp) {
		current_timestamp = tp->timestamp;
		events++;
	});

	/* Small buffer and fast collection to force frequent kernel reads. */
	ktrace_set_buffer_size(s, 20);
	ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);

	__block uint64_t last_timestamp = 0;
	__block uint64_t last_events = 0;
	__block unsigned int heartbeats = 0;

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_QUIET; T_EXPECT_GT(events, 0ULL, "should have seen some events");
		T_END;
	});

	dispatch_source_t timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER,
	    0, 0, dispatch_get_main_queue());
	dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW,
	    HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC),
	    HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC, 0);
	dispatch_source_set_cancel_handler(timer, ^{
		dispatch_release(timer);
	});

	/* On each heartbeat, check that tracing is still making progress. */
	dispatch_source_set_event_handler(timer, ^{
		heartbeats++;

		T_LOG("heartbeat %u at time %lld, seen %" PRIu64 " events, "
		    "current event time %lld", heartbeats, mach_absolute_time(),
		    events, current_timestamp);

		if (current_timestamp > 0) {
			T_EXPECT_GT(current_timestamp, last_timestamp,
			    "event timestamps should be increasing");
			T_QUIET; T_EXPECT_GT(events, last_events,
			    "number of events should be increasing");
		}

		last_timestamp = current_timestamp;
		last_events = events;

		if (heartbeats >= HEARTBEAT_COUNT) {
			T_LOG("ending trace after %u heartbeats", HEARTBEAT_COUNT);
			ktrace_end(s, 0);
		}
	});

	int error = ktrace_start(s, dispatch_get_main_queue());
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	dispatch_activate(timer);

	dispatch_main();
}
/*
 * Configure the kernel's kdebug buffer to hold `nevents` events, read back the
 * actual buffer size the kernel chose, then tear the buffers down again.
 * Returns the number of events the kernel actually allocated.
 */
static unsigned int
set_nevents(unsigned int nevents)
{
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(
		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, (int)nevents }, 4,
		NULL, 0, NULL, 0), "set kdebug buffer size");

	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(
		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSETUP, (int)nevents }, 4,
		NULL, 0, NULL, 0), "setup kdebug buffers");

	/* Ask the kernel how many events it actually allocated. */
	kbufinfo_t bufinfo = { 0 };
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(
		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF }, 3,
		&bufinfo, &(size_t){ sizeof(bufinfo) }, NULL, 0),
		"get kdebug buffer size");

	/* Release the buffers so repeated calls start from a clean state. */
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(sysctl(
		(int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDREMOVE }, 3,
		NULL, 0, NULL, 0),
		"remove kdebug buffers");

	return (unsigned int)bufinfo.nkdbufs;
}
T_DECL(set_buffer_size, "ensure large buffer sizes can be set")
{
	start_controlling_ktrace();

	uint64_t memsize = 0;
	T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.memsize", &memsize,
	    &(size_t){ sizeof(memsize) }, NULL, 0), "get memory size");

	/*
	 * Try to allocate up to one-eighth of available memory towards
	 * tracing.
	 */
	uint64_t maxevents_u64 = memsize / 8 / sizeof(kd_buf);
	if (maxevents_u64 > UINT32_MAX) {
		maxevents_u64 = UINT32_MAX;
	}
	unsigned int maxevents = (unsigned int)maxevents_u64;

	/* Requesting zero events yields the kernel's minimum buffer size. */
	unsigned int minevents = set_nevents(0);
	T_ASSERT_GT(minevents, 0, "saw non-zero minimum event count of %u",
	    minevents);

	unsigned int step = ((maxevents - minevents - 1) / 4);
	T_ASSERT_GT(step, 0, "stepping by %u events", step);

	/* Sweep from the minimum towards the maximum in four steps. */
	for (unsigned int i = minevents + step; i < maxevents; i += step) {
		unsigned int actualevents = set_nevents(i);
		T_ASSERT_GE(actualevents, i - minevents,
		    "%u events in kernel when %u requested", actualevents, i);
	}
}
1261 donothing(__unused
void *arg
)
1266 T_DECL(long_names
, "ensure long command names are reported")
1268 start_controlling_ktrace();
1270 char longname
[] = "thisisaverylongprocessname!";
1271 char *longname_ptr
= longname
;
1272 static_assert(sizeof(longname
) > 16,
1273 "the name should be longer than MAXCOMLEN");
1275 int ret
= sysctlbyname("kern.procname", NULL
, NULL
, longname
,
1277 T_ASSERT_POSIX_SUCCESS(ret
,
1278 "use sysctl kern.procname to lengthen the name");
1280 ktrace_session_t ktsess
= ktrace_session_create();
1283 * 32-bit kernels can only trace 16 bytes of the string in their event
1286 if (!ktrace_is_kernel_64_bit(ktsess
)) {
1287 longname
[16] = '\0';
1290 ktrace_filter_pid(ktsess
, getpid());
1292 __block
bool saw_newthread
= false;
1293 ktrace_events_single(ktsess
, TRACE_STRING_NEWTHREAD
,
1294 ^(struct trace_point
*tp
) {
1295 if (ktrace_get_pid_for_thread(ktsess
, tp
->threadid
) ==
1297 saw_newthread
= true;
1299 char argname
[32] = {};
1300 strncat(argname
, (char *)&tp
->arg1
, sizeof(tp
->arg1
));
1301 strncat(argname
, (char *)&tp
->arg2
, sizeof(tp
->arg2
));
1302 strncat(argname
, (char *)&tp
->arg3
, sizeof(tp
->arg3
));
1303 strncat(argname
, (char *)&tp
->arg4
, sizeof(tp
->arg4
));
1305 T_EXPECT_EQ_STR((char *)argname
, longname_ptr
,
1306 "process name of new thread should be long");
1308 ktrace_end(ktsess
, 1);
1312 ktrace_set_completion_handler(ktsess
, ^{
1313 ktrace_session_destroy(ktsess
);
1314 T_EXPECT_TRUE(saw_newthread
,
1315 "should have seen the new thread");
1319 int error
= ktrace_start(ktsess
, dispatch_get_main_queue());
1320 T_ASSERT_POSIX_ZERO(error
, "started tracing");
1322 pthread_t thread
= NULL
;
1323 error
= pthread_create(&thread
, NULL
, donothing
, NULL
);
1324 T_ASSERT_POSIX_ZERO(error
, "create new thread");
1326 dispatch_after(dispatch_time(DISPATCH_TIME_NOW
, 5 * NSEC_PER_SEC
),
1327 dispatch_get_main_queue(), ^{
1328 ktrace_end(ktsess
, 0);
1331 error
= pthread_join(thread
, NULL
);
1332 T_ASSERT_POSIX_ZERO(error
, "join to thread");
1337 T_DECL(continuous_time
, "make sure continuous time status can be queried",
1338 T_META_RUN_CONCURRENTLY(true))
1340 bool cont_time
= kdebug_using_continuous_time();
1341 T_ASSERT_FALSE(cont_time
, "should not be using continuous time yet");
1344 static const uint32_t frame_eventid
= KDBG_EVENTID(DBG_BSD
,
1345 DBG_BSD_KDEBUG_TEST
, 1);
1347 static ktrace_session_t
1348 future_events_session(void)
1350 ktrace_session_t ktsess
= ktrace_session_create();
1351 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(ktsess
, "failed to create session");
1353 ktrace_events_single(ktsess
, KDBG_EVENTID(DBG_BSD
, DBG_BSD_KDEBUG_TEST
, 0),
1354 ^(struct trace_point
*tp __unused
) {
1355 T_FAIL("saw future test event from IOP");
1357 ktrace_events_single(ktsess
, frame_eventid
, ^(struct trace_point
*tp
) {
1358 if (tp
->debugid
& DBG_FUNC_START
) {
1359 T_LOG("saw start event");
1361 T_LOG("saw event traced after trying to trace future event, ending");
1362 ktrace_end(ktsess
, 1);
1366 ktrace_set_collection_interval(ktsess
, 100);
1370 T_DECL(future_iop_events
,
1371 "make sure IOPs cannot trace events in the future while live tracing")
1373 start_controlling_ktrace();
1374 ktrace_session_t ktsess
= future_events_session();
1375 ktrace_set_completion_handler(ktsess
, ^{
1376 ktrace_session_destroy(ktsess
);
1380 T_ASSERT_POSIX_ZERO(ktrace_start(ktsess
, dispatch_get_main_queue()),
1382 kdebug_trace(frame_eventid
| DBG_FUNC_START
, 0, 0, 0, 0);
1383 assert_kdebug_test(KDBG_TEST_FUTURE_TIMES
);
1384 kdebug_trace(frame_eventid
| DBG_FUNC_END
, 0, 0, 0, 0);
1386 dispatch_after(dispatch_time(DISPATCH_TIME_NOW
, 5 * NSEC_PER_SEC
),
1387 dispatch_get_main_queue(), ^{
1388 T_FAIL("ending tracing after timeout");
1389 ktrace_end(ktsess
, 0);
1395 T_DECL(future_iop_events_disabled
,
1396 "make sure IOPs cannot trace events in the future after disabling tracing")
1398 start_controlling_ktrace();
1399 ktrace_session_t ktsess
= future_events_session();
1400 T_ASSERT_POSIX_ZERO(ktrace_configure(ktsess
), "configure tracing");
1402 kdebug_trace(frame_eventid
| DBG_FUNC_START
, 0, 0, 0, 0);
1403 assert_kdebug_test(KDBG_TEST_FUTURE_TIMES
);
1404 kdebug_trace(frame_eventid
| DBG_FUNC_END
, 0, 0, 0, 0);
1406 T_ASSERT_POSIX_ZERO(ktrace_disable_configured(ktsess
),
1408 ktrace_session_destroy(ktsess
);
1410 ktsess
= future_events_session();
1412 T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(ktsess
), "use existing trace");
1413 ktrace_set_completion_handler(ktsess
, ^{
1414 ktrace_session_destroy(ktsess
);
1418 T_ASSERT_POSIX_ZERO(ktrace_start(ktsess
, dispatch_get_main_queue()),
1419 "start tracing existing session");
1424 T_DECL(iop_events_disable
,
1425 "make sure IOP events are flushed before disabling trace")
1427 start_controlling_ktrace();
1428 ktrace_session_t ktsess
= future_events_session();
1430 assert_kdebug_test(KDBG_TEST_IOP_SYNC_FLUSH
);
1431 T_ASSERT_POSIX_ZERO(ktrace_configure(ktsess
), "configure tracing");
1433 kdebug_trace(frame_eventid
| DBG_FUNC_START
, 0, 0, 0, 0);
1435 T_ASSERT_POSIX_ZERO(ktrace_disable_configured(ktsess
),
1437 ktrace_session_destroy(ktsess
);
1439 ktsess
= ktrace_session_create();
1440 T_QUIET
; T_WITH_ERRNO
;
1441 T_ASSERT_NOTNULL(ktsess
, "create session");
1443 ktrace_events_single(ktsess
,
1444 KDBG_EVENTID(DBG_BSD
, DBG_BSD_KDEBUG_TEST
, 0xff),
1445 ^(struct trace_point
*tp __unused
) {
1446 T_PASS("saw IOP event from sync flush");
1450 T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(ktsess
), "use existing trace");
1451 ktrace_set_completion_handler(ktsess
, ^{
1452 ktrace_session_destroy(ktsess
);
1456 T_ASSERT_POSIX_ZERO(ktrace_start(ktsess
, dispatch_get_main_queue()),
1457 "start tracing existing session");
/*
 * Exercise VFS_LOOKUP tracing with very deep path names by descending through
 * many nested long-named directories and opening a file at the bottom.
 *
 * Fixes relative to the original:
 *  - open(2) with O_CREAT requires a third (mode) argument; omitting it passes
 *    indeterminate permission bits (undefined behavior). 0600 is now explicit.
 *  - The session NULL-check ran after the session had already been used by
 *    ktrace_set_collection_interval()/ktrace_filter_pid(); it now runs first.
 */
T_DECL(lookup_long_paths,
    "lookup long path names")
{
	start_controlling_ktrace();

	int ret = chdir("/tmp");
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to /tmp");
	const char *dir = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/";

	/* Descend through nested directories to build a very long cwd path. */
	/* NOTE(review): depth constant reconstructed from upstream — confirm. */
	int i = 0;
	for (i = 0; i < 40; i++) {
		ret = mkdir(dir, S_IRUSR | S_IWUSR | S_IXUSR);
		if (ret >= 0 || errno != EEXIST) {
			T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mkdir of %d nested directory",
			    i);
		}
		ret = chdir(dir);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to %d nested directory", i);
	}

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
	ktrace_set_collection_interval(s, 250);
	ktrace_filter_pid(s, getpid());
	ktrace_events_single(s, VFS_LOOKUP, ^(struct trace_point *tp __unused){});
	ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);

	dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test", 0);

	ktrace_set_completion_handler(s, ^{
		dispatch_release(q);
		T_END;
	});

	int error = ktrace_start(s, q);
	T_ASSERT_POSIX_ZERO(error, "started tracing");

	/* O_CREAT requires an explicit mode; 0600 keeps the file private. */
	int fd = open("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", O_RDWR | O_CREAT, 0600);
	T_ASSERT_POSIX_SUCCESS(fd, "opened file at %d directories deep", i);

	sleep(5);

	T_LOG("ending tracing");
	ktrace_end(s, 1);

	dispatch_main();
}
1508 #pragma mark - boot tracing
/* Startup subsystems whose log strings must appear in an early boot trace. */
static const char *expected_subsystems[] = {
	"tunables", "locks_early", "kprintf", "pmap_steal", "vm_kernel",
	"kmem", "kmem_alloc", "zalloc",
	/* "percpu", only has a startup phase on Intel */
	"locks", "codesigning", "oslog", "early_boot",
};
#define EXPECTED_SUBSYSTEMS_LEN \
	(sizeof(expected_subsystems) / sizeof(expected_subsystems[0]))
1519 T_DECL(early_boot_tracing
, "ensure early boot strings are present",
1520 T_META_BOOTARGS_SET("trace=100000"))
1522 T_ATEND(reset_ktrace
);
1525 ktrace_session_t s
= ktrace_session_create();
1526 ktrace_set_collection_interval(s
, 250);
1527 int error
= ktrace_set_use_existing(s
);
1528 T_ASSERT_POSIX_ZERO(error
, "use existing trace buffer");
1530 #if defined(__x86_64__)
1531 #define FIRST_EVENT_STRING "i386_init"
1532 #else /* defined(__x86_64__) */
1533 #define FIRST_EVENT_STRING "kernel_startup_bootstrap"
1534 #endif /* !defined(__x86_64__) */
1536 __block
bool seen_event
= false;
1537 __block
unsigned int cur_subsystem
= 0;
1538 ktrace_events_single(s
, TRACE_INFO_STRING
, ^(struct trace_point
*tp
) {
1539 char early_str
[33] = "";
1540 size_t argsize
= ktrace_is_kernel_64_bit(s
) ? 8 : 4;
1541 memcpy(early_str
, &tp
->arg1
, argsize
);
1542 memcpy(early_str
+ argsize
, &tp
->arg2
, argsize
);
1543 memcpy(early_str
+ argsize
* 2, &tp
->arg3
, argsize
);
1544 memcpy(early_str
+ argsize
* 3, &tp
->arg4
, argsize
);
1547 T_LOG("found first string event with args: "
1548 "0x%" PRIx64
", 0x%" PRIx64
", 0x%" PRIx64
", 0x%" PRIx64
,
1549 tp
->arg1
, tp
->arg2
, tp
->arg3
, tp
->arg4
);
1550 char expect_str
[33] = FIRST_EVENT_STRING
;
1551 if (!ktrace_is_kernel_64_bit(s
)) {
1552 // Only the first 16 bytes of the string will be traced.
1553 expect_str
[16] = '\0';
1556 T_EXPECT_EQ_STR(early_str
, expect_str
,
1557 "first event in boot trace should be the bootstrap message");
1561 if (strcmp(early_str
, expected_subsystems
[cur_subsystem
]) == 0) {
1562 T_LOG("found log for subsystem %s",
1563 expected_subsystems
[cur_subsystem
]);
1567 if (cur_subsystem
== EXPECTED_SUBSYSTEMS_LEN
) {
1568 T_LOG("ending after seeing all expected logs");
1573 ktrace_set_completion_handler(s
, ^{
1574 T_EXPECT_TRUE(seen_event
, "should see an early boot string event");
1575 T_EXPECT_TRUE(cur_subsystem
== EXPECTED_SUBSYSTEMS_LEN
,
1576 "should see logs from all subsystems");
1577 if (cur_subsystem
!= EXPECTED_SUBSYSTEMS_LEN
) {
1578 T_LOG("missing log for %s", expected_subsystems
[cur_subsystem
]);
1583 error
= ktrace_start(s
, dispatch_get_main_queue());
1584 T_ASSERT_POSIX_ZERO(error
, "started tracing");
1591 T_DECL(typefilter_boot_arg
, "ensure typefilter is set up correctly at boot",
1592 T_META_BOOTARGS_SET("trace=100000 trace_typefilter=S0x0c00,C0xfe"))
1594 T_ATEND(reset_ktrace
);
1597 ktrace_config_t config
= ktrace_config_create_current();
1598 T_QUIET
; T_WITH_ERRNO
;
1599 T_ASSERT_NOTNULL(config
, "create config from current system");
1602 T_LOG("ktrace configuration:");
1603 ktrace_config_print_description(config
, stdout
);
1605 uint8_t *typefilt
= ktrace_config_kdebug_get_typefilter(config
);
1606 T_ASSERT_NOTNULL(typefilt
, "typefilter is active");
1607 T_EXPECT_TRUE(typefilt
[0x0c00 / 8],
1608 "specified subclass is set in typefilter");
1609 T_MAYFAIL
; // rdar://63625062 (UTD converts commas in boot-args to spaces)
1610 T_EXPECT_TRUE(typefilt
[0xfeed / 8],
1611 "specified class is set in typefilter");
1613 ktrace_config_destroy(config
);
1616 #pragma mark - events present
/* Set once the spawned child exits; polled by the proc-exit test below. */
static int recvd_sigchild = 0;

/* SIGCHLD handler: record the child's exit, reject any other signal. */
static void
sighandler(int sig)
{
	if (sig != SIGCHLD) {
		T_ASSERT_FAIL("unexpected signal: %d", sig);
	}
	recvd_sigchild = 1;
}
T_DECL(instrs_and_cycles_on_proc_exit,
    "instructions and cycles should be traced on thread exit",
    T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1))
{
	start_controlling_ktrace();

	int error;
	int status;
	/* NOTE(review): pid is read inside the event block after spawn, so it
	 * must be captured by reference — confirm __block in the original. */
	__block pid_t pid;
	struct rusage_info_v4 *rusage = calloc(1, sizeof(*rusage));
	char *args[] = { "ls", "-l", NULL, };

	dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test",
	    DISPATCH_QUEUE_SERIAL);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(signal(SIGCHLD, sighandler),
	    "register signal handler");

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	__block bool seen_event = false;
	__block uint64_t proc_instrs = 0;
	__block uint64_t proc_cycles = 0;
	__block uint64_t proc_sys_time = 0;
	__block uint64_t proc_usr_time = 0;

	/* Capture the counters the kernel emits when the child exits. */
	error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_PROC_EXIT,
	    ^(ktrace_event_t tp){
		if (tp->pid == pid) {
			seen_event = true;
			proc_instrs = tp->arg1;
			proc_cycles = tp->arg2;
			proc_sys_time = tp->arg3;
			proc_usr_time = tp->arg4;
			ktrace_end(s, 1);
		}
	});
	T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(error, "trace single event");

	ktrace_set_completion_handler(s, ^{
		// TODO Check for equality once rdar://61948669 is fixed.
		T_ASSERT_GE(proc_instrs, rusage->ri_instructions,
		    "trace event instrs are >= to rusage instrs");
		T_ASSERT_GE(proc_cycles, rusage->ri_cycles,
		    "trace event cycles are >= to rusage cycles");
		T_ASSERT_GE(proc_sys_time, rusage->ri_system_time,
		    "trace event sys time is >= rusage sys time");
		T_ASSERT_GE(proc_usr_time, rusage->ri_user_time,
		    "trace event usr time >= rusage usr time");
		T_EXPECT_TRUE(seen_event, "should see the proc exit trace event");

		free(rusage);
		ktrace_session_destroy(s);
		dispatch_release(q);
		T_END;
	});

	error = ktrace_start(s, q);
	T_ASSERT_POSIX_ZERO(error, "start tracing");

	extern char **environ;
	status = posix_spawnp(&pid, args[0], NULL, NULL, args, environ);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(status, "spawn process");

	if (status == 0) {
		/* Wait for SIGCHLD, then snapshot rusage before reaping. */
		while (!recvd_sigchild) {
			pause();
		}
		error = proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t)rusage);
		T_QUIET; T_ASSERT_POSIX_ZERO(error, "rusage");
		error = waitpid(pid, &status, 0);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(error, "waitpid");
		dispatch_main();
	}
}
#define NO_OF_THREADS 2

/* Per-thread snapshot taken just before the thread exits. */
struct thread_counters_info {
	uint64_t counts[2]; //cycles and/or instrs
	uint64_t cpu_time;
	uint64_t thread_id;
};
typedef struct thread_counters_info *tc_info_t;

/*
 * pthread start routine: record this thread's ID, CPU time, and hardware
 * counters into the caller-provided slot, then exit.
 */
static void *
get_thread_counters(void* ptr)
{
	extern int thread_selfcounts(int type, void *buf, size_t nbytes);
	extern uint64_t __thread_selfusage(void);
	extern uint64_t __thread_selfid(void);

	tc_info_t tc_info = (tc_info_t) ptr;
	tc_info->thread_id = __thread_selfid();
	// Just to increase the instr, cycle count
	T_LOG("printing %llu\n", tc_info->thread_id);
	tc_info->cpu_time = __thread_selfusage();
	thread_selfcounts(1, tc_info->counts, sizeof(tc_info->counts));
	return NULL;
}
1724 T_DECL(instrs_and_cycles_on_thread_exit
,
1725 "instructions and cycles should be traced on thread exit",
1726 T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1))
1729 start_controlling_ktrace();
1732 pthread_t
*threads
= calloc((unsigned int)(NO_OF_THREADS
),
1734 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(threads
, "calloc(%d threads)",
1736 tc_info_t tc_infos
= calloc((unsigned int) (NO_OF_THREADS
),
1737 sizeof(struct thread_counters_info
));
1738 T_WITH_ERRNO
; T_QUIET
; T_ASSERT_NOTNULL(tc_infos
,
1739 "calloc(%d thread counters)", NO_OF_THREADS
);
1741 ktrace_session_t s
= ktrace_session_create();
1742 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(s
, "ktrace_session_create");
1743 ktrace_filter_pid(s
, getpid());
1745 __block
int nevents
= 0;
1746 error
= ktrace_events_single(s
, DBG_MT_INSTRS_CYCLES_THR_EXIT
,
1747 ^(ktrace_event_t tp
) {
1748 for (int i
= 0; i
< NO_OF_THREADS
; i
++) {
1749 if (tp
->threadid
== tc_infos
[i
].thread_id
) {
1751 uint64_t cpu_time
= tp
->arg3
+ tp
->arg4
;
1753 * as we are getting counts before thread exit,
1754 * the counts at thread exit should be greater than
1757 T_ASSERT_GE(tp
->arg1
, tc_infos
[i
].counts
[0],
1758 "trace event instrs are >= to thread's instrs");
1759 T_ASSERT_GE(tp
->arg2
, tc_infos
[i
].counts
[1],
1760 "trace event cycles are >= to thread's cycles");
1761 T_ASSERT_GE(cpu_time
, tc_infos
[i
].cpu_time
,
1762 "trace event cpu time is >= thread's cpu time");
1764 if (nevents
== NO_OF_THREADS
) {
1769 T_QUIET
; T_ASSERT_POSIX_ZERO(error
, "trace single event");
1770 ktrace_set_completion_handler(s
, ^{
1771 T_EXPECT_EQ(NO_OF_THREADS
, nevents
, "seen %d thread exit trace events",
1774 ktrace_session_destroy(s
);
1777 error
= ktrace_start(s
, dispatch_get_main_queue());
1778 T_ASSERT_POSIX_ZERO(error
, "start tracing");
1780 for (int i
= 0; i
< NO_OF_THREADS
; i
++) {
1781 error
= pthread_create(&threads
[i
], NULL
, get_thread_counters
,
1782 (void *)&tc_infos
[i
]);
1783 T_QUIET
; T_ASSERT_POSIX_ZERO(error
, "pthread_create thread %d", i
);
1786 for (int i
= 0; i
< NO_OF_THREADS
; i
++) {
1787 error
= pthread_join(threads
[i
], NULL
);
1788 T_QUIET
; T_EXPECT_POSIX_ZERO(error
, "pthread_join thread %d", i
);