#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <inttypes.h>
#include <ktrace/session.h>
#include <ktrace/private.h>
#include <mach/dyld_kernel.h>
#include <mach/host_info.h>
#include <mach/mach_init.h>
#include <mach/mach_time.h>
#include <mach/task.h>
#include <os/assumes.h>
#include <stdbool.h>
#include <string.h>
#include <sys/kdebug.h>
#include <sys/kdebug_signpost.h>
#include <sys/sysctl.h>
#include <TargetConditionals.h>
#include <unistd.h>

#define KDBG_TEST_MACROS    1
#define KDBG_TEST_OLD_TIMES 2
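
/*
 * Ask the kernel to emit its built-in kdebug test events via the KERN_KDTEST
 * sysctl.  The test flavor is handed to the kernel through the old-length
 * argument of the sysctl, which is how the helper below passes
 * KDBG_TEST_MACROS or KDBG_TEST_OLD_TIMES down.
 */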
static void
assert_kdebug_test(unsigned int flavor)
{
	/* the test flavor is passed in the old-length argument */
	size_t size = flavor;
	int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDTEST };
	T_ASSERT_POSIX_SUCCESS(
	    sysctl(mib, sizeof(mib) / sizeof(mib[0]), NULL, &size, NULL, 0),
	    "KERN_KDTEST sysctl");
}
#pragma mark kdebug syscalls

#define TRACE_DEBUGID (0xfedfed00U)
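
/*
 * kdebug_trace(2) lets a user space process emit an event with a
 * caller-chosen debugid and four argument words.  The test below filters a
 * ktrace session to this process, emits a single event with TRACE_DEBUGID,
 * and verifies that all four arguments round-trip unchanged.
 */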
T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events",
    T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
	ktrace_session_t s;
	__block int events_seen = 0;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, NULL);

	ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
	ktrace_events_single(s, TRACE_DEBUGID, ^void(struct trace_point *tp) {
		events_seen++;
		T_PASS("saw traced event");

		T_EXPECT_EQ(tp->arg1, 1UL, "argument 1 of traced event is correct");
		T_EXPECT_EQ(tp->arg2, 2UL, "argument 2 of traced event is correct");
		T_EXPECT_EQ(tp->arg3, 3UL, "argument 3 of traced event is correct");
		T_EXPECT_EQ(tp->arg4, 4UL, "argument 4 of traced event is correct");

		/* the event under test has been seen, so the trace can end */
		ktrace_end(s, 1);
	});

	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_GE(events_seen, 1, NULL);
		ktrace_session_destroy(s);
		T_END;
	});

	ktrace_filter_pid(s, getpid());

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	T_ASSERT_POSIX_SUCCESS(kdebug_trace(TRACE_DEBUGID, 1, 2, 3, 4), NULL);

	ktrace_end(s, 0);

	dispatch_main();
}
#define SIGNPOST_SINGLE_CODE (0x10U)
#define SIGNPOST_PAIRED_CODE (0x20U)
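
/*
 * kdebug_signpost(2) events are emitted in the APPSDBG class under the
 * DBG_APP_SIGNPOST subclass: kdebug_signpost() produces a single event, while
 * kdebug_signpost_start()/kdebug_signpost_end() produce a matched pair that
 * ktrace delivers together through ktrace_events_single_paired().
 */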
T_DECL(kdebug_signpost_syscall,
    "test that kdebug_signpost(2) emits correct events",
    T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
	ktrace_session_t s;
	__block int single_seen = 0;
	__block int paired_seen = 0;

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, NULL);

	/* make sure to get enough events for the KDBUFWAIT to trigger */
	// ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
	ktrace_events_single(s,
	    APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE),
	    ^void(struct trace_point *tp)
	{
		single_seen++;
		T_PASS("single signpost is traced");

		T_EXPECT_EQ(tp->arg1, 1UL, "argument 1 of single signpost is correct");
		T_EXPECT_EQ(tp->arg2, 2UL, "argument 2 of single signpost is correct");
		T_EXPECT_EQ(tp->arg3, 3UL, "argument 3 of single signpost is correct");
		T_EXPECT_EQ(tp->arg4, 4UL, "argument 4 of single signpost is correct");
	});

	ktrace_events_single_paired(s,
	    APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE),
	    ^void(struct trace_point *start, struct trace_point *end)
	{
		paired_seen++;
		T_PASS("paired signposts are traced");

		T_EXPECT_EQ(start->arg1, 5UL, "argument 1 of start signpost is correct");
		T_EXPECT_EQ(start->arg2, 6UL, "argument 2 of start signpost is correct");
		T_EXPECT_EQ(start->arg3, 7UL, "argument 3 of start signpost is correct");
		T_EXPECT_EQ(start->arg4, 8UL, "argument 4 of start signpost is correct");

		T_EXPECT_EQ(end->arg1, 9UL, "argument 1 of end signpost is correct");
		T_EXPECT_EQ(end->arg2, 10UL, "argument 2 of end signpost is correct");
		T_EXPECT_EQ(end->arg3, 11UL, "argument 3 of end signpost is correct");
		T_EXPECT_EQ(end->arg4, 12UL, "argument 4 of end signpost is correct");

		T_EXPECT_EQ(single_seen, 1,
		    "signposts are traced in the correct order");

		ktrace_end(s, 1);
	});

	ktrace_set_completion_handler(s, ^(void) {
		if (single_seen == 0) {
			T_FAIL("did not see single tracepoint before timeout");
		}
		if (paired_seen == 0) {
			T_FAIL("did not see paired tracepoints before timeout");
		}
		ktrace_session_destroy(s);
		T_END;
	});

	ktrace_filter_pid(s, getpid());

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	T_EXPECT_POSIX_SUCCESS(kdebug_signpost(
	    SIGNPOST_SINGLE_CODE, 1, 2, 3, 4), NULL);
	T_EXPECT_POSIX_SUCCESS(kdebug_signpost_start(
	    SIGNPOST_PAIRED_CODE, 5, 6, 7, 8), NULL);
	T_EXPECT_POSIX_SUCCESS(kdebug_signpost_end(
	    SIGNPOST_PAIRED_CODE, 9, 10, 11, 12), NULL);

	ktrace_end(s, 0);

	dispatch_main();
}
#pragma mark kdebug behaviors

#define WRAPPING_EVENTS_COUNT     (150000)
#define TRACE_ITERATIONS          (5000)
#define WRAPPING_EVENTS_THRESHOLD (100)
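
/*
 * This test drives kdebug through its raw sysctl interface instead of
 * libktrace: KERN_KDSETUP and KERN_KDSETBUF configure a small event buffer,
 * KERN_KDENABLE turns tracing on, and KERN_KDGETBUF reads back a kbufinfo_t
 * whose KDBG_WRAPPED flag reports when the buffer has wrapped.  Once it has
 * wrapped, the first event delivered should be TRACE_LOST_EVENTS and no
 * events from before the wrap should be visible.
 */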
T_DECL(wrapping,
    "ensure that wrapping traces lost events and no events prior to the wrap",
    T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
	ktrace_session_t s;
	kbufinfo_t buf_info;
	int mib[4];
	size_t needed = 0;
	__block int events = 0;
	int wait_wrapping_secs = (WRAPPING_EVENTS_COUNT / TRACE_ITERATIONS) + 5;
	int current_secs = wait_wrapping_secs;

	/* use sysctls manually to bypass libktrace assumptions */

	mib[0] = CTL_KERN; mib[1] = KERN_KDEBUG; mib[2] = KERN_KDSETUP; mib[3] = 0;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, NULL, &needed, NULL, 0),
	    "KERN_KDSETUP");

	mib[2] = KERN_KDSETBUF; mib[3] = WRAPPING_EVENTS_COUNT;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDSETBUF");

	mib[2] = KERN_KDENABLE; mib[3] = 1;
	T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDENABLE");

	/* wrapping is on by default */

	/* wait until wrapped */
	T_LOG("waiting for trace to wrap");
	mib[2] = KERN_KDGETBUF;
	needed = sizeof(buf_info);
	do {
		for (int i = 0; i < TRACE_ITERATIONS; i++) {
			T_QUIET;
			T_ASSERT_POSIX_SUCCESS(kdebug_trace(0xfefe0000, 0, 0, 0, 0), NULL);
		}
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, &buf_info, &needed, NULL, 0),
		    "KERN_KDGETBUF");
	} while (!(buf_info.flags & KDBG_WRAPPED) && --current_secs > 0);

	T_ASSERT_TRUE(buf_info.flags & KDBG_WRAPPED,
	    "trace wrapped (after %d seconds within %d second timeout)",
	    wait_wrapping_secs - current_secs, wait_wrapping_secs);

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(s), NULL);

	ktrace_events_all(s, ^void(struct trace_point *tp) {
		if (events == 0) {
			T_EXPECT_EQ(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
			    "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS",
			    tp->debugid,
			    ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
		} else {
			T_QUIET;
			T_EXPECT_NE(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
			    "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS",
			    tp->debugid,
			    ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
		}

		events++;
		if (events > WRAPPING_EVENTS_THRESHOLD) {
			ktrace_end(s, 1);
		}
	});

	ktrace_set_completion_handler(s, ^(void) {
		ktrace_session_destroy(s);
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_main();
}
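
/*
 * The KDBG_TEST_OLD_TIMES flavor asks the kernel's kdebug test code to try
 * logging events with timestamps from before tracing began.  Any event that
 * does show up must be stamped after the horizon recorded below, and the
 * completion handler expects only two events to survive.
 */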
T_DECL(reject_old_events,
    "ensure that kdebug rejects events from before tracing began",
    T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
	__block uint64_t event_horizon_ts;
	__block int events = 0;

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
	    KDBG_EVENTID(DBG_BSD + 1, 0, 0),
	    ^(struct trace_point *tp)
	{
		events++;
		T_EXPECT_GT(tp->timestamp, event_horizon_ts,
		    "events in trace should be from after tracing began");
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_EQ(events, 2, "should see only two events");
		ktrace_session_destroy(s);
		T_END;
	});

	event_horizon_ts = mach_absolute_time();

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	/* first, try an old event at the beginning of trace */
	assert_kdebug_test(KDBG_TEST_OLD_TIMES);
	/* after a good event has been traced, old events should be rejected */
	assert_kdebug_test(KDBG_TEST_OLD_TIMES);
	ktrace_end(s, 0);

	dispatch_main();
}
#define ORDERING_TIMEOUT_SEC 5
T_DECL(ascending_time_order,
    "ensure that kdebug events are in ascending order based on time",
    T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
	__block uint64_t prev_ts = 0;
	__block uint32_t prev_debugid = 0;
	__block unsigned int prev_cpu = 0;
	__block bool in_order = true;

	ktrace_session_t s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	ktrace_events_all(s, ^(struct trace_point *tp) {
		if (tp->timestamp < prev_ts) {
			in_order = false;
			T_FAIL("found timestamps out of order");
			T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
			    prev_ts, prev_debugid, prev_cpu);
			T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
			    tp->timestamp, tp->debugid, tp->cpuid);
		}

		/* remember the previous event to compare against and report */
		prev_ts = tp->timestamp;
		prev_debugid = tp->debugid;
		prev_cpu = tp->cpuid;
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_EXPECT_TRUE(in_order, "event timestamps were in-order");
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* try to inject old timestamps into trace */
	assert_kdebug_test(KDBG_TEST_OLD_TIMES);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
	    ORDERING_TIMEOUT_SEC * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^{
		T_LOG("ending test after timeout");
		ktrace_end(s, 1);
	});

	dispatch_main();
}
#pragma mark dyld tracing
__attribute__((aligned(8)))
static const char map_uuid[16] = "map UUID";

__attribute__((aligned(8)))
static const char unmap_uuid[16] = "unmap UUID";

__attribute__((aligned(8)))
static const char sc_uuid[16] = "shared UUID";

static fsid_t map_fsid = { .val = { 42, 43 } };
static fsid_t unmap_fsid = { .val = { 44, 45 } };
static fsid_t sc_fsid = { .val = { 46, 47 } };

static fsobj_id_t map_fsobjid = { .fid_objno = 42, .fid_generation = 43 };
static fsobj_id_t unmap_fsobjid = { .fid_objno = 44, .fid_generation = 45 };
static fsobj_id_t sc_fsobjid = { .fid_objno = 46, .fid_generation = 47 };

#define MAP_LOAD_ADDR   0xabadcafe
#define UNMAP_LOAD_ADDR 0xfeedface
#define SC_LOAD_ADDR    0xfedfaced
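
/*
 * dyld image registration is reported as a short sequence of DBG_DYLD_UUID
 * events.  On LP64, the UUID, load address, fsid, and fsobjid fit into two
 * events; on 32-bit they are spread across three, which is why
 * DYLD_CODE_OFFSET and DYLD_EVENTS (defined after the checker below) differ
 * per architecture.
 */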
static void
expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid,
    uint64_t exp_load_addr, fsid_t *exp_fsid, fsobj_id_t *exp_fsobjid,
    int order)
{
#if defined(__LP64__)
	if (order == 0) {
		uint64_t uuid[2];
		uint64_t load_addr;
		fsid_t fsid;

		uuid[0] = (uint64_t)tp->arg1;
		uuid[1] = (uint64_t)tp->arg2;
		load_addr = (uint64_t)tp->arg3;
		fsid.val[0] = (int32_t)(tp->arg4 & UINT32_MAX);
		fsid.val[1] = (int32_t)((uint64_t)tp->arg4 >> 32);

		T_QUIET; T_EXPECT_EQ(uuid[0], exp_uuid[0], NULL);
		T_QUIET; T_EXPECT_EQ(uuid[1], exp_uuid[1], NULL);
		T_QUIET; T_EXPECT_EQ(load_addr, exp_load_addr, NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
	} else if (order == 1) {
		fsobj_id_t fsobjid;

		fsobjid.fid_objno = (uint32_t)(tp->arg1 & UINT32_MAX);
		fsobjid.fid_generation = (uint32_t)((uint64_t)tp->arg1 >> 32);

		T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
		T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
		    exp_fsobjid->fid_generation, NULL);
	} else {
		T_ASSERT_FAIL("unrecognized order of events %d", order);
	}
#else /* defined(__LP64__) */
	if (order == 0) {
		uint32_t uuid[4];

		uuid[0] = (uint32_t)tp->arg1;
		uuid[1] = (uint32_t)tp->arg2;
		uuid[2] = (uint32_t)tp->arg3;
		uuid[3] = (uint32_t)tp->arg4;

		T_QUIET; T_EXPECT_EQ(uuid[0], (uint32_t)exp_uuid[0], NULL);
		T_QUIET; T_EXPECT_EQ(uuid[1], (uint32_t)(exp_uuid[0] >> 32), NULL);
		T_QUIET; T_EXPECT_EQ(uuid[2], (uint32_t)exp_uuid[1], NULL);
		T_QUIET; T_EXPECT_EQ(uuid[3], (uint32_t)(exp_uuid[1] >> 32), NULL);
	} else if (order == 1) {
		uint32_t load_addr;
		fsid_t fsid;
		fsobj_id_t fsobjid;

		load_addr = (uint32_t)tp->arg1;
		fsid.val[0] = (int32_t)tp->arg2;
		fsid.val[1] = (int32_t)tp->arg3;
		fsobjid.fid_objno = (uint32_t)tp->arg4;

		T_QUIET; T_EXPECT_EQ(load_addr, (uint32_t)exp_load_addr, NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
		T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
		T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
	} else if (order == 2) {
		fsobj_id_t fsobjid;

		fsobjid.fid_generation = (uint32_t)tp->arg1;

		T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
		    exp_fsobjid->fid_generation, NULL);
	} else {
		T_ASSERT_FAIL("unrecognized order of events %d", order);
	}
#endif /* defined(__LP64__) */
}
#if defined(__LP64__)
#define DYLD_CODE_OFFSET (0)
#define DYLD_EVENTS      (2)
#else
#define DYLD_CODE_OFFSET (2)
#define DYLD_EVENTS      (3)
#endif /* defined(__LP64__) */
static void
expect_dyld_events(ktrace_session_t s, const char *name, uint32_t base_code,
    const char *exp_uuid, uint64_t exp_load_addr, fsid_t *exp_fsid,
    fsobj_id_t *exp_fsobjid, uint8_t *saw_events)
{
	for (int i = 0; i < DYLD_EVENTS; i++) {
		ktrace_events_single(s,
		    KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID,
		    base_code + DYLD_CODE_OFFSET + (unsigned int)i),
		    ^(struct trace_point *tp)
		{
			T_LOG("checking %s event %c", name, 'A' + i);
			expect_dyld_image_info(tp, (const void *)exp_uuid, exp_load_addr,
			    exp_fsid, exp_fsobjid, i);
			*saw_events |= (1U << i);
		});
	}
}
T_DECL(dyld_events, "test that dyld registering libraries emits events",
    T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
	ktrace_session_t s;
	dyld_kernel_image_info_t info;

	/*
	 * Use pointers instead of __block variables in order to use these variables
	 * in the completion block below _and_ pass pointers to them to the
	 * expect_dyld_events function.
	 */
	uint8_t saw_events[3] = { 0 };
	uint8_t *saw_mapping = &(saw_events[0]);
	uint8_t *saw_unmapping = &(saw_events[1]);
	uint8_t *saw_shared_cache = &(saw_events[2]);

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, NULL);

	T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

	expect_dyld_events(s, "mapping", DBG_DYLD_UUID_MAP_A, map_uuid,
	    MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping);
	expect_dyld_events(s, "unmapping", DBG_DYLD_UUID_UNMAP_A, unmap_uuid,
	    UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping);
	expect_dyld_events(s, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A,
	    sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache);

	ktrace_set_completion_handler(s, ^(void) {
		T_EXPECT_EQ(__builtin_popcount(*saw_mapping), DYLD_EVENTS, NULL);
		T_EXPECT_EQ(__builtin_popcount(*saw_unmapping), DYLD_EVENTS, NULL);
		T_EXPECT_EQ(__builtin_popcount(*saw_shared_cache), DYLD_EVENTS, NULL);
		ktrace_session_destroy(s);
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	info.load_addr = MAP_LOAD_ADDR;
	memcpy(info.uuid, map_uuid, sizeof(info.uuid));
	info.fsid = map_fsid;
	info.fsobjid = map_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(),
	    &info, 1), NULL);

	info.load_addr = UNMAP_LOAD_ADDR;
	memcpy(info.uuid, unmap_uuid, sizeof(info.uuid));
	info.fsid = unmap_fsid;
	info.fsobjid = unmap_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(),
	    &info, 1), NULL);

	info.load_addr = SC_LOAD_ADDR;
	memcpy(info.uuid, sc_uuid, sizeof(info.uuid));
	info.fsid = sc_fsid;
	info.fsobjid = sc_fsobjid;
	T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info(
	    mach_task_self(), info, FALSE, FALSE), NULL);

	ktrace_end(s, 0);

	dispatch_main();
}
#pragma mark kdebug kernel macros

#define EXP_KERNEL_EVENTS 5U
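
/*
 * The KDBG_TEST_MACROS flavor is expected to emit EXP_KERNEL_EVENTS events
 * through each family of kernel tracing macros: codes 0-4 from the
 * development-only macros, 5-9 from the release macros, and 10-14 from the
 * filter-only macros.  The nth event of a family carries arguments 1..n with
 * the remainder zeroed, which expect_event() checks below.
 */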
static const uint32_t dev_evts[EXP_KERNEL_EVENTS] = {
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 0),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 1),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 2),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 3),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 4),
};

static const uint32_t rel_evts[EXP_KERNEL_EVENTS] = {
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 5),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 6),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 7),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 8),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 9),
};

static const uint32_t filt_evts[EXP_KERNEL_EVENTS] = {
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 10),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 11),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 12),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 13),
	BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 14),
};
static bool
is_development_kernel(void)
{
	static dispatch_once_t is_development_once;
	static bool is_development;

	dispatch_once(&is_development_once, ^(void) {
		int dev;
		size_t dev_size = sizeof(dev);

		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.development", &dev,
		    &dev_size, NULL, 0), NULL);
		is_development = (dev != 0);
	});

	return is_development;
}
static void
expect_event(struct trace_point *tp, unsigned int *events,
    const uint32_t *event_ids, size_t event_ids_len)
{
	unsigned int event_idx = *events;
	bool event_found = false;
	size_t i;

	for (i = 0; i < event_ids_len; i++) {
		if (event_ids[i] == (tp->debugid & KDBG_EVENTID_MASK)) {
			T_LOG("found event 0x%x", tp->debugid);
			event_found = true;
		}
	}

	if (!event_found) {
		return;
	}

	*events += 1;

	/* the nth event of a family carries arguments 1..n; the rest are zero */
	for (i = 0; i < event_idx; i++) {
		T_QUIET; T_EXPECT_EQ(((uintptr_t *)&tp->arg1)[i], (uintptr_t)i + 1,
		    NULL);
	}
	for (; i < 4; i++) {
		T_QUIET; T_EXPECT_EQ(((uintptr_t *)&tp->arg1)[i], (uintptr_t)0, NULL);
	}
}

static void
expect_release_event(struct trace_point *tp, unsigned int *events)
{
	expect_event(tp, events, rel_evts,
	    sizeof(rel_evts) / sizeof(rel_evts[0]));
}

static void
expect_development_event(struct trace_point *tp, unsigned int *events)
{
	expect_event(tp, events, dev_evts,
	    sizeof(dev_evts) / sizeof(dev_evts[0]));
}

static void
expect_filtered_event(struct trace_point *tp, unsigned int *events)
{
	expect_event(tp, events, filt_evts,
	    sizeof(filt_evts) / sizeof(filt_evts[0]));
}
T_DECL(kernel_events, "ensure kernel macros work",
    T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
	ktrace_session_t s;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, NULL);

	__block unsigned int dev_seen = 0;
	__block unsigned int rel_seen = 0;
	__block unsigned int filt_seen = 0;
	ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
	    KDBG_EVENTID(DBG_BSD + 1, 0, 0),
	    ^(struct trace_point *tp)
	{
		expect_development_event(tp, &dev_seen);
		expect_release_event(tp, &rel_seen);
		expect_filtered_event(tp, &filt_seen);
	});

	ktrace_set_completion_handler(s, ^(void) {
		/*
		 * Development-only events are only filtered if running on an embedded
		 * OS.
		 */
		unsigned int dev_exp;
#if TARGET_OS_EMBEDDED
		dev_exp = is_development_kernel() ? EXP_KERNEL_EVENTS : 0U;
#else
		dev_exp = EXP_KERNEL_EVENTS;
#endif /* TARGET_OS_EMBEDDED */

		T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS,
		    "release and development events seen");
		T_EXPECT_EQ(dev_seen, dev_exp, "development-only events seen/not seen");
		T_EXPECT_EQ(filt_seen, dev_exp, "filter-only events seen");
		ktrace_session_destroy(s);
		T_END;
	});

	ktrace_filter_pid(s, getpid());

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	assert_kdebug_test(KDBG_TEST_MACROS);

	ktrace_end(s, 0);

	dispatch_main();
}
T_DECL(kernel_events_filtered, "ensure that the filtered kernel macros work",
    T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
	ktrace_session_t s;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, NULL);

	__block unsigned int dev_seen = 0;
	__block unsigned int rel_seen = 0;
	__block unsigned int filt_seen = 0;
	ktrace_events_all(s, ^(struct trace_point *tp) {
		expect_development_event(tp, &dev_seen);
		expect_release_event(tp, &rel_seen);
		/* to make sure no filtered events are emitted */
		expect_filtered_event(tp, &filt_seen);
	});

	ktrace_set_completion_handler(s, ^(void) {
		ktrace_session_destroy(s);

		T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, NULL);
#if defined(__arm__) || defined(__arm64__)
		T_EXPECT_EQ(dev_seen, is_development_kernel() ? EXP_KERNEL_EVENTS : 0U,
		    NULL);
#else
		T_EXPECT_EQ(dev_seen, EXP_KERNEL_EVENTS, NULL);
#endif /* defined(__arm__) || defined(__arm64__) */
		T_EXPECT_EQ(filt_seen, 0U, NULL);
		T_END;
	});

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	assert_kdebug_test(KDBG_TEST_MACROS);

	ktrace_end(s, 0);

	dispatch_main();
}