]> git.saurik.com Git - apple/xnu.git/blob - tests/kdebug.c
xnu-4903.241.1.tar.gz
[apple/xnu.git] / tests / kdebug.c
1 #include <darwintest.h>
2 #include <darwintest_utils.h>
3 #include <dispatch/dispatch.h>
4 #include <inttypes.h>
5 #include <ktrace/session.h>
6 #include <ktrace/private.h>
7 #include <kperf/kperf.h>
8 #include <mach/clock_types.h>
9 #include <mach/dyld_kernel.h>
10 #include <mach/host_info.h>
11 #include <mach/mach.h>
12 #include <mach/mach_init.h>
13 #include <mach/task.h>
14 #include <os/assumes.h>
15 #include <stdlib.h>
16 #include <sys/kdebug.h>
17 #include <sys/kdebug_signpost.h>
18 #include <sys/sysctl.h>
19
/*
 * All tests in this file live in the "xnu.ktrace" namespace and run as root
 * (kdebug/ktrace facilities are privileged).
 */
T_GLOBAL_META(
    T_META_NAMESPACE("xnu.ktrace"),
    T_META_ASROOT(true));

/* Flavors accepted by the in-kernel KERN_KDTEST self-test sysctl. */
#define KDBG_TEST_MACROS 1
#define KDBG_TEST_OLD_TIMES 2
26
/*
 * Ask the kernel to run one of its built-in kdebug self-tests.
 *
 * KERN_KDTEST takes no output buffer; the test flavor is smuggled to the
 * kernel through the old-value length pointer (oldlenp) instead.
 */
static void
assert_kdebug_test(unsigned int flavor)
{
    /* flavor rides in "size" -- the sysctl's oldlenp argument */
    size_t size = flavor;
    int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDTEST };
    T_ASSERT_POSIX_SUCCESS(sysctl(mib, sizeof(mib) / sizeof(mib[0]), NULL,
        &size, NULL, 0), "KERN_KDTEST sysctl");
}
35
36 #pragma mark kdebug syscalls
37
/* Debugid unlikely to collide with anything else the system emits. */
#define TRACE_DEBUGID (0xfedfed00U)

/*
 * Emit one event with kdebug_trace(2) and check that it appears in the trace
 * with all four arguments intact.
 */
T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events")
{
    ktrace_session_t s = ktrace_session_create();
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

    /* also subscribe to a busy class so buffers flush promptly (see the
     * KDBUFWAIT note in the signpost test) */
    ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});

    __block int events_seen = 0;
    ktrace_events_single(s, TRACE_DEBUGID, ^void(struct trace_point *tp) {
        events_seen++;
        T_PASS("saw traced event");

        T_EXPECT_EQ(tp->arg1, 1UL, "argument 1 of traced event is correct");
        T_EXPECT_EQ(tp->arg2, 2UL, "argument 2 of traced event is correct");
        T_EXPECT_EQ(tp->arg3, 3UL, "argument 3 of traced event is correct");
        T_EXPECT_EQ(tp->arg4, 4UL, "argument 4 of traced event is correct");

        /* got what we came for -- stop tracing immediately */
        ktrace_end(s, 1);
    });

    ktrace_set_completion_handler(s, ^{
        T_EXPECT_GE(events_seen, 1, NULL);
        ktrace_session_destroy(s);
        T_END;
    });

    /* only inspect events from this process */
    ktrace_filter_pid(s, getpid());

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
    T_ASSERT_POSIX_SUCCESS(kdebug_trace(TRACE_DEBUGID, 1, 2, 3, 4), NULL);
    /* non-forced end: completion handler fires after events drain */
    ktrace_end(s, 0);

    dispatch_main();
}
74
75 #define SIGNPOST_SINGLE_CODE (0x10U)
76 #define SIGNPOST_PAIRED_CODE (0x20U)
77
78 T_DECL(kdebug_signpost_syscall,
79 "test that kdebug_signpost(2) emits correct events")
80 {
81 ktrace_session_t s = ktrace_session_create();
82 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
83
84 __block int single_seen = 0;
85 __block int paired_seen = 0;
86
87 /* make sure to get enough events for the KDBUFWAIT to trigger */
88 // ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
89 ktrace_events_single(s,
90 APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE),
91 ^(struct trace_point *tp) {
92 single_seen++;
93 T_PASS("single signpost is traced");
94
95 T_EXPECT_EQ(tp->arg1, 1UL, "argument 1 of single signpost is correct");
96 T_EXPECT_EQ(tp->arg2, 2UL, "argument 2 of single signpost is correct");
97 T_EXPECT_EQ(tp->arg3, 3UL, "argument 3 of single signpost is correct");
98 T_EXPECT_EQ(tp->arg4, 4UL, "argument 4 of single signpost is correct");
99 });
100
101 ktrace_events_single_paired(s,
102 APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE),
103 ^(struct trace_point *start, struct trace_point *end) {
104 paired_seen++;
105 T_PASS("paired signposts are traced");
106
107 T_EXPECT_EQ(start->arg1, 5UL, "argument 1 of start signpost is correct");
108 T_EXPECT_EQ(start->arg2, 6UL, "argument 2 of start signpost is correct");
109 T_EXPECT_EQ(start->arg3, 7UL, "argument 3 of start signpost is correct");
110 T_EXPECT_EQ(start->arg4, 8UL, "argument 4 of start signpost is correct");
111
112 T_EXPECT_EQ(end->arg1, 9UL, "argument 1 of end signpost is correct");
113 T_EXPECT_EQ(end->arg2, 10UL, "argument 2 of end signpost is correct");
114 T_EXPECT_EQ(end->arg3, 11UL, "argument 3 of end signpost is correct");
115 T_EXPECT_EQ(end->arg4, 12UL, "argument 4 of end signpost is correct");
116
117 T_EXPECT_EQ(single_seen, 1,
118 "signposts are traced in the correct order");
119
120 ktrace_end(s, 1);
121 });
122
123 ktrace_set_completion_handler(s, ^(void) {
124 T_QUIET; T_EXPECT_NE(single_seen, 0,
125 "did not see single tracepoint before timeout");
126 T_QUIET; T_EXPECT_NE(paired_seen, 0,
127 "did not see single tracepoint before timeout");
128 ktrace_session_destroy(s);
129 T_END;
130 });
131
132 ktrace_filter_pid(s, getpid());
133
134 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
135 "started tracing");
136
137 T_EXPECT_POSIX_SUCCESS(kdebug_signpost(SIGNPOST_SINGLE_CODE, 1, 2, 3, 4),
138 "emitted single signpost");
139 T_EXPECT_POSIX_SUCCESS(
140 kdebug_signpost_start(SIGNPOST_PAIRED_CODE, 5, 6, 7, 8),
141 "emitted start signpost");
142 T_EXPECT_POSIX_SUCCESS(
143 kdebug_signpost_end(SIGNPOST_PAIRED_CODE, 9, 10, 11, 12),
144 "emitted end signpost");
145 ktrace_end(s, 0);
146
147 dispatch_main();
148 }
149
150 #pragma mark kdebug behaviors
151
#define WRAPPING_EVENTS_COUNT (150000)
#define TRACE_ITERATIONS (5000)
#define WRAPPING_EVENTS_THRESHOLD (100)

/*
 * Configure a small kdebug buffer with raw sysctls, spin emitting events
 * until the kernel reports KDBG_WRAPPED, then read the buffer back with
 * libktrace: the first event must be TRACE_LOST_EVENTS and no later event
 * in the inspected window may be another TRACE_LOST_EVENTS.
 */
T_DECL(wrapping,
    "ensure that wrapping traces lost events and no events prior to the wrap",
    T_META_CHECK_LEAKS(false))
{
    int mib[4];
    kbufinfo_t buf_info;
    int wait_wrapping_secs = (WRAPPING_EVENTS_COUNT / TRACE_ITERATIONS) + 5;
    int current_secs = wait_wrapping_secs;

    /* use sysctls manually to bypass libktrace assumptions */

    mib[0] = CTL_KERN; mib[1] = KERN_KDEBUG; mib[2] = KERN_KDSETUP; mib[3] = 0;
    size_t needed = 0;
    T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, NULL, &needed, NULL, 0),
        "KERN_KDSETUP");

    /* mib[3] carries the requested buffer size, in events */
    mib[2] = KERN_KDSETBUF; mib[3] = WRAPPING_EVENTS_COUNT;
    T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDSETBUF");

    mib[2] = KERN_KDENABLE; mib[3] = 1;
    T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDENABLE");

    /* wrapping is on by default */

    /* wait until wrapped */
    T_LOG("waiting for trace to wrap");
    mib[2] = KERN_KDGETBUF;
    needed = sizeof(buf_info);
    do {
        sleep(1);
        /* emit a burst of filler events to push the buffer around */
        for (int i = 0; i < TRACE_ITERATIONS; i++) {
            T_QUIET;
            T_ASSERT_POSIX_SUCCESS(kdebug_trace(0xfefe0000, 0, 0, 0, 0), NULL);
        }
        T_QUIET;
        T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, &buf_info, &needed, NULL, 0),
            NULL);
    } while (!(buf_info.flags & KDBG_WRAPPED) && --current_secs > 0);

    T_ASSERT_TRUE(buf_info.flags & KDBG_WRAPPED,
        "trace wrapped (after %d seconds within %d second timeout)",
        wait_wrapping_secs - current_secs, wait_wrapping_secs);

    /* attach libktrace to the already-running trace */
    ktrace_session_t s = ktrace_session_create();
    T_QUIET; T_ASSERT_NOTNULL(s, NULL);
    T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(s), NULL);

    __block int events = 0;

    ktrace_events_all(s, ^(struct trace_point *tp) {
        if (events == 0) {
            /* the wrap must be announced up front */
            T_EXPECT_EQ(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
                "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS",
                tp->debugid,
                ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
        } else {
            T_QUIET;
            T_EXPECT_NE(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
                "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS",
                tp->debugid,
                ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
        }

        events++;
        /* a bounded sample is enough -- stop after the threshold */
        if (events > WRAPPING_EVENTS_THRESHOLD) {
            ktrace_end(s, 1);
        }
    });

    ktrace_set_completion_handler(s, ^{
        ktrace_session_destroy(s);
        T_END;
    });

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
        "started tracing");

    dispatch_main();
}
235
/*
 * Have the kernel (KDBG_TEST_OLD_TIMES) try to inject events with stale
 * timestamps and check that kdebug drops them: every event that does reach
 * the trace must be newer than the moment tracing began, and only two events
 * total should survive the two test invocations.  NOTE(review): the exact
 * surviving-event count is dictated by the in-kernel test -- confirm there.
 */
T_DECL(reject_old_events,
    "ensure that kdebug rejects events from before tracing began",
    T_META_CHECK_LEAKS(false))
{
    /* captured by the events block; set just before tracing starts */
    __block uint64_t event_horizon_ts;

    ktrace_session_t s = ktrace_session_create();
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

    __block int events = 0;
    /* watch the whole DBG_BSD test code range */
    ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
        KDBG_EVENTID(DBG_BSD + 1, 0, 0), ^(struct trace_point *tp) {
        events++;
        T_EXPECT_GT(tp->timestamp, event_horizon_ts,
            "events in trace should be from after tracing began");
    });

    ktrace_set_completion_handler(s, ^{
        T_EXPECT_EQ(events, 2, "should see only two events");
        ktrace_session_destroy(s);
        T_END;
    });

    event_horizon_ts = mach_absolute_time();

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
    /* first, try an old event at the beginning of trace */
    assert_kdebug_test(KDBG_TEST_OLD_TIMES);
    /* after a good event has been traced, old events should be rejected */
    assert_kdebug_test(KDBG_TEST_OLD_TIMES);
    ktrace_end(s, 0);

    dispatch_main();
}
270
271 #define ORDERING_TIMEOUT_SEC 5
272
273 T_DECL(ascending_time_order,
274 "ensure that kdebug events are in ascending order based on time",
275 T_META_CHECK_LEAKS(false))
276 {
277 __block uint64_t prev_ts = 0;
278 __block uint32_t prev_debugid = 0;
279 __block unsigned int prev_cpu = 0;
280 __block bool in_order = true;
281
282 ktrace_session_t s = ktrace_session_create();
283 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
284
285 ktrace_events_all(s, ^(struct trace_point *tp) {
286 if (tp->timestamp < prev_ts) {
287 in_order = false;
288 T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
289 prev_ts, prev_debugid, prev_cpu);
290 T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
291 tp->timestamp, tp->debugid, tp->cpuid);
292 ktrace_end(s, 1);
293 }
294 });
295
296 ktrace_set_completion_handler(s, ^{
297 ktrace_session_destroy(s);
298 T_EXPECT_TRUE(in_order, "event timestamps were in-order");
299 T_END;
300 });
301
302 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
303 "started tracing");
304
305 /* try to inject old timestamps into trace */
306 assert_kdebug_test(KDBG_TEST_OLD_TIMES);
307
308 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ORDERING_TIMEOUT_SEC * NSEC_PER_SEC),
309 dispatch_get_main_queue(), ^{
310 T_LOG("ending test after timeout");
311 ktrace_end(s, 1);
312 });
313
314 dispatch_main();
315 }
316
317 #pragma mark dyld tracing
318
/*
 * Fixed UUIDs, filesystem IDs, object IDs, and load addresses used to
 * fabricate dyld image-info registrations; the dyld_events test checks that
 * these values round-trip through the trace.  The 8-byte alignment allows
 * the UUID bytes to be read as pairs of uint64_ts (see
 * expect_dyld_image_info).
 */
__attribute__((aligned(8)))
static const char map_uuid[16] = "map UUID";

__attribute__((aligned(8)))
static const char unmap_uuid[16] = "unmap UUID";

__attribute__((aligned(8)))
static const char sc_uuid[16] = "shared UUID";

static fsid_t map_fsid = { .val = { 42, 43 } };
static fsid_t unmap_fsid = { .val = { 44, 45 } };
static fsid_t sc_fsid = { .val = { 46, 47 } };

static fsobj_id_t map_fsobjid = { .fid_objno = 42, .fid_generation = 43 };
static fsobj_id_t unmap_fsobjid = { .fid_objno = 44, .fid_generation = 45 };
static fsobj_id_t sc_fsobjid = { .fid_objno = 46, .fid_generation = 47 };

/* Synthetic load addresses for the map, unmap, and shared-cache cases. */
#define MAP_LOAD_ADDR 0xabadcafe
#define UNMAP_LOAD_ADDR 0xfeedface
#define SC_LOAD_ADDR 0xfedfaced
339
/*
 * Validate one event's worth of dyld image info against expected values.
 *
 * The kernel splits an image-info record across several events, packed
 * differently by pointer size; "order" says which event in the sequence this
 * is (see DYLD_EVENTS/DYLD_CODE_OFFSET).  Mismatches are reported through
 * quiet darwintest expectations; an unknown order is a hard failure.
 */
__unused
static void
expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid,
    uint64_t exp_load_addr, fsid_t *exp_fsid, fsobj_id_t *exp_fsobjid,
    int order)
{
#if defined(__LP64__) || defined(__arm64__)
    /* 64-bit: event 0 carries uuid + load addr + fsid, event 1 the fsobjid */
    if (order == 0) {
        uint64_t uuid[2];
        uint64_t load_addr;
        fsid_t fsid;

        uuid[0] = (uint64_t)tp->arg1;
        uuid[1] = (uint64_t)tp->arg2;
        load_addr = (uint64_t)tp->arg3;
        /* fsid's two 32-bit halves are packed into arg4 */
        fsid.val[0] = (int32_t)(tp->arg4 & UINT32_MAX);
        fsid.val[1] = (int32_t)((uint64_t)tp->arg4 >> 32);

        T_QUIET; T_EXPECT_EQ(uuid[0], exp_uuid[0], NULL);
        T_QUIET; T_EXPECT_EQ(uuid[1], exp_uuid[1], NULL);
        T_QUIET; T_EXPECT_EQ(load_addr, exp_load_addr, NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
    } else if (order == 1) {
        fsobj_id_t fsobjid;

        /* objno and generation are packed into arg1 */
        fsobjid.fid_objno = (uint32_t)(tp->arg1 & UINT32_MAX);
        fsobjid.fid_generation = (uint32_t)((uint64_t)tp->arg1 >> 32);

        T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
        T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
            exp_fsobjid->fid_generation, NULL);
    } else {
        T_ASSERT_FAIL("unrecognized order of events %d", order);
    }
#else /* defined(__LP64__) */
    /* 32-bit: three events -- uuid; load addr + fsid + objno; generation */
    if (order == 0) {
        uint32_t uuid[4];

        uuid[0] = (uint32_t)tp->arg1;
        uuid[1] = (uint32_t)tp->arg2;
        uuid[2] = (uint32_t)tp->arg3;
        uuid[3] = (uint32_t)tp->arg4;

        T_QUIET; T_EXPECT_EQ(uuid[0], (uint32_t)exp_uuid[0], NULL);
        T_QUIET; T_EXPECT_EQ(uuid[1], (uint32_t)(exp_uuid[0] >> 32), NULL);
        T_QUIET; T_EXPECT_EQ(uuid[2], (uint32_t)exp_uuid[1], NULL);
        T_QUIET; T_EXPECT_EQ(uuid[3], (uint32_t)(exp_uuid[1] >> 32), NULL);
    } else if (order == 1) {
        uint32_t load_addr;
        fsid_t fsid;
        fsobj_id_t fsobjid;

        load_addr = (uint32_t)tp->arg1;
        fsid.val[0] = (int32_t)tp->arg2;
        fsid.val[1] = (int32_t)tp->arg3;
        fsobjid.fid_objno = (uint32_t)tp->arg4;

        T_QUIET; T_EXPECT_EQ(load_addr, (uint32_t)exp_load_addr, NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
        T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
    } else if (order == 2) {
        fsobj_id_t fsobjid;

        fsobjid.fid_generation = tp->arg1;

        T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
            exp_fsobjid->fid_generation, NULL);
    } else {
        T_ASSERT_FAIL("unrecognized order of events %d", order);
    }
#endif /* defined(__LP64__) */
}
414
/*
 * A dyld image-info record spans a different number of consecutive events
 * depending on pointer size, starting at a different code offset (see
 * expect_dyld_image_info for the packing).
 */
#if defined(__LP64__) || defined(__arm64__)
#define DYLD_CODE_OFFSET (0)
#define DYLD_EVENTS (2)
#else
#define DYLD_CODE_OFFSET (2)
#define DYLD_EVENTS (3)
#endif
422
/*
 * Register callbacks for the DYLD_EVENTS consecutive events that carry one
 * image-info record, starting at base_code.  Each event observed sets its
 * bit in *saw_events so the caller can verify all pieces arrived.
 */
static void
expect_dyld_events(ktrace_session_t s, const char *name, uint32_t base_code,
    const char *exp_uuid, uint64_t exp_load_addr, fsid_t *exp_fsid,
    fsobj_id_t *exp_fsobjid, uint8_t *saw_events)
{
    for (int i = 0; i < DYLD_EVENTS; i++) {
        /* each block captures its own copy of i */
        ktrace_events_single(s, KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID,
            base_code + DYLD_CODE_OFFSET + (unsigned int)i),
            ^(struct trace_point *tp) {
            T_LOG("checking %s event %c", name, 'A' + i);
            expect_dyld_image_info(tp, (const void *)exp_uuid, exp_load_addr,
                exp_fsid, exp_fsobjid, i);
            *saw_events |= (1U << i);
        });
    }
}
439
/*
 * Register, unregister, and shared-cache-register fixed dyld image infos via
 * the task_*_dyld_* Mach calls and verify every piece of each record shows
 * up in the trace (one bit per event in the saw_events flags).
 */
T_DECL(dyld_events, "test that dyld registering libraries emits events")
{
    dyld_kernel_image_info_t info;

    /*
     * Use pointers instead of __block variables in order to use these variables
     * in the completion block below _and_ pass pointers to them to the
     * expect_dyld_events function.
     */
    uint8_t saw_events[3] = { 0 };
    uint8_t *saw_mapping = &(saw_events[0]);
    uint8_t *saw_unmapping = &(saw_events[1]);
    uint8_t *saw_shared_cache = &(saw_events[2]);

    ktrace_session_t s = ktrace_session_create();
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

    T_QUIET;
    T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
        "filtered to current process");

    expect_dyld_events(s, "mapping", DBG_DYLD_UUID_MAP_A, map_uuid,
        MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping);
    expect_dyld_events(s, "unmapping", DBG_DYLD_UUID_UNMAP_A, unmap_uuid,
        UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping);
    expect_dyld_events(s, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A,
        sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache);

    ktrace_set_completion_handler(s, ^{
        ktrace_session_destroy(s);

        /* every event in each record must have been seen exactly once */
        T_EXPECT_EQ(__builtin_popcount(*saw_mapping), DYLD_EVENTS, NULL);
        T_EXPECT_EQ(__builtin_popcount(*saw_unmapping), DYLD_EVENTS, NULL);
        T_EXPECT_EQ(__builtin_popcount(*saw_shared_cache), DYLD_EVENTS, NULL);
        T_END;
    });

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

    /* mapping */
    info.load_addr = MAP_LOAD_ADDR;
    memcpy(info.uuid, map_uuid, sizeof(info.uuid));
    info.fsid = map_fsid;
    info.fsobjid = map_fsobjid;
    T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(),
        &info, 1), "registered dyld image info");

    /* unmapping */
    info.load_addr = UNMAP_LOAD_ADDR;
    memcpy(info.uuid, unmap_uuid, sizeof(info.uuid));
    info.fsid = unmap_fsid;
    info.fsobjid = unmap_fsobjid;
    T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(),
        &info, 1), "unregistered dyld image info");

    /* shared cache */
    info.load_addr = SC_LOAD_ADDR;
    memcpy(info.uuid, sc_uuid, sizeof(info.uuid));
    info.fsid = sc_fsid;
    info.fsobjid = sc_fsobjid;
    T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info(
        mach_task_self(), info, FALSE, FALSE),
        "registered dyld shared cache image info");

    ktrace_end(s, 0);

    dispatch_main();
}
505
506 #pragma mark kdebug kernel macros
507
/* Number of events the in-kernel macro test emits per category. */
#define EXP_KERNEL_EVENTS 5U

/* Codes 0-4: events from development-kernel-only macros. */
static const uint32_t dev_evts[EXP_KERNEL_EVENTS] = {
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 0),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 1),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 2),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 3),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 4),
};

/* Codes 5-9: events from macros active in release kernels too. */
static const uint32_t rel_evts[EXP_KERNEL_EVENTS] = {
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 5),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 6),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 7),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 8),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 9),
};

/* Codes 10-14: events from filter-only macros. */
static const uint32_t filt_evts[EXP_KERNEL_EVENTS] = {
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 10),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 11),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 12),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 13),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 14),
};

/* Codes 15-19: events that bypass the process filter. */
static const uint32_t noprocfilt_evts[EXP_KERNEL_EVENTS] = {
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 15),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 16),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 17),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 18),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 19),
};
541
542 static bool
543 is_development_kernel(void)
544 {
545 static dispatch_once_t is_development_once;
546 static bool is_development;
547
548 dispatch_once(&is_development_once, ^{
549 int dev;
550 size_t dev_size = sizeof(dev);
551
552 T_QUIET;
553 T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.development", &dev,
554 &dev_size, NULL, 0), NULL);
555 is_development = (dev != 0);
556 });
557
558 return is_development;
559 }
560
/*
 * Check one event produced by the in-kernel macro test and bump *events if
 * it belongs to the given category.
 *
 * The N-th matching event (0-based) is expected to carry the values 1..N in
 * its first N arguments and zeroes in the rest.  Events not in event_ids
 * are ignored entirely.
 */
static void
expect_event(struct trace_point *tp, const char *name, unsigned int *events,
    const uint32_t *event_ids, size_t event_ids_len)
{
    /* how many events of this category have been seen so far */
    unsigned int event_idx = *events;
    bool event_found = false;
    size_t i;
    for (i = 0; i < event_ids_len; i++) {
        if (event_ids[i] == (tp->debugid & KDBG_EVENTID_MASK)) {
            T_LOG("found %s event 0x%x", name, tp->debugid);
            event_found = true;
        }
    }

    if (!event_found) {
        return;
    }

    *events += 1;
    /*
     * NOTE(review): arg1..arg4 are indexed as a contiguous uint64_t array
     * starting at &tp->arg1 -- this assumes the trace_point argument fields
     * are adjacent 64-bit members; confirm against the ktrace headers.
     */
    for (i = 0; i < event_idx; i++) {
        T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)i + 1,
            NULL);
    }
    /* remaining arguments must be zero */
    for (; i < 4; i++) {
        T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)0, NULL);
    }
}
588
589 static void
590 expect_release_event(struct trace_point *tp, unsigned int *events)
591 {
592 expect_event(tp, "release", events, rel_evts,
593 sizeof(rel_evts) / sizeof(rel_evts[0]));
594 }
595
596 static void
597 expect_development_event(struct trace_point *tp, unsigned int *events)
598 {
599 expect_event(tp, "dev", events, dev_evts, sizeof(dev_evts) / sizeof(dev_evts[0]));
600 }
601
602 static void
603 expect_filtered_event(struct trace_point *tp, unsigned int *events)
604 {
605 expect_event(tp, "filtered", events, filt_evts,
606 sizeof(filt_evts) / sizeof(filt_evts[0]));
607 }
608
609 static void
610 expect_noprocfilt_event(struct trace_point *tp, unsigned int *events)
611 {
612 expect_event(tp, "noprocfilt", events, noprocfilt_evts,
613 sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]));
614 }
615
/*
 * Drive the in-kernel macro test (KDBG_TEST_MACROS) on an already-configured
 * session and tally how many events of each category arrive.
 *
 * use_all_callback selects between an events-all callback and one restricted
 * to the DBG_BSD test code range.  cb runs from the completion handler with
 * the per-category counts; T_END is called after cb returns.
 */
static void
expect_kdbg_test_events(ktrace_session_t s, bool use_all_callback,
    void (^cb)(unsigned int dev_seen, unsigned int rel_seen,
    unsigned int filt_seen, unsigned int noprocfilt_seen))
{
    __block unsigned int dev_seen = 0;
    __block unsigned int rel_seen = 0;
    __block unsigned int filt_seen = 0;
    __block unsigned int noprocfilt_seen = 0;

    /* every event is offered to all four category checkers */
    void (^evtcb)(struct trace_point *tp) = ^(struct trace_point *tp) {
        expect_development_event(tp, &dev_seen);
        expect_release_event(tp, &rel_seen);
        expect_filtered_event(tp, &filt_seen);
        expect_noprocfilt_event(tp, &noprocfilt_seen);
    };

    if (use_all_callback) {
        ktrace_events_all(s, evtcb);
    } else {
        ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
            KDBG_EVENTID(DBG_BSD + 1, 0, 0), evtcb);
    }

    ktrace_set_completion_handler(s, ^{
        ktrace_session_destroy(s);
        cb(dev_seen, rel_seen, filt_seen, noprocfilt_seen);
        T_END;
    });

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
    assert_kdebug_test(KDBG_TEST_MACROS);

    ktrace_end(s, 0);
}
651
/*
 * Run the kernel macro test with a process filter and a range callback and
 * check the expected number of events per category arrives.
 */
T_DECL(kernel_events, "ensure kernel macros work")
{
    ktrace_session_t s = ktrace_session_create();
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

    T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
        "filtered events to current process");

    expect_kdbg_test_events(s, false,
        ^(unsigned int dev_seen, unsigned int rel_seen,
        unsigned int filt_seen, unsigned int noprocfilt_seen) {
        /*
         * Development-only events are only filtered if running on an embedded
         * OS.
         */
        unsigned int dev_exp;
#if TARGET_OS_EMBEDDED
        dev_exp = is_development_kernel() ? EXP_KERNEL_EVENTS : 0U;
#else
        dev_exp = EXP_KERNEL_EVENTS;
#endif

        T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS,
            "release and development events seen");
        T_EXPECT_EQ(dev_seen, dev_exp, "development-only events %sseen",
            dev_exp ? "" : "not ");
        T_EXPECT_EQ(filt_seen, dev_exp, "filter-only events seen");
        T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
            "process filter-agnostic events seen");
    });

    dispatch_main();
}
685
/*
 * Same as kernel_events but with an events-all callback: filter-only events
 * should not appear at all since no typefilter is configured.
 */
T_DECL(kernel_events_filtered, "ensure that the filtered kernel macros work")
{
    ktrace_session_t s = ktrace_session_create();
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

    T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
        "filtered events to current process");

    expect_kdbg_test_events(s, true,
        ^(unsigned int dev_seen, unsigned int rel_seen,
        unsigned int filt_seen, unsigned int noprocfilt_seen) {
        T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, NULL);
        /* development-only events depend on the kernel build on ARM */
#if defined(__arm__) || defined(__arm64__)
        T_EXPECT_EQ(dev_seen, is_development_kernel() ? EXP_KERNEL_EVENTS : 0U,
            NULL);
#else
        T_EXPECT_EQ(dev_seen, EXP_KERNEL_EVENTS,
            "development-only events seen");
#endif /* defined(__arm__) || defined(__arm64__) */
        T_EXPECT_EQ(filt_seen, 0U, "no filter-only events seen");
        T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
            "process filter-agnostic events seen");
    });

    dispatch_main();
}
712
/*
 * Filter to a different process (launchd) and verify that only the events
 * explicitly exempted from the process filter are still traced.
 */
T_DECL(kernel_events_noprocfilt,
    "ensure that the no process filter kernel macros work")
{
    ktrace_session_t s = ktrace_session_create();
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");

    /*
     * Only allow launchd events through.
     */
    T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, 1), "filtered events to launchd");
    /* exempt each noprocfilt event from the process filter */
    for (size_t i = 0; i < sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]); i++) {
        T_QUIET;
        T_ASSERT_POSIX_ZERO(ktrace_ignore_process_filter_for_event(s,
            noprocfilt_evts[i]),
            "ignored process filter for noprocfilt event");
    }

    expect_kdbg_test_events(s, false,
        ^(unsigned int dev_seen, unsigned int rel_seen,
        unsigned int filt_seen, unsigned int noprocfilt_seen) {
        T_EXPECT_EQ(rel_seen, 0U, "release and development events not seen");
        T_EXPECT_EQ(dev_seen, 0U, "development-only events not seen");
        T_EXPECT_EQ(filt_seen, 0U, "filter-only events not seen");

        T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
            "process filter-agnostic events seen");
    });

    dispatch_main();
}
743
744 static volatile bool continue_abuse = true;
745
746 #define STRESS_DEBUGID (0xfeedfac0)
747 #define ABUSE_SECS (10)
748 #define TIMER_NS (100 * NSEC_PER_USEC)
749 /*
750 * Use the quantum as the gap threshold.
751 */
752 #define GAP_THRESHOLD_NS (10 * NSEC_PER_MSEC)
753
754 static void *
755 kdebug_abuser_thread(void *ctx)
756 {
757 unsigned int id = (unsigned int)ctx;
758 uint64_t i = 0;
759 while (continue_abuse) {
760 kdebug_trace(STRESS_DEBUGID, id, i, 0, 0);
761 i++;
762 }
763
764 return NULL;
765 }
766
/*
 * Stress kdebug: flood the trace from (ncpus - 1) spinning threads while
 * writing to a file through a tiny buffer, then re-read the file and report
 * any large gaps between events.  Gaps not attributable to interrupt/idle
 * handling are expected to end in a TRACE_LOST_EVENTS marker.
 */
T_DECL(stress, "emit events on all but one CPU with a small buffer",
    T_META_CHECK_LEAKS(false))
{
    T_SETUPBEGIN;
    ktrace_session_t s = ktrace_session_create();
    T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

    /* Let's not waste any time with pleasantries. */
    ktrace_set_uuid_map_enabled(s, KTRACE_FEATURE_DISABLED);

    /* Ouch. */
    ktrace_events_all(s, ^(__unused struct trace_point *tp) {});
    ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
    /* configure (but don't yet start) kperf timer sampling for extra load */
    (void)atexit_b(^{ kperf_reset(); });
    (void)kperf_action_count_set(1);
    (void)kperf_timer_count_set(1);
    int kperror = kperf_timer_period_set(0, kperf_ns_to_ticks(TIMER_NS));
    T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_period_set %llu ns",
        TIMER_NS);
    kperror = kperf_timer_action_set(0, 1);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_action_set");
    kperror = kperf_action_samplers_set(1, KPERF_SAMPLER_TINFO |
        KPERF_SAMPLER_TH_SNAPSHOT | KPERF_SAMPLER_KSTACK |
        KPERF_SAMPLER_USTACK | KPERF_SAMPLER_MEMINFO |
        KPERF_SAMPLER_TINFO_SCHED | KPERF_SAMPLER_TH_DISPATCH |
        KPERF_SAMPLER_TK_SNAPSHOT | KPERF_SAMPLER_SYS_MEM |
        KPERF_SAMPLER_TH_INSTRS_CYCLES);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_action_samplers_set");
    /* You monster... */

    /* The coup-de-grace. */
    ktrace_set_buffer_size(s, 10);

    /* write the trace into a temporary file */
    char filepath_arr[MAXPATHLEN] = "";
    strlcpy(filepath_arr, dt_tmpdir(), sizeof(filepath_arr));
    strlcat(filepath_arr, "/stress.ktrace", sizeof(filepath_arr));
    char *filepath = filepath_arr;

    int ncpus = 0;
    size_t ncpus_size = sizeof(ncpus);
    int ret = sysctlbyname("hw.logicalcpu_max", &ncpus, &ncpus_size, NULL, 0);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"hw.logicalcpu_max\"");
    T_QUIET; T_ASSERT_GT(ncpus, 0, "realistic number of CPUs");

    /* one abuser thread per CPU, minus one to keep the system responsive */
    pthread_t *threads = calloc((unsigned int)ncpus - 1, sizeof(pthread_t));
    T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
        ncpus - 1);

    /* pass 2 (after the writing session ends): read the file and analyze */
    ktrace_set_completion_handler(s, ^{
        T_SETUPBEGIN;
        ktrace_session_destroy(s);

        T_LOG("trace ended, searching for gaps");

        ktrace_session_t sread = ktrace_session_create();
        T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(sread, "ktrace_session_create");

        int error = ktrace_set_file(sread, filepath);
        T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_set_file %s", filepath);

        ktrace_file_t f = ktrace_file_open(filepath, false);
        T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(f, "ktrace_file_open %s",
            filepath);
        uint64_t first_timestamp = 0;
        error = ktrace_file_earliest_timestamp(f, &first_timestamp);
        T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_file_earliest_timestamp");

        uint64_t last_timestamp = 0;
        (void)ktrace_file_latest_timestamp(f, &last_timestamp);

        __block uint64_t prev_timestamp = 0;
        __block uint64_t nevents = 0;
        ktrace_events_all(sread, ^(struct trace_point *tp) {
            nevents++;
            uint64_t delta_ns = 0;
            T_QUIET; T_EXPECT_GE(tp->timestamp, prev_timestamp,
                "timestamps are monotonically increasing");
            int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
                tp->timestamp - prev_timestamp, &delta_ns);
            T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
            /* report any inter-event gap larger than the threshold */
            if (prev_timestamp && delta_ns > GAP_THRESHOLD_NS) {
                if (tp->debugname) {
                    T_LOG("gap: %gs at %llu - %llu on %d: %s (%#08x)",
                        (double)delta_ns / 1e9, prev_timestamp,
                        tp->timestamp, tp->cpuid, tp->debugname, tp->debugid);
                } else {
                    T_LOG("gap: %gs at %llu - %llu on %d: %#x",
                        (double)delta_ns / 1e9, prev_timestamp,
                        tp->timestamp, tp->cpuid, tp->debugid);
                }

/*
 * These gaps are ok -- they appear after CPUs are brought back
 * up.
 */
#define INTERRUPT (0x1050000)
#define PERF_CPU_IDLE (0x27001000)
#define INTC_HANDLER (0x5000004)
#define DECR_TRAP (0x1090000)
                uint32_t eventid = tp->debugid & KDBG_EVENTID_MASK;
                if (eventid != INTERRUPT && eventid != PERF_CPU_IDLE &&
                    eventid != INTC_HANDLER && eventid != DECR_TRAP) {
                    unsigned int lost_events = TRACE_LOST_EVENTS;
                    T_QUIET; T_EXPECT_EQ(tp->debugid, lost_events,
                        "gaps should end with lost events");
                }
            }

            prev_timestamp = tp->timestamp;
        });
        /* log every lost-events marker for diagnosis */
        ktrace_events_single(sread, TRACE_LOST_EVENTS, ^(struct trace_point *tp){
            T_LOG("lost: %llu on %d (%lu)", tp->timestamp, tp->cpuid, tp->arg1);
        });

        /* log each file-write interval and its duration */
        __block uint64_t last_write = 0;
        ktrace_events_single_paired(sread, TRACE_WRITING_EVENTS,
            ^(struct trace_point *start, struct trace_point *end) {
            uint64_t delta_ns;
            int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
                start->timestamp - last_write, &delta_ns);
            T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");

            uint64_t dur_ns;
            converror = ktrace_convert_timestamp_to_nanoseconds(sread,
                end->timestamp - start->timestamp, &dur_ns);
            T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");

            T_LOG("write: %llu (+%gs): %gus on %d: %lu events", start->timestamp,
                (double)delta_ns / 1e9, (double)dur_ns / 1e3, end->cpuid, end->arg1);
            last_write = end->timestamp;
        });
        ktrace_set_completion_handler(sread, ^{
            uint64_t duration_ns = 0;
            if (last_timestamp) {
                int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
                    last_timestamp - first_timestamp, &duration_ns);
                T_QUIET; T_ASSERT_POSIX_ZERO(converror,
                    "convert timestamp to ns");
                T_LOG("file was %gs long, %llu events: %g events/msec/cpu",
                    (double)duration_ns / 1e9, nevents,
                    (double)nevents / ((double)duration_ns / 1e6) / ncpus);
            }
            (void)unlink(filepath);
            ktrace_session_destroy(sread);
            T_END;
        });

        int starterror = ktrace_start(sread, dispatch_get_main_queue());
        T_QUIET; T_ASSERT_POSIX_ZERO(starterror, "ktrace_start read session");

        T_SETUPEND;
    });

/* Just kidding... for now. */
#if 0
    kperror = kperf_sample_set(1);
    T_ASSERT_POSIX_SUCCESS(kperror,
        "started kperf timer sampling every %llu ns", TIMER_NS);
#endif

    /* launch the abuser threads, each with a distinct small integer ID */
    for (int i = 0; i < (ncpus - 1); i++) {
        int error = pthread_create(&threads[i], NULL, kdebug_abuser_thread,
            (void *)(uintptr_t)i);
        T_QUIET; T_ASSERT_POSIX_ZERO(error,
            "pthread_create abuser thread %d", i);
    }

    int error = ktrace_start_writing_file(s, filepath,
        ktrace_compression_none, NULL, NULL);
    T_ASSERT_POSIX_ZERO(error, "started writing ktrace to %s", filepath);

    T_SETUPEND;

    /* after ABUSE_SECS, stop tracing and reap the abusers */
    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ABUSE_SECS * NSEC_PER_SEC),
        dispatch_get_main_queue(), ^{
        T_LOG("ending trace");
        ktrace_end(s, 1);

        continue_abuse = false;
        for (int i = 0; i < (ncpus - 1); i++) {
            int joinerror = pthread_join(threads[i], NULL);
            T_QUIET; T_EXPECT_POSIX_ZERO(joinerror, "pthread_join thread %d",
                i);
        }
    });

    dispatch_main();
}
955
956 #define ROUND_TRIP_PERIOD UINT64_C(10 * 1000)
957 #define ROUND_TRIPS_THRESHOLD UINT64_C(25)
958 #define ROUND_TRIPS_TIMEOUT_SECS (2 * 60)
959 #define COLLECTION_INTERVAL_MS 100
960
961 /*
962 * Test a sustained tracing session, involving multiple round-trips to the
963 * kernel.
964 *
965 * Trace all events, and every `ROUND_TRIP_PERIOD` events, emit an event that's
966 * unlikely to be emitted elsewhere. Look for this event, too, and make sure we
967 * see as many of them as we emitted.
968 *
969 * After seeing `ROUND_TRIPS_THRESHOLD` of the unlikely events, end tracing.
970 * In the failure mode, we won't see any of these, so set a timeout of
971 * `ROUND_TRIPS_TIMEOUT_SECS` to prevent hanging, waiting for events that we'll
972 * never see.
973 */
974 T_DECL(round_trips,
975 "test sustained tracing with multiple round-trips through the kernel")
976 {
977 ktrace_session_t s = ktrace_session_create();
978 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
979
980 /*
981 * Set a small buffer and collection interval to increase the number of
982 * round-trips.
983 */
984 ktrace_set_buffer_size(s, 50);
985 ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);
986
987 __block uint64_t events = 0;
988 __block uint64_t emitted = 0;
989 __block uint64_t seen = 0;
990 ktrace_events_all(s, ^(__unused struct trace_point *tp) {
991 events++;
992 if (events % ROUND_TRIP_PERIOD == 0) {
993 T_LOG("emitting round-trip event %" PRIu64, emitted);
994 kdebug_trace(TRACE_DEBUGID, events, 0, 0, 0);
995 emitted++;
996 }
997 });
998
999 ktrace_events_single(s, TRACE_DEBUGID, ^(__unused struct trace_point *tp) {
1000 T_LOG("saw round-trip event after %" PRIu64 " events", events);
1001 seen++;
1002 if (seen >= ROUND_TRIPS_THRESHOLD) {
1003 T_LOG("ending trace after seeing %" PRIu64 " events, "
1004 "emitting %" PRIu64, seen, emitted);
1005 ktrace_end(s, 1);
1006 }
1007 });
1008
1009 ktrace_set_completion_handler(s, ^{
1010 T_EXPECT_GE(emitted, ROUND_TRIPS_THRESHOLD,
1011 "emitted %" PRIu64 " round-trip events", emitted);
1012 T_EXPECT_GE(seen, ROUND_TRIPS_THRESHOLD,
1013 "saw %" PRIu64 " round-trip events", seen);
1014 ktrace_session_destroy(s);
1015 T_END;
1016 });
1017
1018 int error = ktrace_start(s, dispatch_get_main_queue());
1019 T_ASSERT_POSIX_ZERO(error, "started tracing");
1020
1021 dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
1022 ROUND_TRIPS_TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(),
1023 ^{
1024 T_LOG("ending trace after %d seconds", ROUND_TRIPS_TIMEOUT_SECS);
1025 ktrace_end(s, 0);
1026 });
1027
1028 dispatch_main();
1029 }
1030
1031 #define HEARTBEAT_INTERVAL_SECS 2
1032 #define HEARTBEAT_COUNT 20
1033
1034 /*
1035 * Ensure we see events periodically, checking for recent events on a
1036 * heart-beat.
1037 */
1038 T_DECL(event_coverage, "ensure events appear up to the end of tracing")
1039 {
1040 ktrace_session_t s = ktrace_session_create();
1041 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
1042
1043 __block uint64_t current_timestamp = 0;
1044 __block uint64_t events = 0;
1045 ktrace_events_all(s, ^(struct trace_point *tp) {
1046 current_timestamp = tp->timestamp;
1047 events++;
1048 });
1049
1050 ktrace_set_buffer_size(s, 20);
1051 ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);
1052
1053 __block uint64_t last_timestamp = 0;
1054 __block uint64_t last_events = 0;
1055 __block unsigned int heartbeats = 0;
1056
1057 ktrace_set_completion_handler(s, ^{
1058 ktrace_session_destroy(s);
1059 T_QUIET; T_EXPECT_GT(events, 0ULL, "should have seen some events");
1060 T_END;
1061 });
1062
1063 dispatch_source_t timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER,
1064 0, 0, dispatch_get_main_queue());
1065 dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW,
1066 HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC),
1067 HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC, 0);
1068 dispatch_source_set_cancel_handler(timer, ^{
1069 dispatch_release(timer);
1070 });
1071
1072 dispatch_source_set_event_handler(timer, ^{
1073 heartbeats++;
1074
1075 T_LOG("heartbeat %u at time %lld, seen %" PRIu64 " events, "
1076 "current event time %lld", heartbeats, mach_absolute_time(),
1077 events, current_timestamp);
1078
1079 if (current_timestamp > 0) {
1080 T_EXPECT_GT(current_timestamp, last_timestamp,
1081 "event timestamps should be increasing");
1082 T_QUIET; T_EXPECT_GT(events, last_events,
1083 "number of events should be increasing");
1084 }
1085
1086 last_timestamp = current_timestamp;
1087 last_events = events;
1088
1089 if (heartbeats >= HEARTBEAT_COUNT) {
1090 T_LOG("ending trace after %u heartbeats", HEARTBEAT_COUNT);
1091 ktrace_end(s, 0);
1092 }
1093 });
1094
1095 int error = ktrace_start(s, dispatch_get_main_queue());
1096 T_ASSERT_POSIX_ZERO(error, "started tracing");
1097
1098 dispatch_activate(timer);
1099
1100 dispatch_main();
1101 }