1#include <darwintest.h>
2#include <darwintest_utils.h>
3#include <dispatch/dispatch.h>
4#include <inttypes.h>
5#include <ktrace/session.h>
6#include <ktrace/private.h>
7#include <kperf/kperf.h>
8#include <mach/clock_types.h>
9#include <mach/dyld_kernel.h>
10#include <mach/host_info.h>
11#include <mach/mach.h>
12#include <mach/mach_init.h>
13#include <mach/task.h>
14#include <os/assumes.h>
15#include <stdlib.h>
16#include <sys/kdebug.h>
17#include <sys/kdebug_signpost.h>
18#include <sys/sysctl.h>
19#include <stdint.h>
20
21#include "ktrace_helpers.h"
22
23T_GLOBAL_META(
24 T_META_NAMESPACE("xnu.ktrace"),
25 T_META_ASROOT(true));
26
27#define KDBG_TEST_MACROS 1
28#define KDBG_TEST_OLD_TIMES 2
29#define KDBG_TEST_FUTURE_TIMES 3
30#define KDBG_TEST_IOP_SYNC_FLUSH 4
31
32static void
33assert_kdebug_test(unsigned int flavor)
34{
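	/*
	 * The test flavor rides in the sysctl's size (oldlenp) argument;
	 * KERN_KDTEST takes no old or new value of its own.
	 */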
35 size_t size = flavor;
36 int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDTEST };
37 T_ASSERT_POSIX_SUCCESS(sysctl(mib, sizeof(mib) / sizeof(mib[0]), NULL,
38 &size, NULL, 0), "KERN_KDTEST sysctl");
39}
40
41#pragma mark kdebug syscalls
42
43#define TRACE_DEBUGID (0xfedfed00U)
44
45T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events")
46{
47 start_controlling_ktrace();
48
49 ktrace_session_t s = ktrace_session_create();
50 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
51
52 ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
53
54 __block int events_seen = 0;
55 ktrace_events_single(s, TRACE_DEBUGID, ^void (struct trace_point *tp) {
56 events_seen++;
57 T_PASS("saw traced event");
58
59 if (ktrace_is_kernel_64_bit(s)) {
60 T_EXPECT_EQ(tp->arg1, UINT64_C(0xfeedfacefeedface),
61 "argument 1 of traced event is correct");
62 } else {
63 T_EXPECT_EQ(tp->arg1, UINT64_C(0xfeedface),
64 "argument 1 of traced event is correct");
65 }
66 T_EXPECT_EQ(tp->arg2, 2ULL, "argument 2 of traced event is correct");
67 T_EXPECT_EQ(tp->arg3, 3ULL, "argument 3 of traced event is correct");
68 T_EXPECT_EQ(tp->arg4, 4ULL, "argument 4 of traced event is correct");
69
70 ktrace_end(s, 1);
71 });
72
73 ktrace_set_completion_handler(s, ^{
74 T_EXPECT_GE(events_seen, 1, NULL);
75 ktrace_session_destroy(s);
76 T_END;
77 });
78
79 ktrace_filter_pid(s, getpid());
80
81 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
82 T_ASSERT_POSIX_SUCCESS(kdebug_trace(TRACE_DEBUGID, 0xfeedfacefeedface, 2,
83 3, 4), NULL);
84 ktrace_end(s, 0);
85
86 dispatch_main();
87}
88
89#define SIGNPOST_SINGLE_CODE (0x10U)
90#define SIGNPOST_PAIRED_CODE (0x20U)
91
92T_DECL(kdebug_signpost_syscall,
93 "test that kdebug_signpost(2) emits correct events")
94{
95 start_controlling_ktrace();
96
97 ktrace_session_t s = ktrace_session_create();
98 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
99
100 __block int single_seen = 0;
101 __block int paired_seen = 0;
102
103 /* make sure to get enough events for the KDBUFWAIT to trigger */
104 // ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
105 ktrace_events_single(s,
106 APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE),
107 ^(struct trace_point *tp) {
108 single_seen++;
109 T_PASS("single signpost is traced");
110
111 T_EXPECT_EQ(tp->arg1, 1ULL, "argument 1 of single signpost is correct");
112 T_EXPECT_EQ(tp->arg2, 2ULL, "argument 2 of single signpost is correct");
113 T_EXPECT_EQ(tp->arg3, 3ULL, "argument 3 of single signpost is correct");
114 T_EXPECT_EQ(tp->arg4, 4ULL, "argument 4 of single signpost is correct");
115 });
116
117 ktrace_events_single_paired(s,
118 APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE),
119 ^(struct trace_point *start, struct trace_point *end) {
120 paired_seen++;
121 T_PASS("paired signposts are traced");
122
123 T_EXPECT_EQ(start->arg1, 5ULL, "argument 1 of start signpost is correct");
124 T_EXPECT_EQ(start->arg2, 6ULL, "argument 2 of start signpost is correct");
125 T_EXPECT_EQ(start->arg3, 7ULL, "argument 3 of start signpost is correct");
126 T_EXPECT_EQ(start->arg4, 8ULL, "argument 4 of start signpost is correct");
127
128 T_EXPECT_EQ(end->arg1, 9ULL, "argument 1 of end signpost is correct");
129 T_EXPECT_EQ(end->arg2, 10ULL, "argument 2 of end signpost is correct");
130 T_EXPECT_EQ(end->arg3, 11ULL, "argument 3 of end signpost is correct");
131 T_EXPECT_EQ(end->arg4, 12ULL, "argument 4 of end signpost is correct");
132
133 T_EXPECT_EQ(single_seen, 1, "signposts are traced in the correct order");
134
135 ktrace_end(s, 1);
136 });
137
138 ktrace_set_completion_handler(s, ^(void) {
139 T_QUIET; T_EXPECT_NE(single_seen, 0,
140 "did not see single tracepoint before timeout");
141 T_QUIET; T_EXPECT_NE(paired_seen, 0,
142 "did not see paired tracepoints before timeout");
143 ktrace_session_destroy(s);
144 T_END;
145 });
146
147 ktrace_filter_pid(s, getpid());
148
149 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
150 "started tracing");
151
152#pragma clang diagnostic push
153#pragma clang diagnostic ignored "-Wdeprecated-declarations"
154 T_EXPECT_POSIX_SUCCESS(kdebug_signpost(SIGNPOST_SINGLE_CODE, 1, 2, 3, 4),
155 "emitted single signpost");
156 T_EXPECT_POSIX_SUCCESS(
157 kdebug_signpost_start(SIGNPOST_PAIRED_CODE, 5, 6, 7, 8),
158 "emitted start signpost");
159 T_EXPECT_POSIX_SUCCESS(
160 kdebug_signpost_end(SIGNPOST_PAIRED_CODE, 9, 10, 11, 12),
161 "emitted end signpost");
162#pragma clang diagnostic pop
163 ktrace_end(s, 0);
164
165 dispatch_main();
166}
167
168T_DECL(syscall_tracing,
169 "ensure that syscall arguments are traced properly")
170{
171 ktrace_session_t s = ktrace_session_create();
172 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
173
174 __block bool seen = 0;
175
176 ktrace_filter_pid(s, getpid());
177
178 static const int telemetry_syscall_no = 451;
179 static const uint64_t arg1 = 0xfeedfacefeedface;
180
181 ktrace_events_single(s, BSDDBG_CODE(DBG_BSD_EXCP_SC, telemetry_syscall_no),
182 ^(struct trace_point *evt){
183 if (KDBG_EXTRACT_CODE(evt->debugid) != telemetry_syscall_no || seen) {
184 return;
185 }
186
187 seen = true;
188 if (ktrace_is_kernel_64_bit(s)) {
189 T_EXPECT_EQ(evt->arg1, arg1,
190 "argument 1 of syscall event is correct");
191 } else {
192 T_EXPECT_EQ(evt->arg1, (uint64_t)(uint32_t)(arg1),
193 "argument 1 of syscall event is correct");
194 }
195
196 ktrace_end(s, 1);
197 });
198
199 ktrace_set_completion_handler(s, ^{
200 T_ASSERT_TRUE(seen,
201 "should have seen a syscall event for telemetry(2)");
202 ktrace_session_destroy(s);
203 T_END;
204 });
205
206 int error = ktrace_start(s, dispatch_get_main_queue());
207 T_ASSERT_POSIX_ZERO(error, "started tracing");
208
209 /*
210 * telemetry(2) has a 64-bit argument that will definitely be traced, and
211 * is unlikely to be used elsewhere by this process.
212 */
213 extern int __telemetry(uint64_t cmd, uint64_t deadline, uint64_t interval,
214 uint64_t leeway, uint64_t arg4, uint64_t arg5);
215 (void)__telemetry(arg1, 0, 0, 0, 0, 0);
216
217 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC),
218 dispatch_get_main_queue(), ^{
219 T_LOG("ending test due to timeout");
220 ktrace_end(s, 0);
221 });
222
223 dispatch_main();
224}
225
226#pragma mark kdebug behaviors
227
228#define WRAPPING_EVENTS_COUNT (150000)
229#define TRACE_ITERATIONS (5000)
230#define WRAPPING_EVENTS_THRESHOLD (100)
231
232T_DECL(wrapping,
233 "ensure that wrapping traces lost events and no events prior to the wrap",
234 T_META_CHECK_LEAKS(false))
235{
236 kbufinfo_t buf_info;
237 int wait_wrapping_secs = (WRAPPING_EVENTS_COUNT / TRACE_ITERATIONS) + 5;
238 int current_secs = wait_wrapping_secs;
239
240 start_controlling_ktrace();
241
242 /* use sysctls manually to bypass libktrace assumptions */
243
244 int mib[4] = { CTL_KERN, KERN_KDEBUG };
245 mib[2] = KERN_KDSETBUF; mib[3] = WRAPPING_EVENTS_COUNT;
246 T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDSETBUF");
247
248 mib[2] = KERN_KDSETUP; mib[3] = 0;
249 size_t needed = 0;
250 T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, NULL, &needed, NULL, 0),
251 "KERN_KDSETUP");
252
253 mib[2] = KERN_KDENABLE; mib[3] = 1;
254 T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDENABLE");
255
256 /* wrapping is on by default */
257
258 /* wait until wrapped */
259 T_LOG("waiting for trace to wrap");
260 mib[2] = KERN_KDGETBUF;
261 needed = sizeof(buf_info);
262 do {
263 sleep(1);
264 for (int i = 0; i < TRACE_ITERATIONS; i++) {
265 T_QUIET;
266 T_ASSERT_POSIX_SUCCESS(kdebug_trace(0xfefe0000, 0, 0, 0, 0), NULL);
267 }
268 T_QUIET;
269 T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, &buf_info, &needed, NULL, 0),
270 NULL);
271 } while (!(buf_info.flags & KDBG_WRAPPED) && --current_secs > 0);
272
273 T_ASSERT_TRUE(buf_info.flags & KDBG_WRAPPED,
274 "trace wrapped (after %d seconds within %d second timeout)",
275 wait_wrapping_secs - current_secs, wait_wrapping_secs);
276
277 ktrace_session_t s = ktrace_session_create();
278 T_QUIET; T_ASSERT_NOTNULL(s, NULL);
279 T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(s), NULL);
280
281 __block int events = 0;
282
283 ktrace_events_all(s, ^(struct trace_point *tp) {
284 if (events == 0) {
285 T_EXPECT_EQ(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
286 "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS",
287 tp->debugid,
288 ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
289 } else {
290 T_QUIET;
291 T_EXPECT_NE(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
292 "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS",
293 tp->debugid,
294 ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
295 }
296
297 events++;
298 if (events > WRAPPING_EVENTS_THRESHOLD) {
299 ktrace_end(s, 1);
300 }
301 });
302
303 ktrace_set_completion_handler(s, ^{
304 ktrace_session_destroy(s);
305 T_END;
306 });
307
308 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
309 "started tracing");
310
311 dispatch_main();
312}
313
314T_DECL(reject_old_events,
315 "ensure that kdebug rejects events from before tracing began",
316 T_META_CHECK_LEAKS(false))
317{
318 __block uint64_t event_horizon_ts;
319
320 start_controlling_ktrace();
321
322 ktrace_session_t s = ktrace_session_create();
323 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
324
325 __block int events = 0;
326 ktrace_events_single(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 1),
327 ^(struct trace_point *tp) {
328 events++;
329 T_EXPECT_GT(tp->timestamp, event_horizon_ts,
330 "events in trace should be from after tracing began");
331 });
332
333 ktrace_set_completion_handler(s, ^{
334 T_EXPECT_EQ(events, 2, "should see only two events");
335 ktrace_session_destroy(s);
336 T_END;
337 });
338
339 event_horizon_ts = mach_absolute_time();
340
341 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
342 /* first, try an old event at the beginning of trace */
343 assert_kdebug_test(KDBG_TEST_OLD_TIMES);
344 /* after a good event has been traced, old events should be rejected */
345 assert_kdebug_test(KDBG_TEST_OLD_TIMES);
346 ktrace_end(s, 0);
347
348 dispatch_main();
349}
350
351#define ORDERING_TIMEOUT_SEC 5
352
353T_DECL(ascending_time_order,
354 "ensure that kdebug events are in ascending order based on time",
355 T_META_CHECK_LEAKS(false))
356{
357 __block uint64_t prev_ts = 0;
358 __block uint32_t prev_debugid = 0;
359 __block unsigned int prev_cpu = 0;
360 __block bool in_order = true;
361
362 start_controlling_ktrace();
363
364 ktrace_session_t s = ktrace_session_create();
365 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
366
367 ktrace_events_all(s, ^(struct trace_point *tp) {
368 if (tp->timestamp < prev_ts) {
369 in_order = false;
370 T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
371 prev_ts, prev_debugid, prev_cpu);
372 T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
373 tp->timestamp, tp->debugid, tp->cpuid);
374 ktrace_end(s, 1);
375 }
376 });
377
378 ktrace_set_completion_handler(s, ^{
379 ktrace_session_destroy(s);
380 T_EXPECT_TRUE(in_order, "event timestamps were in-order");
381 T_END;
382 });
383
384 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()),
385 "started tracing");
386
387 /* try to inject old timestamps into trace */
388 assert_kdebug_test(KDBG_TEST_OLD_TIMES);
389
390 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ORDERING_TIMEOUT_SEC * NSEC_PER_SEC),
391 dispatch_get_main_queue(), ^{
392 T_LOG("ending test after timeout");
393 ktrace_end(s, 1);
394 });
395
396 dispatch_main();
397}
398
399#pragma mark dyld tracing
400
401__attribute__((aligned(8)))
402static const char map_uuid[16] = "map UUID";
403
404__attribute__((aligned(8)))
405static const char unmap_uuid[16] = "unmap UUID";
406
407__attribute__((aligned(8)))
408static const char sc_uuid[16] = "shared UUID";
409
410static fsid_t map_fsid = { .val = { 42, 43 } };
411static fsid_t unmap_fsid = { .val = { 44, 45 } };
412static fsid_t sc_fsid = { .val = { 46, 47 } };
413
414static fsobj_id_t map_fsobjid = { .fid_objno = 42, .fid_generation = 43 };
415static fsobj_id_t unmap_fsobjid = { .fid_objno = 44, .fid_generation = 45 };
416static fsobj_id_t sc_fsobjid = { .fid_objno = 46, .fid_generation = 47 };
417
418#define MAP_LOAD_ADDR 0xabadcafe
419#define UNMAP_LOAD_ADDR 0xfeedface
420#define SC_LOAD_ADDR 0xfedfaced
421
422__unused
423static void
424expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid,
425 uint64_t exp_load_addr, fsid_t *exp_fsid, fsobj_id_t *exp_fsobjid,
426 int order)
427{
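	/*
	 * The image info is spread across DYLD_EVENTS tracepoints: 64-bit
	 * kernels fit the UUID, load address, and fsid in the first event and
	 * the fsobjid in the second, while 32-bit kernels need a third event
	 * for the fsobjid generation.
	 */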
428#if defined(__LP64__) || defined(__arm64__)
429 if (order == 0) {
430 uint64_t uuid[2];
431 uint64_t load_addr;
432 fsid_t fsid;
433
434 uuid[0] = (uint64_t)tp->arg1;
435 uuid[1] = (uint64_t)tp->arg2;
436 load_addr = (uint64_t)tp->arg3;
437 fsid.val[0] = (int32_t)(tp->arg4 & UINT32_MAX);
438 fsid.val[1] = (int32_t)((uint64_t)tp->arg4 >> 32);
439
440 T_QUIET; T_EXPECT_EQ(uuid[0], exp_uuid[0], NULL);
441 T_QUIET; T_EXPECT_EQ(uuid[1], exp_uuid[1], NULL);
442 T_QUIET; T_EXPECT_EQ(load_addr, exp_load_addr, NULL);
443 T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
444 T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
445 } else if (order == 1) {
446 fsobj_id_t fsobjid;
447
448 fsobjid.fid_objno = (uint32_t)(tp->arg1 & UINT32_MAX);
449 fsobjid.fid_generation = (uint32_t)((uint64_t)tp->arg1 >> 32);
450
451 T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
452 T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
453 exp_fsobjid->fid_generation, NULL);
454 } else {
455 T_ASSERT_FAIL("unrecognized order of events %d", order);
456 }
457#else /* defined(__LP64__) */
458 if (order == 0) {
459 uint32_t uuid[4];
460
461 uuid[0] = (uint32_t)tp->arg1;
462 uuid[1] = (uint32_t)tp->arg2;
463 uuid[2] = (uint32_t)tp->arg3;
464 uuid[3] = (uint32_t)tp->arg4;
465
466 T_QUIET; T_EXPECT_EQ(uuid[0], (uint32_t)exp_uuid[0], NULL);
467 T_QUIET; T_EXPECT_EQ(uuid[1], (uint32_t)(exp_uuid[0] >> 32), NULL);
468 T_QUIET; T_EXPECT_EQ(uuid[2], (uint32_t)exp_uuid[1], NULL);
469 T_QUIET; T_EXPECT_EQ(uuid[3], (uint32_t)(exp_uuid[1] >> 32), NULL);
470 } else if (order == 1) {
471 uint32_t load_addr;
472 fsid_t fsid;
473 fsobj_id_t fsobjid;
474
475 load_addr = (uint32_t)tp->arg1;
476 fsid.val[0] = (int32_t)tp->arg2;
477 fsid.val[1] = (int32_t)tp->arg3;
478 fsobjid.fid_objno = (uint32_t)tp->arg4;
479
480 T_QUIET; T_EXPECT_EQ(load_addr, (uint32_t)exp_load_addr, NULL);
481 T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
482 T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
483 T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
484 } else if (order == 2) {
485 fsobj_id_t fsobjid;
486
487 fsobjid.fid_generation = tp->arg1;
488
489 T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
490 exp_fsobjid->fid_generation, NULL);
491 } else {
492 T_ASSERT_FAIL("unrecognized order of events %d", order);
493 }
494#endif /* defined(__LP64__) */
495}
496
497#if defined(__LP64__) || defined(__arm64__)
498#define DYLD_CODE_OFFSET (0)
499#define DYLD_EVENTS (2)
500#else
501#define DYLD_CODE_OFFSET (2)
502#define DYLD_EVENTS (3)
503#endif
504
505static void
506expect_dyld_events(ktrace_session_t s, const char *name, uint32_t base_code,
507 const char *exp_uuid, uint64_t exp_load_addr, fsid_t *exp_fsid,
508 fsobj_id_t *exp_fsobjid, uint8_t *saw_events)
509{
510 for (int i = 0; i < DYLD_EVENTS; i++) {
511 ktrace_events_single(s, KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID,
512 base_code + DYLD_CODE_OFFSET + (unsigned int)i),
513 ^(struct trace_point *tp) {
514 T_LOG("checking %s event %c", name, 'A' + i);
515 expect_dyld_image_info(tp, (const void *)exp_uuid, exp_load_addr,
516 exp_fsid, exp_fsobjid, i);
517 *saw_events |= (1U << i);
518 });
519 }
520}
521
522T_DECL(dyld_events, "test that dyld registering libraries emits events")
523{
524 dyld_kernel_image_info_t info;
525
526 /*
527 * Use pointers instead of __block variables in order to use these variables
528 * in the completion block below _and_ pass pointers to them to the
529 * expect_dyld_events function.
530 */
531 uint8_t saw_events[3] = { 0 };
532 uint8_t *saw_mapping = &(saw_events[0]);
533 uint8_t *saw_unmapping = &(saw_events[1]);
534 uint8_t *saw_shared_cache = &(saw_events[2]);
535
536 start_controlling_ktrace();
537
538 ktrace_session_t s = ktrace_session_create();
539 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
540
541 T_QUIET;
542 T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
543 "filtered to current process");
544
545 expect_dyld_events(s, "mapping", DBG_DYLD_UUID_MAP_A, map_uuid,
546 MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping);
547 expect_dyld_events(s, "unmapping", DBG_DYLD_UUID_UNMAP_A, unmap_uuid,
548 UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping);
549 expect_dyld_events(s, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A,
550 sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache);
551
552 ktrace_set_completion_handler(s, ^{
553 ktrace_session_destroy(s);
554
555 T_EXPECT_EQ(__builtin_popcount(*saw_mapping), DYLD_EVENTS, NULL);
556 T_EXPECT_EQ(__builtin_popcount(*saw_unmapping), DYLD_EVENTS, NULL);
557 T_EXPECT_EQ(__builtin_popcount(*saw_shared_cache), DYLD_EVENTS, NULL);
558 T_END;
559 });
560
561 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
562
563 info.load_addr = MAP_LOAD_ADDR;
564 memcpy(info.uuid, map_uuid, sizeof(info.uuid));
565 info.fsid = map_fsid;
566 info.fsobjid = map_fsobjid;
567 T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(),
568 &info, 1), "registered dyld image info");
569
570 info.load_addr = UNMAP_LOAD_ADDR;
571 memcpy(info.uuid, unmap_uuid, sizeof(info.uuid));
572 info.fsid = unmap_fsid;
573 info.fsobjid = unmap_fsobjid;
574 T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(),
575 &info, 1), "unregistered dyld image info");
576
577 info.load_addr = SC_LOAD_ADDR;
578 memcpy(info.uuid, sc_uuid, sizeof(info.uuid));
579 info.fsid = sc_fsid;
580 info.fsobjid = sc_fsobjid;
581 T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info(
582 mach_task_self(), info, FALSE, FALSE),
583 "registered dyld shared cache image info");
584
585 ktrace_end(s, 0);
586
587 dispatch_main();
588}
589
590#pragma mark kdebug kernel macros
591
592#define EXP_KERNEL_EVENTS 5U
593
594static const uint32_t dev_evts[EXP_KERNEL_EVENTS] = {
595 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 0),
596 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 1),
597 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 2),
598 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 3),
599 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 4),
600};
601
602static const uint32_t rel_evts[EXP_KERNEL_EVENTS] = {
603 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 5),
604 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 6),
605 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 7),
606 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 8),
607 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 9),
608};
609
610static const uint32_t filt_evts[EXP_KERNEL_EVENTS] = {
611 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 10),
612 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 11),
613 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 12),
614 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 13),
615 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 14),
616};
617
618static const uint32_t noprocfilt_evts[EXP_KERNEL_EVENTS] = {
619 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 15),
620 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 16),
621 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 17),
622 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 18),
623 BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 19),
624};
625
626static bool
627is_development_kernel(void)
628{
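	/* Query kern.development once and cache the answer. */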
629 static dispatch_once_t is_development_once;
630 static bool is_development;
631
632 dispatch_once(&is_development_once, ^{
633 int dev;
634 size_t dev_size = sizeof(dev);
635
636 T_QUIET;
637 T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.development", &dev,
638 &dev_size, NULL, 0), NULL);
639 is_development = (dev != 0);
640 });
641
642 return is_development;
643}
644
645static void
646expect_event(struct trace_point *tp, const char *name, unsigned int *events,
647 const uint32_t *event_ids, size_t event_ids_len)
648{
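	/*
	 * Each matching test event is expected to carry one more argument than
	 * the last, counting up from 1, with any remaining arguments zeroed.
	 */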
649 unsigned int event_idx = *events;
650 bool event_found = false;
651 size_t i;
652 for (i = 0; i < event_ids_len; i++) {
653 if (event_ids[i] == (tp->debugid & KDBG_EVENTID_MASK)) {
654 T_LOG("found %s event 0x%x", name, tp->debugid);
655 event_found = true;
656 }
657 }
658
659 if (!event_found) {
660 return;
661 }
662
663 *events += 1;
664 for (i = 0; i < event_idx; i++) {
665 T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)i + 1,
666 NULL);
667 }
668 for (; i < 4; i++) {
669 T_QUIET; T_EXPECT_EQ(((uint64_t *)&tp->arg1)[i], (uint64_t)0, NULL);
670 }
671}
672
673static void
674expect_release_event(struct trace_point *tp, unsigned int *events)
675{
676 expect_event(tp, "release", events, rel_evts,
677 sizeof(rel_evts) / sizeof(rel_evts[0]));
678}
679
680static void
681expect_development_event(struct trace_point *tp, unsigned int *events)
682{
683 expect_event(tp, "dev", events, dev_evts, sizeof(dev_evts) / sizeof(dev_evts[0]));
684}
685
686static void
687expect_filtered_event(struct trace_point *tp, unsigned int *events)
688{
689 expect_event(tp, "filtered", events, filt_evts,
690 sizeof(filt_evts) / sizeof(filt_evts[0]));
691}
692
693static void
694expect_noprocfilt_event(struct trace_point *tp, unsigned int *events)
695{
696 expect_event(tp, "noprocfilt", events, noprocfilt_evts,
697 sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]));
698}
699
700static void
701expect_kdbg_test_events(ktrace_session_t s, bool use_all_callback,
702 void (^cb)(unsigned int dev_seen, unsigned int rel_seen,
703 unsigned int filt_seen, unsigned int noprocfilt_seen))
704{
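	/*
	 * Tally the development, release, filtered, and no-process-filter test
	 * events, run the in-kernel macro test, and hand the counts to the
	 * caller's block from the completion handler.
	 */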
705 __block unsigned int dev_seen = 0;
706 __block unsigned int rel_seen = 0;
707 __block unsigned int filt_seen = 0;
708 __block unsigned int noprocfilt_seen = 0;
709
710 void (^evtcb)(struct trace_point *tp) = ^(struct trace_point *tp) {
711 expect_development_event(tp, &dev_seen);
712 expect_release_event(tp, &rel_seen);
713 expect_filtered_event(tp, &filt_seen);
714 expect_noprocfilt_event(tp, &noprocfilt_seen);
715 };
716
717 if (use_all_callback) {
718 ktrace_events_all(s, evtcb);
719 } else {
720 ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
721 KDBG_EVENTID(DBG_BSD + 1, 0, 0), evtcb);
722 }
723
724 ktrace_set_completion_handler(s, ^{
725 ktrace_session_destroy(s);
726 cb(dev_seen, rel_seen, filt_seen, noprocfilt_seen);
727 T_END;
728 });
729
730 T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
731 assert_kdebug_test(KDBG_TEST_MACROS);
732
733 ktrace_end(s, 0);
734}
735
736T_DECL(kernel_events, "ensure kernel macros work")
737{
738 start_controlling_ktrace();
739
740 ktrace_session_t s = ktrace_session_create();
741 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
742
743 T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
744 "filtered events to current process");
745
746 expect_kdbg_test_events(s, false,
747 ^(unsigned int dev_seen, unsigned int rel_seen,
748 unsigned int filt_seen, unsigned int noprocfilt_seen) {
749 /*
750 * Development-only events are only filtered if running on an embedded
751 * OS.
752 */
753 unsigned int dev_exp;
754#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR)
755 dev_exp = is_development_kernel() ? EXP_KERNEL_EVENTS : 0U;
756#else
757 dev_exp = EXP_KERNEL_EVENTS;
758#endif
759
760 T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS,
761 "release and development events seen");
762 T_EXPECT_EQ(dev_seen, dev_exp, "development-only events %sseen",
763 dev_exp ? "" : "not ");
764 T_EXPECT_EQ(filt_seen, dev_exp, "filter-only events seen");
765 T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
766 "process filter-agnostic events seen");
767 });
768
769 dispatch_main();
770}
771
772T_DECL(kernel_events_filtered, "ensure that the filtered kernel macros work")
773{
774 start_controlling_ktrace();
775
776 ktrace_session_t s = ktrace_session_create();
777 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
778
779 T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()),
780 "filtered events to current process");
781
782 expect_kdbg_test_events(s, true,
783 ^(unsigned int dev_seen, unsigned int rel_seen,
784 unsigned int filt_seen, unsigned int noprocfilt_seen) {
785 T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, NULL);
786#if defined(__arm__) || defined(__arm64__)
787 T_EXPECT_EQ(dev_seen, is_development_kernel() ? EXP_KERNEL_EVENTS : 0U,
788 NULL);
789#else
790 T_EXPECT_EQ(dev_seen, EXP_KERNEL_EVENTS,
791 "development-only events seen");
792#endif /* defined(__arm__) || defined(__arm64__) */
793 T_EXPECT_EQ(filt_seen, 0U, "no filter-only events seen");
794 T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
795 "process filter-agnostic events seen");
796 });
797
798 dispatch_main();
799}
800
801T_DECL(kernel_events_noprocfilt,
802 "ensure that the no process filter kernel macros work")
803{
804 start_controlling_ktrace();
805
806 ktrace_session_t s = ktrace_session_create();
807 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
808
809 /*
810 * Only allow launchd events through.
811 */
812 T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, 1), "filtered events to launchd");
813 for (size_t i = 0; i < sizeof(noprocfilt_evts) / sizeof(noprocfilt_evts[0]); i++) {
814 T_QUIET;
815 T_ASSERT_POSIX_ZERO(ktrace_ignore_process_filter_for_event(s,
816 noprocfilt_evts[i]),
817 "ignored process filter for noprocfilt event");
818 }
819
820 expect_kdbg_test_events(s, false,
821 ^(unsigned int dev_seen, unsigned int rel_seen,
822 unsigned int filt_seen, unsigned int noprocfilt_seen) {
823 T_EXPECT_EQ(rel_seen, 0U, "release and development events not seen");
824 T_EXPECT_EQ(dev_seen, 0U, "development-only events not seen");
825 T_EXPECT_EQ(filt_seen, 0U, "filter-only events not seen");
826
827 T_EXPECT_EQ(noprocfilt_seen, EXP_KERNEL_EVENTS,
828 "process filter-agnostic events seen");
829 });
830
831 dispatch_main();
832}
833
834static volatile bool continue_abuse = true;
835
836#define STRESS_DEBUGID (0xfeedfac0)
837#define ABUSE_SECS (2)
838#define TIMER_NS (100 * NSEC_PER_USEC)
839/*
840 * Use the quantum as the gap threshold.
841 */
842#define GAP_THRESHOLD_NS (10 * NSEC_PER_MSEC)
843
844static void *
845kdebug_abuser_thread(void *ctx)
846{
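	/* Emit events as fast as possible until the main thread stops us. */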
847 unsigned int id = (unsigned int)ctx;
848 uint64_t i = 0;
849 while (continue_abuse) {
850 kdebug_trace(STRESS_DEBUGID, id, i, 0, 0);
851 i++;
852 }
853
854 return NULL;
855}
856
857T_DECL(stress, "emit events on all but one CPU with a small buffer",
858 T_META_CHECK_LEAKS(false))
859{
860 start_controlling_ktrace();
861
862 T_SETUPBEGIN;
863 ktrace_session_t s = ktrace_session_create();
864 T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");
865
866 /* Let's not waste any time with pleasantries. */
867 ktrace_set_uuid_map_enabled(s, KTRACE_FEATURE_DISABLED);
868
869 /* Ouch. */
870 ktrace_events_all(s, ^(__unused struct trace_point *tp) {});
871 ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
872 (void)atexit_b(^{ kperf_reset(); });
873 (void)kperf_action_count_set(1);
874 (void)kperf_timer_count_set(1);
875 int kperror = kperf_timer_period_set(0, kperf_ns_to_ticks(TIMER_NS));
876 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_period_set %llu ns",
877 TIMER_NS);
878 kperror = kperf_timer_action_set(0, 1);
879 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_timer_action_set");
880 kperror = kperf_action_samplers_set(1, KPERF_SAMPLER_TINFO |
881 KPERF_SAMPLER_TH_SNAPSHOT | KPERF_SAMPLER_KSTACK |
882 KPERF_SAMPLER_USTACK | KPERF_SAMPLER_MEMINFO |
883 KPERF_SAMPLER_TINFO_SCHED | KPERF_SAMPLER_TH_DISPATCH |
884 KPERF_SAMPLER_TK_SNAPSHOT | KPERF_SAMPLER_SYS_MEM |
885 KPERF_SAMPLER_TH_INSTRS_CYCLES);
886 T_QUIET; T_ASSERT_POSIX_SUCCESS(kperror, "kperf_action_samplers_set");
887 /* You monster... */
888
889 /* The coup-de-grace. */
890 ktrace_set_buffer_size(s, 10);
891
892 char filepath_arr[MAXPATHLEN] = "";
893 strlcpy(filepath_arr, dt_tmpdir(), sizeof(filepath_arr));
894 strlcat(filepath_arr, "/stress.ktrace", sizeof(filepath_arr));
895 char *filepath = filepath_arr;
896
897 int ncpus = 0;
898 size_t ncpus_size = sizeof(ncpus);
899 int ret = sysctlbyname("hw.logicalcpu_max", &ncpus, &ncpus_size, NULL, 0);
900 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(\"hw.logicalcpu_max\")");
901 T_QUIET; T_ASSERT_GT(ncpus, 0, "realistic number of CPUs");
902
903 pthread_t *threads = calloc((unsigned int)ncpus - 1, sizeof(pthread_t));
904 T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
905 ncpus - 1);
906
907 ktrace_set_completion_handler(s, ^{
908 T_SETUPBEGIN;
909 ktrace_session_destroy(s);
910
911 T_LOG("trace ended, searching for gaps");
912
913 ktrace_session_t sread = ktrace_session_create();
914 T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(sread, "ktrace_session_create");
915
916 int error = ktrace_set_file(sread, filepath);
917 T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_set_file %s", filepath);
918
919 ktrace_file_t f = ktrace_file_open(filepath, false);
920 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(f, "ktrace_file_open %s",
921 filepath);
922 uint64_t first_timestamp = 0;
923 error = ktrace_file_earliest_timestamp(f, &first_timestamp);
924 T_QUIET; T_ASSERT_POSIX_ZERO(error, "ktrace_file_earliest_timestamp");
925
926 uint64_t last_timestamp = 0;
927 (void)ktrace_file_latest_timestamp(f, &last_timestamp);
928
929 __block uint64_t prev_timestamp = 0;
930 __block uint64_t nevents = 0;
931 ktrace_events_all(sread, ^(struct trace_point *tp) {
932 nevents++;
933 uint64_t delta_ns = 0;
934 T_QUIET; T_EXPECT_GE(tp->timestamp, prev_timestamp,
935 "timestamps are monotonically increasing");
936 int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
937 tp->timestamp - prev_timestamp, &delta_ns);
938 T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
939 if (prev_timestamp && delta_ns > GAP_THRESHOLD_NS) {
940 if (tp->debugname) {
941 T_LOG("gap: %gs at %llu - %llu on %d: %s (%#08x)",
942 (double)delta_ns / 1e9, prev_timestamp,
943 tp->timestamp, tp->cpuid, tp->debugname, tp->debugid);
944 } else {
945 T_LOG("gap: %gs at %llu - %llu on %d: %#x",
946 (double)delta_ns / 1e9, prev_timestamp,
947 tp->timestamp, tp->cpuid, tp->debugid);
948 }
949
950 /*
951 * These gaps are ok -- they appear after CPUs are brought back
952 * up.
953 */
954#define INTERRUPT (0x1050000)
955#define PERF_CPU_IDLE (0x27001000)
956#define INTC_HANDLER (0x5000004)
957#define DECR_TRAP (0x1090000)
958 uint32_t eventid = tp->debugid & KDBG_EVENTID_MASK;
959 if (eventid != INTERRUPT && eventid != PERF_CPU_IDLE &&
960 eventid != INTC_HANDLER && eventid != DECR_TRAP) {
961 unsigned int lost_events = TRACE_LOST_EVENTS;
962 T_QUIET; T_EXPECT_EQ(tp->debugid, lost_events,
963 "gaps should end with lost events");
964 }
965 }
966
967 prev_timestamp = tp->timestamp;
968 });
969 ktrace_events_single(sread, TRACE_LOST_EVENTS, ^(struct trace_point *tp){
970 T_LOG("lost: %llu on %d (%llu)", tp->timestamp, tp->cpuid, tp->arg1);
971 });
972
973 __block uint64_t last_write = 0;
974 ktrace_events_single_paired(sread, TRACE_WRITING_EVENTS,
975 ^(struct trace_point *start, struct trace_point *end) {
976 uint64_t delta_ns;
977 int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
978 start->timestamp - last_write, &delta_ns);
979 T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
980
981 uint64_t dur_ns;
982 converror = ktrace_convert_timestamp_to_nanoseconds(sread,
983 end->timestamp - start->timestamp, &dur_ns);
984 T_QUIET; T_ASSERT_POSIX_ZERO(converror, "convert timestamp to ns");
985
986 T_LOG("write: %llu (+%gs): %gus on %d: %llu events", start->timestamp,
987 (double)delta_ns / 1e9, (double)dur_ns / 1e3, end->cpuid, end->arg1);
988 last_write = end->timestamp;
989 });
990 ktrace_set_completion_handler(sread, ^{
991 uint64_t duration_ns = 0;
992 if (last_timestamp) {
993 int converror = ktrace_convert_timestamp_to_nanoseconds(sread,
994 last_timestamp - first_timestamp, &duration_ns);
995 T_QUIET; T_ASSERT_POSIX_ZERO(converror,
996 "convert timestamp to ns");
997 T_LOG("file was %gs long, %llu events: %g events/msec/cpu",
998 (double)duration_ns / 1e9, nevents,
999 (double)nevents / ((double)duration_ns / 1e6) / ncpus);
1000 }
1001 (void)unlink(filepath);
1002 ktrace_session_destroy(sread);
1003 T_END;
1004 });
1005
1006 int starterror = ktrace_start(sread, dispatch_get_main_queue());
1007 T_QUIET; T_ASSERT_POSIX_ZERO(starterror, "ktrace_start read session");
1008
1009 T_SETUPEND;
1010 });
1011
1012/* Just kidding... for now. */
1013#if 0
1014 kperror = kperf_sample_set(1);
1015 T_ASSERT_POSIX_SUCCESS(kperror,
1016 "started kperf timer sampling every %llu ns", TIMER_NS);
1017#endif
1018
1019 for (int i = 0; i < (ncpus - 1); i++) {
1020 int error = pthread_create(&threads[i], NULL, kdebug_abuser_thread,
1021 (void *)(uintptr_t)i);
1022 T_QUIET; T_ASSERT_POSIX_ZERO(error,
1023 "pthread_create abuser thread %d", i);
1024 }
1025
1026 int error = ktrace_start_writing_file(s, filepath,
1027 ktrace_compression_none, NULL, NULL);
1028 T_ASSERT_POSIX_ZERO(error, "started writing ktrace to %s", filepath);
1029
1030 T_SETUPEND;
1031
1032 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ABUSE_SECS * NSEC_PER_SEC),
1033 dispatch_get_main_queue(), ^{
1034 T_LOG("ending trace");
1035 ktrace_end(s, 1);
1036
1037 continue_abuse = false;
1038 for (int i = 0; i < (ncpus - 1); i++) {
1039 int joinerror = pthread_join(threads[i], NULL);
1040 T_QUIET; T_EXPECT_POSIX_ZERO(joinerror, "pthread_join thread %d",
1041 i);
1042 }
1043 });
1044
1045 dispatch_main();
1046}
1047
1048#define ROUND_TRIP_PERIOD UINT64_C(10 * 1000)
1049#define ROUND_TRIPS_THRESHOLD UINT64_C(25)
1050#define ROUND_TRIPS_TIMEOUT_SECS (2 * 60)
1051#define COLLECTION_INTERVAL_MS 100
1052
1053/*
1054 * Test a sustained tracing session, involving multiple round-trips to the
1055 * kernel.
1056 *
1057 * Trace all events, and every `ROUND_TRIP_PERIOD` events, emit an event that's
1058 * unlikely to be emitted elsewhere. Look for this event, too, and make sure we
1059 * see as many of them as we emitted.
1060 *
1061 * After seeing `ROUND_TRIPS_THRESHOLD` of the unlikely events, end tracing.
1062 * In the failure mode, we won't see any of these, so set a timeout of
1063 * `ROUND_TRIPS_TIMEOUT_SECS` to prevent hanging, waiting for events that we'll
1064 * never see.
1065 */
1066T_DECL(round_trips,
1067 "test sustained tracing with multiple round-trips through the kernel")
1068{
1069 start_controlling_ktrace();
1070
1071 ktrace_session_t s = ktrace_session_create();
1072 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
1073
1074 /*
1075 * Set a small buffer and collection interval to increase the number of
1076 * round-trips.
1077 */
1078 ktrace_set_buffer_size(s, 50);
1079 ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);
1080
1081 __block uint64_t events = 0;
1082 __block uint64_t emitted = 0;
1083 __block uint64_t seen = 0;
1084 ktrace_events_all(s, ^(__unused struct trace_point *tp) {
1085 events++;
1086 if (events % ROUND_TRIP_PERIOD == 0) {
1087 T_LOG("emitting round-trip event %" PRIu64, emitted);
1088 kdebug_trace(TRACE_DEBUGID, events, 0, 0, 0);
1089 emitted++;
1090 }
1091 });
1092
1093 ktrace_events_single(s, TRACE_DEBUGID, ^(__unused struct trace_point *tp) {
1094 T_LOG("saw round-trip event after %" PRIu64 " events", events);
1095 seen++;
1096 if (seen >= ROUND_TRIPS_THRESHOLD) {
1097 T_LOG("ending trace after seeing %" PRIu64 " events, "
1098 "emitting %" PRIu64, seen, emitted);
1099 ktrace_end(s, 1);
1100 }
1101 });
1102
1103 ktrace_set_completion_handler(s, ^{
1104 T_EXPECT_GE(emitted, ROUND_TRIPS_THRESHOLD,
1105 "emitted %" PRIu64 " round-trip events", emitted);
1106 T_EXPECT_GE(seen, ROUND_TRIPS_THRESHOLD,
1107 "saw %" PRIu64 " round-trip events", seen);
1108 ktrace_session_destroy(s);
1109 T_END;
1110 });
1111
1112 int error = ktrace_start(s, dispatch_get_main_queue());
1113 T_ASSERT_POSIX_ZERO(error, "started tracing");
1114
1115 dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
1116 ROUND_TRIPS_TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(),
1117 ^{
1118 T_LOG("ending trace after %d seconds", ROUND_TRIPS_TIMEOUT_SECS);
1119 ktrace_end(s, 0);
1120 });
1121
1122 dispatch_main();
1123}
1124
1125#define HEARTBEAT_INTERVAL_SECS 2
1126#define HEARTBEAT_COUNT 20
1127
1128/*
1129 * Ensure we see events periodically, checking for recent events on a
1130 * heart-beat.
1131 */
1132T_DECL(event_coverage, "ensure events appear up to the end of tracing")
1133{
1134 start_controlling_ktrace();
1135
1136 ktrace_session_t s = ktrace_session_create();
1137 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
1138
1139 __block uint64_t current_timestamp = 0;
1140 __block uint64_t events = 0;
1141 ktrace_events_all(s, ^(struct trace_point *tp) {
1142 current_timestamp = tp->timestamp;
1143 events++;
1144 });
1145
1146 ktrace_set_buffer_size(s, 20);
1147 ktrace_set_collection_interval(s, COLLECTION_INTERVAL_MS);
1148
1149 __block uint64_t last_timestamp = 0;
1150 __block uint64_t last_events = 0;
1151 __block unsigned int heartbeats = 0;
1152
1153 ktrace_set_completion_handler(s, ^{
1154 ktrace_session_destroy(s);
1155 T_QUIET; T_EXPECT_GT(events, 0ULL, "should have seen some events");
1156 T_END;
1157 });
1158
1159 dispatch_source_t timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER,
1160 0, 0, dispatch_get_main_queue());
1161 dispatch_source_set_timer(timer, dispatch_time(DISPATCH_TIME_NOW,
1162 HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC),
1163 HEARTBEAT_INTERVAL_SECS * NSEC_PER_SEC, 0);
1164 dispatch_source_set_cancel_handler(timer, ^{
1165 dispatch_release(timer);
1166 });
1167
1168 dispatch_source_set_event_handler(timer, ^{
1169 heartbeats++;
1170
1171 T_LOG("heartbeat %u at time %lld, seen %" PRIu64 " events, "
1172 "current event time %lld", heartbeats, mach_absolute_time(),
1173 events, current_timestamp);
1174
1175 if (current_timestamp > 0) {
1176 T_EXPECT_GT(current_timestamp, last_timestamp,
1177 "event timestamps should be increasing");
1178 T_QUIET; T_EXPECT_GT(events, last_events,
1179 "number of events should be increasing");
1180 }
1181
1182 last_timestamp = current_timestamp;
1183 last_events = events;
1184
1185 if (heartbeats >= HEARTBEAT_COUNT) {
1186 T_LOG("ending trace after %u heartbeats", HEARTBEAT_COUNT);
1187 ktrace_end(s, 0);
1188 }
1189 });
1190
1191 int error = ktrace_start(s, dispatch_get_main_queue());
1192 T_ASSERT_POSIX_ZERO(error, "started tracing");
1193
1194 dispatch_activate(timer);
1195
1196 dispatch_main();
1197}
1198
1199static unsigned int
1200set_nevents(unsigned int nevents)
1201{
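	/*
	 * Set the requested buffer size, initialize the buffers, read back how
	 * many events the kernel actually allocated, and tear the buffers down
	 * again.
	 */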
1202 T_QUIET;
1203 T_ASSERT_POSIX_SUCCESS(sysctl(
1204 (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSETBUF, (int)nevents }, 4,
1205 NULL, 0, NULL, 0), "set kdebug buffer size");
1206
1207 T_QUIET;
1208 T_ASSERT_POSIX_SUCCESS(sysctl(
1209 (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDSETUP, (int)nevents }, 4,
1210 NULL, 0, NULL, 0), "setup kdebug buffers");
1211
1212 kbufinfo_t bufinfo = { 0 };
1213 T_QUIET;
1214 T_ASSERT_POSIX_SUCCESS(sysctl(
1215 (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDGETBUF }, 3,
1216 &bufinfo, &(size_t){ sizeof(bufinfo) }, NULL, 0),
1217 "get kdebug buffer size");
1218
1219 T_QUIET;
1220 T_ASSERT_POSIX_SUCCESS(sysctl(
1221 (int[]){ CTL_KERN, KERN_KDEBUG, KERN_KDREMOVE }, 3,
1222 NULL, 0, NULL, 0),
1223 "remove kdebug buffers");
1224
1225 return (unsigned int)bufinfo.nkdbufs;
1226}
1227
1228T_DECL(set_buffer_size, "ensure large buffer sizes can be set")
1229{
1230 start_controlling_ktrace();
1231
1232 uint64_t memsize = 0;
1233 T_QUIET; T_ASSERT_POSIX_SUCCESS(sysctlbyname("hw.memsize", &memsize,
1234 &(size_t){ sizeof(memsize) }, NULL, 0), "get memory size");
1235
1236 /*
1237 * Try to allocate up to one-eighth of available memory towards
1238 * tracing.
1239 */
1240 uint64_t maxevents_u64 = memsize / 8 / sizeof(kd_buf);
1241 if (maxevents_u64 > UINT32_MAX) {
1242 maxevents_u64 = UINT32_MAX;
1243 }
1244 unsigned int maxevents = (unsigned int)maxevents_u64;
1245
1246 unsigned int minevents = set_nevents(0);
1247 T_ASSERT_GT(minevents, 0, "saw non-zero minimum event count of %u",
1248 minevents);
1249
1250 unsigned int step = ((maxevents - minevents - 1) / 4);
1251 T_ASSERT_GT(step, 0, "stepping by %u events", step);
1252
1253 for (unsigned int i = minevents + step; i < maxevents; i += step) {
1254 unsigned int actualevents = set_nevents(i);
1255 T_ASSERT_GE(actualevents, i - minevents,
1256 "%u events in kernel when %u requested", actualevents, i);
1257 }
1258}
1259
1260static void *
1261donothing(__unused void *arg)
1262{
1263 return NULL;
1264}
1265
1266T_DECL(long_names, "ensure long command names are reported")
1267{
1268 start_controlling_ktrace();
1269
1270 char longname[] = "thisisaverylongprocessname!";
1271 char *longname_ptr = longname;
1272 static_assert(sizeof(longname) > 16,
1273 "the name should be longer than MAXCOMLEN");
1274
1275 int ret = sysctlbyname("kern.procname", NULL, NULL, longname,
1276 sizeof(longname));
1277 T_ASSERT_POSIX_SUCCESS(ret,
1278 "use sysctl kern.procname to lengthen the name");
1279
1280 ktrace_session_t ktsess = ktrace_session_create();
1281
1282 /*
1283 * 32-bit kernels can only trace 16 bytes of the string in their event
1284 * arguments.
1285 */
1286 if (!ktrace_is_kernel_64_bit(ktsess)) {
1287 longname[16] = '\0';
1288 }
1289
1290 ktrace_filter_pid(ktsess, getpid());
1291
1292 __block bool saw_newthread = false;
1293 ktrace_events_single(ktsess, TRACE_STRING_NEWTHREAD,
1294 ^(struct trace_point *tp) {
1295 if (ktrace_get_pid_for_thread(ktsess, tp->threadid) ==
1296 getpid()) {
1297 saw_newthread = true;
1298
1299 char argname[32] = {};
1300 strncat(argname, (char *)&tp->arg1, sizeof(tp->arg1));
1301 strncat(argname, (char *)&tp->arg2, sizeof(tp->arg2));
1302 strncat(argname, (char *)&tp->arg3, sizeof(tp->arg3));
1303 strncat(argname, (char *)&tp->arg4, sizeof(tp->arg4));
1304
1305 T_EXPECT_EQ_STR((char *)argname, longname_ptr,
1306 "process name of new thread should be long");
1307
1308 ktrace_end(ktsess, 1);
1309 }
1310 });
1311
1312 ktrace_set_completion_handler(ktsess, ^{
1313 ktrace_session_destroy(ktsess);
1314 T_EXPECT_TRUE(saw_newthread,
1315 "should have seen the new thread");
1316 T_END;
1317 });
1318
1319 int error = ktrace_start(ktsess, dispatch_get_main_queue());
1320 T_ASSERT_POSIX_ZERO(error, "started tracing");
1321
1322 pthread_t thread = NULL;
1323 error = pthread_create(&thread, NULL, donothing, NULL);
1324 T_ASSERT_POSIX_ZERO(error, "create new thread");
1325
1326 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC),
1327 dispatch_get_main_queue(), ^{
1328 ktrace_end(ktsess, 0);
1329 });
1330
1331 error = pthread_join(thread, NULL);
1332 T_ASSERT_POSIX_ZERO(error, "join to thread");
1333
1334 dispatch_main();
1335}
1336
1337T_DECL(continuous_time, "make sure continuous time status can be queried",
1338 T_META_RUN_CONCURRENTLY(true))
1339{
1340 bool cont_time = kdebug_using_continuous_time();
1341 T_ASSERT_FALSE(cont_time, "should not be using continuous time yet");
1342}
1343
1344static const uint32_t frame_eventid = KDBG_EVENTID(DBG_BSD,
1345 DBG_BSD_KDEBUG_TEST, 1);
1346
1347static ktrace_session_t
1348future_events_session(void)
1349{
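	/*
	 * Fail if the future-timestamped IOP test event is ever seen; end the
	 * session once the frame end event shows up after the kernel test runs.
	 */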
1350 ktrace_session_t ktsess = ktrace_session_create();
1351 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(ktsess, "failed to create session");
1352
1353 ktrace_events_single(ktsess, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
1354 ^(struct trace_point *tp __unused) {
1355 T_FAIL("saw future test event from IOP");
1356 });
1357 ktrace_events_single(ktsess, frame_eventid, ^(struct trace_point *tp) {
1358 if (tp->debugid & DBG_FUNC_START) {
1359 T_LOG("saw start event");
1360 } else {
1361 T_LOG("saw event traced after trying to trace future event, ending");
1362 ktrace_end(ktsess, 1);
1363 }
1364 });
1365
1366 ktrace_set_collection_interval(ktsess, 100);
1367 return ktsess;
1368}
1369
1370T_DECL(future_iop_events,
1371 "make sure IOPs cannot trace events in the future while live tracing")
1372{
1373 start_controlling_ktrace();
1374 ktrace_session_t ktsess = future_events_session();
1375 ktrace_set_completion_handler(ktsess, ^{
1376 ktrace_session_destroy(ktsess);
1377 T_END;
1378 });
1379
1380 T_ASSERT_POSIX_ZERO(ktrace_start(ktsess, dispatch_get_main_queue()),
1381 "start tracing");
1382 kdebug_trace(frame_eventid | DBG_FUNC_START, 0, 0, 0, 0);
1383 assert_kdebug_test(KDBG_TEST_FUTURE_TIMES);
1384 kdebug_trace(frame_eventid | DBG_FUNC_END, 0, 0, 0, 0);
1385
1386 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 5 * NSEC_PER_SEC),
1387 dispatch_get_main_queue(), ^{
1388 T_FAIL("ending tracing after timeout");
1389 ktrace_end(ktsess, 0);
1390 });
1391
1392 dispatch_main();
1393}
1394
1395T_DECL(future_iop_events_disabled,
1396 "make sure IOPs cannot trace events in the future after disabling tracing")
1397{
1398 start_controlling_ktrace();
1399 ktrace_session_t ktsess = future_events_session();
1400 T_ASSERT_POSIX_ZERO(ktrace_configure(ktsess), "configure tracing");
1401
1402 kdebug_trace(frame_eventid | DBG_FUNC_START, 0, 0, 0, 0);
1403 assert_kdebug_test(KDBG_TEST_FUTURE_TIMES);
1404 kdebug_trace(frame_eventid | DBG_FUNC_END, 0, 0, 0, 0);
1405
1406 T_ASSERT_POSIX_ZERO(ktrace_disable_configured(ktsess),
1407 "disable tracing");
1408 ktrace_session_destroy(ktsess);
1409
1410 ktsess = future_events_session();
1411 T_QUIET;
1412 T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(ktsess), "use existing trace");
1413 ktrace_set_completion_handler(ktsess, ^{
1414 ktrace_session_destroy(ktsess);
1415 T_END;
1416 });
1417
1418 T_ASSERT_POSIX_ZERO(ktrace_start(ktsess, dispatch_get_main_queue()),
1419 "start tracing existing session");
1420
1421 dispatch_main();
1422}
1423
1424T_DECL(iop_events_disable,
1425 "make sure IOP events are flushed before disabling trace")
1426{
1427 start_controlling_ktrace();
1428 ktrace_session_t ktsess = future_events_session();
1429
1430 assert_kdebug_test(KDBG_TEST_IOP_SYNC_FLUSH);
1431 T_ASSERT_POSIX_ZERO(ktrace_configure(ktsess), "configure tracing");
1432
1433 kdebug_trace(frame_eventid | DBG_FUNC_START, 0, 0, 0, 0);
1434
1435 T_ASSERT_POSIX_ZERO(ktrace_disable_configured(ktsess),
1436 "disable tracing");
1437 ktrace_session_destroy(ktsess);
1438
1439 ktsess = ktrace_session_create();
1440 T_QUIET; T_WITH_ERRNO;
1441 T_ASSERT_NOTNULL(ktsess, "create session");
1442
1443 ktrace_events_single(ktsess,
1444 KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0xff),
1445 ^(struct trace_point *tp __unused) {
1446 T_PASS("saw IOP event from sync flush");
1447 });
1448
1449 T_QUIET;
1450 T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(ktsess), "use existing trace");
1451 ktrace_set_completion_handler(ktsess, ^{
1452 ktrace_session_destroy(ktsess);
1453 T_END;
1454 });
1455
1456 T_ASSERT_POSIX_ZERO(ktrace_start(ktsess, dispatch_get_main_queue()),
1457 "start tracing existing session");
1458
1459 dispatch_main();
1460}
1461
1462T_DECL(lookup_long_paths,
1463 "lookup long path names")
1464{
1465 start_controlling_ktrace();
1466
1467 int ret = chdir("/tmp");
1468 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to /tmp");
1469 const char *dir = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/";
1470 int i = 0;
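	/* Nest enough 32-character directories that the full path is longer than MAXPATHLEN. */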
1471 do {
1472 ret = mkdir(dir, S_IRUSR | S_IWUSR | S_IXUSR);
1473 if (ret >= 0 || errno != EEXIST) {
1474 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "mkdir of %d nested directory",
1475 i);
1476 }
1477 ret = chdir(dir);
1478 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "chdir to %d nested directory", i);
1479 } while (i++ < 40);
1480
1481 ktrace_session_t s = ktrace_session_create();
1482 ktrace_set_collection_interval(s, 250);
1483 ktrace_filter_pid(s, getpid());
1484 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "created session");
1485 ktrace_events_single(s, VFS_LOOKUP, ^(struct trace_point *tp __unused){});
1486 ktrace_set_vnode_paths_enabled(s, KTRACE_FEATURE_ENABLED);
1487
1488 dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test", 0);
1489
1490 ktrace_set_completion_handler(s, ^{
1491 dispatch_release(q);
1492 T_END;
1493 });
1494
1495 int error = ktrace_start(s, q);
1496 T_ASSERT_POSIX_ZERO(error, "started tracing");
1497
1498 int fd = open("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb", O_RDWR | O_CREAT, 0600);
1499 T_ASSERT_POSIX_SUCCESS(fd, "opened file at %d directories deep", i);
1500
1501 sleep(5);
1502
1503 T_LOG("ending tracing");
1504 ktrace_end(s, 0);
1505 dispatch_main();
1506}
1507
1508#pragma mark - boot tracing
1509
1510static const char *expected_subsystems[] = {
1511 "tunables", "locks_early", "kprintf", "pmap_steal", "vm_kernel",
1512 "kmem", "kmem_alloc", "zalloc",
1513 /* "percpu", only has a startup phase on Intel */
1514 "locks", "codesigning", "oslog", "early_boot",
1515};
1516#define EXPECTED_SUBSYSTEMS_LEN \
1517 (sizeof(expected_subsystems) / sizeof(expected_subsystems[0]))
1518
1519T_DECL(early_boot_tracing, "ensure early boot strings are present",
1520 T_META_BOOTARGS_SET("trace=100000"))
1521{
1522 T_ATEND(reset_ktrace);
1523
1524 T_SETUPBEGIN;
1525 ktrace_session_t s = ktrace_session_create();
1526 ktrace_set_collection_interval(s, 250);
1527 int error = ktrace_set_use_existing(s);
1528 T_ASSERT_POSIX_ZERO(error, "use existing trace buffer");
1529
1530#if defined(__x86_64__)
1531#define FIRST_EVENT_STRING "i386_init"
1532#else /* defined(__x86_64__) */
1533#define FIRST_EVENT_STRING "kernel_startup_bootstrap"
1534#endif /* !defined(__x86_64__) */
1535
1536 __block bool seen_event = false;
1537 __block unsigned int cur_subsystem = 0;
1538 ktrace_events_single(s, TRACE_INFO_STRING, ^(struct trace_point *tp) {
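		// Reassemble the logged string from the four tracepoint arguments
		// (8 bytes each on 64-bit kernels, 4 on 32-bit).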
1539 char early_str[33] = "";
1540 size_t argsize = ktrace_is_kernel_64_bit(s) ? 8 : 4;
1541 memcpy(early_str, &tp->arg1, argsize);
1542 memcpy(early_str + argsize, &tp->arg2, argsize);
1543 memcpy(early_str + argsize * 2, &tp->arg3, argsize);
1544 memcpy(early_str + argsize * 3, &tp->arg4, argsize);
1545
1546 if (!seen_event) {
1547 T_LOG("found first string event with args: "
1548 "0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64 ", 0x%" PRIx64,
1549 tp->arg1, tp->arg2, tp->arg3, tp->arg4);
1550 char expect_str[33] = FIRST_EVENT_STRING;
1551 if (!ktrace_is_kernel_64_bit(s)) {
1552 // Only the first 16 bytes of the string will be traced.
1553 expect_str[16] = '\0';
1554 }
1555
1556 T_EXPECT_EQ_STR(early_str, expect_str,
1557 "first event in boot trace should be the bootstrap message");
1558 }
1559 seen_event = true;
1560
1561 if (strcmp(early_str, expected_subsystems[cur_subsystem]) == 0) {
1562 T_LOG("found log for subsystem %s",
1563 expected_subsystems[cur_subsystem]);
1564 cur_subsystem++;
1565 }
1566
1567 if (cur_subsystem == EXPECTED_SUBSYSTEMS_LEN) {
1568 T_LOG("ending after seeing all expected logs");
1569 ktrace_end(s, 1);
1570 }
1571 });
1572
1573 ktrace_set_completion_handler(s, ^{
1574 T_EXPECT_TRUE(seen_event, "should see an early boot string event");
1575 T_EXPECT_TRUE(cur_subsystem == EXPECTED_SUBSYSTEMS_LEN,
1576 "should see logs from all subsystems");
1577 if (cur_subsystem != EXPECTED_SUBSYSTEMS_LEN) {
1578 T_LOG("missing log for %s", expected_subsystems[cur_subsystem]);
1579 }
1580 T_END;
1581 });
1582
1583 error = ktrace_start(s, dispatch_get_main_queue());
1584 T_ASSERT_POSIX_ZERO(error, "started tracing");
1585
1586 T_SETUPEND;
1587
1588 dispatch_main();
1589}
1590
1591T_DECL(typefilter_boot_arg, "ensure typefilter is set up correctly at boot",
1592 T_META_BOOTARGS_SET("trace=100000 trace_typefilter=S0x0c00,C0xfe"))
1593{
1594 T_ATEND(reset_ktrace);
1595
1596 T_SETUPBEGIN;
1597 ktrace_config_t config = ktrace_config_create_current();
1598 T_QUIET; T_WITH_ERRNO;
1599 T_ASSERT_NOTNULL(config, "create config from current system");
1600 T_SETUPEND;
1601
1602 T_LOG("ktrace configuration:");
1603 ktrace_config_print_description(config, stdout);
1604
1605 uint8_t *typefilt = ktrace_config_kdebug_get_typefilter(config);
1606 T_ASSERT_NOTNULL(typefilt, "typefilter is active");
1607 T_EXPECT_TRUE(typefilt[0x0c00 / 8],
1608 "specified subclass is set in typefilter");
1609 T_MAYFAIL; // rdar://63625062 (UTD converts commas in boot-args to spaces)
1610 T_EXPECT_TRUE(typefilt[0xfeed / 8],
1611 "specified class is set in typefilter");
1612
1613 ktrace_config_destroy(config);
1614}
1615
1616#pragma mark - events present
1617
1618static int recvd_sigchild = 0;
1619static void
1620sighandler(int sig)
1621{
1622 if (sig != SIGCHLD) {
1623 T_ASSERT_FAIL("unexpected signal: %d", sig);
1624 }
1625 recvd_sigchild = 1;
1626}
1627
1628T_DECL(instrs_and_cycles_on_proc_exit,
1629 "instructions and cycles should be traced on process exit",
1630 T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1))
1631{
1632 T_SETUPBEGIN;
1633 start_controlling_ktrace();
1634 int error;
1635 struct rusage_info_v4 *rusage = calloc(1, sizeof(*rusage));
1636 char *args[] = { "ls", "-l", NULL, };
1637 int status;
1638 dispatch_queue_t q = dispatch_queue_create("com.apple.kdebug-test",
1639 DISPATCH_QUEUE_SERIAL);
1640 T_QUIET; T_ASSERT_POSIX_SUCCESS(signal(SIGCHLD, sighandler),
1641 "register signal handler");
1642
1643 ktrace_session_t s = ktrace_session_create();
1644 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");
1645
1646 __block pid_t pid;
1647 __block bool seen_event = false;
1648 __block uint64_t proc_instrs = 0;
1649 __block uint64_t proc_cycles = 0;
1650 __block uint64_t proc_sys_time = 0;
1651 __block uint64_t proc_usr_time = 0;
1652 error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_PROC_EXIT,
1653 ^(ktrace_event_t tp){
1654 if (tp->pid == pid) {
1655 seen_event = true;
1656 proc_instrs = tp->arg1;
1657 proc_cycles = tp->arg2;
1658 proc_sys_time = tp->arg3;
1659 proc_usr_time = tp->arg4;
1660 ktrace_end(s, 1);
1661 }
1662 });
1663 T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(error, "trace single event");
1664 ktrace_set_completion_handler(s, ^{
1665 // TODO Check for equality once rdar://61948669 is fixed.
1666 T_ASSERT_GE(proc_instrs, rusage->ri_instructions,
1667 "trace event instrs are >= to rusage instrs");
1668 T_ASSERT_GE(proc_cycles, rusage->ri_cycles,
1669 "trace event cycles are >= to rusage cycles");
1670 T_ASSERT_GE(proc_sys_time, rusage->ri_system_time,
1671 "trace event sys time is >= rusage sys time");
1672 T_ASSERT_GE(proc_usr_time, rusage->ri_user_time,
1673 "trace event usr time >= rusage usr time");
1674 T_EXPECT_TRUE(seen_event, "should see the proc exit trace event");
1675
1676 free(rusage);
1677 ktrace_session_destroy(s);
1678 dispatch_release(q);
1679 T_END;
1680 });
1681 error = ktrace_start(s, q);
1682 T_ASSERT_POSIX_ZERO(error, "start tracing");
1683 T_SETUPEND;
1684
1685 extern char **environ;
1686 status = posix_spawnp(&pid, args[0], NULL, NULL, args, environ);
1687 T_QUIET; T_ASSERT_POSIX_SUCCESS(status, "spawn process");
1688 if (status == 0) {
1689 while (!recvd_sigchild) {
1690 pause();
1691 }
1692 error = proc_pid_rusage(pid, RUSAGE_INFO_V4, (rusage_info_t)rusage);
1693 T_QUIET; T_ASSERT_POSIX_ZERO(error, "rusage");
1694 error = waitpid(pid, &status, 0);
1695 T_QUIET; T_ASSERT_POSIX_SUCCESS(error, "waitpid");
1696 }
1697 dispatch_main();
1698}
1699
1700#define NO_OF_THREADS 2
1701
1702struct thread_counters_info {
1703 uint64_t counts[2]; //cycles and/or instrs
1704 uint64_t cpu_time;
1705 uint64_t thread_id;
1706};
1707typedef struct thread_counters_info *tc_info_t;
1708
1709static void*
1710get_thread_counters(void* ptr)
1711{
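	/*
	 * Record this thread's ID, CPU time, and instruction/cycle counts so
	 * the main thread can compare them against the exit tracepoint.
	 */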
1712 extern int thread_selfcounts(int type, void *buf, size_t nbytes);
1713 extern uint64_t __thread_selfusage(void);
1714 extern uint64_t __thread_selfid(void);
1715 tc_info_t tc_info = (tc_info_t) ptr;
1716 tc_info->thread_id = __thread_selfid();
1717 // Just to increase the instr, cycle count
1718 T_LOG("printing %llu\n", tc_info->thread_id);
1719 tc_info->cpu_time = __thread_selfusage();
1720 thread_selfcounts(1, tc_info->counts, sizeof(tc_info->counts));
1721 return NULL;
1722}
1723
1724T_DECL(instrs_and_cycles_on_thread_exit,
1725 "instructions and cycles should be traced on thread exit",
1726 T_META_REQUIRES_SYSCTL_EQ("kern.monotonic.supported", 1))
1727{
1728 T_SETUPBEGIN;
1729 start_controlling_ktrace();
1730
1731 int error;
1732 pthread_t *threads = calloc((unsigned int)(NO_OF_THREADS),
1733 sizeof(pthread_t));
1734 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(threads, "calloc(%d threads)",
1735 NO_OF_THREADS);
1736 tc_info_t tc_infos = calloc((unsigned int) (NO_OF_THREADS),
1737 sizeof(struct thread_counters_info));
1738 T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(tc_infos,
1739 "calloc(%d thread counters)", NO_OF_THREADS);
1740
1741 ktrace_session_t s = ktrace_session_create();
1742 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");
1743 ktrace_filter_pid(s, getpid());
1744
1745 __block int nevents = 0;
1746 error = ktrace_events_single(s, DBG_MT_INSTRS_CYCLES_THR_EXIT,
1747 ^(ktrace_event_t tp) {
1748 for (int i = 0; i < NO_OF_THREADS; i++) {
1749 if (tp->threadid == tc_infos[i].thread_id) {
1750 nevents++;
1751 uint64_t cpu_time = tp->arg3 + tp->arg4;
1752 /*
1753 * as we are getting counts before thread exit,
1754 * the counts at thread exit should be greater than
1755 * thread_selfcounts
1756 */
1757 T_ASSERT_GE(tp->arg1, tc_infos[i].counts[0],
1758 "trace event instrs are >= to thread's instrs");
1759 T_ASSERT_GE(tp->arg2, tc_infos[i].counts[1],
1760 "trace event cycles are >= to thread's cycles");
1761 T_ASSERT_GE(cpu_time, tc_infos[i].cpu_time,
1762 "trace event cpu time is >= thread's cpu time");
1763 }
1764 if (nevents == NO_OF_THREADS) {
1765 ktrace_end(s, 1);
1766 }
1767 }
1768 });
1769 T_QUIET; T_ASSERT_POSIX_ZERO(error, "trace single event");
1770 ktrace_set_completion_handler(s, ^{
1771 T_EXPECT_EQ(NO_OF_THREADS, nevents, "seen %d thread exit trace events",
1772 NO_OF_THREADS);
1773 free(tc_infos);
1774 ktrace_session_destroy(s);
1775 T_END;
1776 });
1777 error = ktrace_start(s, dispatch_get_main_queue());
1778 T_ASSERT_POSIX_ZERO(error, "start tracing");
1779
1780 for (int i = 0; i < NO_OF_THREADS; i++) {
1781 error = pthread_create(&threads[i], NULL, get_thread_counters,
1782 (void *)&tc_infos[i]);
1783 T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_create thread %d", i);
1784 }
1785 T_SETUPEND;
1786 for (int i = 0; i < NO_OF_THREADS; i++) {
1787 error = pthread_join(threads[i], NULL);
1788 T_QUIET; T_EXPECT_POSIX_ZERO(error, "pthread_join thread %d", i);
1789 }
1790
1791 dispatch_main();
1792}