/* apple/xnu (xnu-3789.51.2) - tools/tests/darwintests/kdebug.c */
#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <inttypes.h>
#include <ktrace.h>
#include <ktrace_private.h>
#include <mach/dyld_kernel.h>
#include <mach/host_info.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <os/assumes.h>
#include <sys/kdebug.h>
#include <sys/kdebug_signpost.h>
#include <sys/sysctl.h>

#define KTRACE_WAIT_TIMEOUT_S (10)

#define TRACE_DEBUGID (0xfedfed00U)

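/*
 * The tests below follow a common shape: create a ktrace session (usually
 * filtered to this process), register handlers for the event IDs of interest,
 * start the session on the main queue, emit the events, end the session, and
 * let the completion handler check what was seen before calling T_END.
 *
 * A kdebug event ID (debugid) packs a class, subclass, and code into 32 bits,
 * with the low two bits reserved for the function qualifier
 * (DBG_FUNC_START/DBG_FUNC_END); see KDBG_EVENTID() in <sys/kdebug.h>.
 * TRACE_DEBUGID above is just an arbitrary value in that format, used to
 * round-trip four arguments through kdebug_trace(2).
 */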
T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events",
        T_META_ASROOT(true))
{
    ktrace_session_t s;
    dispatch_time_t timeout;
    __block int events_seen = 0;

    s = ktrace_session_create();
    os_assert(s != NULL);

    ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
    ktrace_events_single(s, TRACE_DEBUGID, ^void(struct trace_point *tp) {
        events_seen++;
        T_PASS("saw traced event");

        T_EXPECT_EQ(tp->arg1, 1UL, "argument 1 of traced event is correct");
        T_EXPECT_EQ(tp->arg2, 2UL, "argument 2 of traced event is correct");
        T_EXPECT_EQ(tp->arg3, 3UL, "argument 3 of traced event is correct");
        T_EXPECT_EQ(tp->arg4, 4UL, "argument 4 of traced event is correct");

        ktrace_end(s, 1);
    });

    ktrace_set_completion_handler(s, ^(void) {
        T_EXPECT_GE(events_seen, 1, NULL);
        ktrace_session_destroy(s);
        T_END;
    });

    ktrace_filter_pid(s, getpid());

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
    T_ASSERT_POSIX_SUCCESS(kdebug_trace(TRACE_DEBUGID, 1, 2, 3, 4), NULL);
    ktrace_end(s, 0);

    dispatch_main();
}

#define SIGNPOST_SINGLE_CODE (0x10U)
#define SIGNPOST_PAIRED_CODE (0x20U)

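/*
 * kdebug_signpost(code, ...) shows up in the trace as the event ID
 * APPSDBG_CODE(DBG_APP_SIGNPOST, code); the _start/_end variants emit the
 * same event ID tagged with DBG_FUNC_START/DBG_FUNC_END, which is what lets
 * ktrace_events_single_paired() match up the pair below.
 */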
T_DECL(kdebug_signpost_syscall,
        "test that kdebug_signpost(2) emits correct events",
        T_META_ASROOT(true))
{
    ktrace_session_t s;
    __block int single_seen = 0;
    __block int paired_seen = 0;
    dispatch_time_t timeout;

    s = ktrace_session_create();
    T_ASSERT_NOTNULL(s, NULL);

    /* make sure to get enough events for the KDBUFWAIT to trigger */
    // ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
    ktrace_events_single(s,
            APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE),
            ^void(struct trace_point *tp)
    {
        single_seen++;
        T_PASS("single signpost is traced");

        T_EXPECT_EQ(tp->arg1, 1UL, "argument 1 of single signpost is correct");
        T_EXPECT_EQ(tp->arg2, 2UL, "argument 2 of single signpost is correct");
        T_EXPECT_EQ(tp->arg3, 3UL, "argument 3 of single signpost is correct");
        T_EXPECT_EQ(tp->arg4, 4UL, "argument 4 of single signpost is correct");
    });

    ktrace_events_single_paired(s,
            APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE),
            ^void(struct trace_point *start, struct trace_point *end)
    {
        paired_seen++;
        T_PASS("paired signposts are traced");

        T_EXPECT_EQ(start->arg1, 5UL, "argument 1 of start signpost is correct");
        T_EXPECT_EQ(start->arg2, 6UL, "argument 2 of start signpost is correct");
        T_EXPECT_EQ(start->arg3, 7UL, "argument 3 of start signpost is correct");
        T_EXPECT_EQ(start->arg4, 8UL, "argument 4 of start signpost is correct");

        T_EXPECT_EQ(end->arg1, 9UL, "argument 1 of end signpost is correct");
        T_EXPECT_EQ(end->arg2, 10UL, "argument 2 of end signpost is correct");
        T_EXPECT_EQ(end->arg3, 11UL, "argument 3 of end signpost is correct");
        T_EXPECT_EQ(end->arg4, 12UL, "argument 4 of end signpost is correct");

        T_EXPECT_EQ(single_seen, 1,
                "signposts are traced in the correct order");

        ktrace_end(s, 1);
    });

    ktrace_set_completion_handler(s, ^(void) {
        if (single_seen == 0) {
            T_FAIL("did not see single tracepoint before timeout");
        }
        if (paired_seen == 0) {
            T_FAIL("did not see paired tracepoints before timeout");
        }
        ktrace_session_destroy(s);
        T_END;
    });

    ktrace_filter_pid(s, getpid());

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

    T_EXPECT_POSIX_SUCCESS(kdebug_signpost(
            SIGNPOST_SINGLE_CODE, 1, 2, 3, 4), NULL);
    T_EXPECT_POSIX_SUCCESS(kdebug_signpost_start(
            SIGNPOST_PAIRED_CODE, 5, 6, 7, 8), NULL);
    T_EXPECT_POSIX_SUCCESS(kdebug_signpost_end(
            SIGNPOST_PAIRED_CODE, 9, 10, 11, 12), NULL);
    ktrace_end(s, 0);

    dispatch_main();
}

#define WRAPPING_EVENTS_COUNT (150000)
#define TRACE_ITERATIONS (5000)
#define WRAPPING_EVENTS_THRESHOLD (100)

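/*
 * Strategy: configure a bounded trace buffer with the raw kdebug sysctls,
 * spam events until the kernel reports KDBG_WRAPPED, then read the existing
 * buffer back through libktrace and check that the very first event delivered
 * is TRACE_LOST_EVENTS and that no later event repeats it.
 */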
T_DECL(kdebug_wrapping,
        "ensure that a wrapped trace reports lost events and no events from before the wrap",
        T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
    ktrace_session_t s;
    __block int events = 0;
    int mib[4];
    size_t needed;
    kbufinfo_t buf_info;
    int wait_wrapping_secs = (WRAPPING_EVENTS_COUNT / TRACE_ITERATIONS) + 5;
    int current_secs = wait_wrapping_secs;

    /* use sysctls manually to bypass libktrace assumptions */

    mib[0] = CTL_KERN; mib[1] = KERN_KDEBUG; mib[2] = KERN_KDSETUP; mib[3] = 0;
    needed = 0;
    T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, NULL, &needed, NULL, 0),
            "KERN_KDSETUP");

    mib[2] = KERN_KDSETBUF; mib[3] = WRAPPING_EVENTS_COUNT;
    T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDSETBUF");

    mib[2] = KERN_KDENABLE; mib[3] = 1;
    T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDENABLE");

    /* wrapping is on by default */

    /* wait until wrapped */
    T_LOG("waiting for trace to wrap");
    mib[2] = KERN_KDGETBUF;
    needed = sizeof(buf_info);
    do {
        sleep(1);
        for (int i = 0; i < TRACE_ITERATIONS; i++) {
            T_QUIET;
            T_ASSERT_POSIX_SUCCESS(kdebug_trace(0xfefe0000, 0, 0, 0, 0), NULL);
        }
        T_QUIET;
        T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, &buf_info, &needed, NULL, 0),
                NULL);
    } while (!(buf_info.flags & KDBG_WRAPPED) && --current_secs > 0);

    T_ASSERT_TRUE(buf_info.flags & KDBG_WRAPPED,
            "trace wrapped (after %d seconds within %d second timeout)",
            wait_wrapping_secs - current_secs, wait_wrapping_secs);

    s = ktrace_session_create();
    T_QUIET; T_ASSERT_NOTNULL(s, NULL);
    T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(s), NULL);

    ktrace_events_all(s, ^void(struct trace_point *tp) {
        if (events == 0) {
            T_EXPECT_EQ(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
                    "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS",
                    tp->debugid,
                    ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
        } else {
            T_QUIET;
            T_EXPECT_NE(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
                    "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS",
                    tp->debugid,
                    ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
        }

        events++;
        if (events > WRAPPING_EVENTS_THRESHOLD) {
            ktrace_end(s, 1);
        }
    });

    ktrace_set_completion_handler(s, ^(void) {
        ktrace_session_destroy(s);
        T_END;
    });

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

    dispatch_main();
}

__attribute__((aligned(8)))
static const char map_uuid[16] = "map UUID";

__attribute__((aligned(8)))
static const char unmap_uuid[16] = "unmap UUID";

__attribute__((aligned(8)))
static const char sc_uuid[16] = "shared UUID";

static fsid_t map_fsid = { .val = { 42, 43 } };
static fsid_t unmap_fsid = { .val = { 44, 45 } };
static fsid_t sc_fsid = { .val = { 46, 47 } };

static fsobj_id_t map_fsobjid = { .fid_objno = 42, .fid_generation = 43 };
static fsobj_id_t unmap_fsobjid = { .fid_objno = 44, .fid_generation = 45 };
static fsobj_id_t sc_fsobjid = { .fid_objno = 46, .fid_generation = 47 };

#define MAP_LOAD_ADDR 0xabadcafe
#define UNMAP_LOAD_ADDR 0xfeedface
#define SC_LOAD_ADDR 0xfedfaced

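/*
 * The kernel reports a dyld_kernel_image_info_t by splitting it across
 * consecutive tracepoints, and the packing depends on pointer size: on LP64
 * the UUID, load address, and fsid fit into the four arguments of the first
 * event and the fsobj_id_t arrives in the second, while 32-bit needs three
 * events (UUID; then load address, fsid, and fsobj objno; then the fsobj
 * generation).  expect_dyld_image_info() checks one event of that sequence,
 * selected by `order`.
 */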
__unused
static void
expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid,
        uint64_t exp_load_addr, fsid_t *exp_fsid, fsobj_id_t *exp_fsobjid,
        int order)
{
#if defined(__LP64__)
    if (order == 0) {
        uint64_t uuid[2];
        uint64_t load_addr;
        fsid_t fsid;

        uuid[0] = (uint64_t)tp->arg1;
        uuid[1] = (uint64_t)tp->arg2;
        load_addr = (uint64_t)tp->arg3;
        fsid.val[0] = (int32_t)(tp->arg4 & UINT32_MAX);
        fsid.val[1] = (int32_t)((uint64_t)tp->arg4 >> 32);

        T_QUIET; T_EXPECT_EQ(uuid[0], exp_uuid[0], NULL);
        T_QUIET; T_EXPECT_EQ(uuid[1], exp_uuid[1], NULL);
        T_QUIET; T_EXPECT_EQ(load_addr, exp_load_addr, NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
    } else if (order == 1) {
        fsobj_id_t fsobjid;

        fsobjid.fid_objno = (uint32_t)(tp->arg1 & UINT32_MAX);
        fsobjid.fid_generation = (uint32_t)((uint64_t)tp->arg1 >> 32);

        T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
        T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
                exp_fsobjid->fid_generation, NULL);
    } else {
        T_ASSERT_FAIL("unrecognized order of events %d", order);
    }
#else /* defined(__LP64__) */
    if (order == 0) {
        uint32_t uuid[4];

        uuid[0] = (uint32_t)tp->arg1;
        uuid[1] = (uint32_t)tp->arg2;
        uuid[2] = (uint32_t)tp->arg3;
        uuid[3] = (uint32_t)tp->arg4;

        T_QUIET; T_EXPECT_EQ(uuid[0], (uint32_t)exp_uuid[0], NULL);
        T_QUIET; T_EXPECT_EQ(uuid[1], (uint32_t)(exp_uuid[0] >> 32), NULL);
        T_QUIET; T_EXPECT_EQ(uuid[2], (uint32_t)exp_uuid[1], NULL);
        T_QUIET; T_EXPECT_EQ(uuid[3], (uint32_t)(exp_uuid[1] >> 32), NULL);
    } else if (order == 1) {
        uint32_t load_addr;
        fsid_t fsid;
        fsobj_id_t fsobjid;

        load_addr = (uint32_t)tp->arg1;
        fsid.val[0] = (int32_t)tp->arg2;
        fsid.val[1] = (int32_t)tp->arg3;
        fsobjid.fid_objno = (uint32_t)tp->arg4;

        T_QUIET; T_EXPECT_EQ(load_addr, (uint32_t)exp_load_addr, NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
        T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
    } else if (order == 2) {
        fsobj_id_t fsobjid;

        fsobjid.fid_generation = tp->arg1;

        T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
                exp_fsobjid->fid_generation, NULL);
    } else {
        T_ASSERT_FAIL("unrecognized order of events %d", order);
    }
#endif /* defined(__LP64__) */
}

#if defined(__LP64__)
#define DYLD_CODE_OFFSET (0)
#define DYLD_EVENTS (2)
#else
#define DYLD_CODE_OFFSET (2)
#define DYLD_EVENTS (3)
#endif

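/*
 * On LP64 the image info fits in two events starting at the base code; the
 * 32-bit layout takes three events that are traced under a separate run of
 * codes (base code + 2 onward), hence the additional code offset.
 */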
static void
expect_dyld_events(ktrace_session_t s, const char *name, uint32_t base_code,
        const char *exp_uuid, uint64_t exp_load_addr, fsid_t *exp_fsid,
        fsobj_id_t *exp_fsobjid, uint8_t *saw_events)
{
    for (int i = 0; i < DYLD_EVENTS; i++) {
        ktrace_events_single(s,
                KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID,
                base_code + DYLD_CODE_OFFSET + (unsigned int)i),
                ^(struct trace_point *tp)
        {
            T_LOG("checking %s event %c", name, 'A' + i);
            expect_dyld_image_info(tp, (const void *)exp_uuid, exp_load_addr,
                    exp_fsid, exp_fsobjid, i);
            *saw_events |= (1U << i);
        });
    }
}

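/*
 * The Mach calls task_register_dyld_image_infos(),
 * task_unregister_dyld_image_infos(), and
 * task_register_dyld_shared_cache_image_info() are what prompt the kernel to
 * emit the map/unmap/shared-cache dyld events that expect_dyld_events()
 * registers handlers for.
 */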
T_DECL(dyld_events, "test that dyld registering libraries emits events",
        T_META_ASROOT(true))
{
    ktrace_session_t s;
    dyld_kernel_image_info_t info;

    /*
     * Use pointers instead of __block variables in order to use these variables
     * in the completion block below _and_ pass pointers to them to the
     * expect_dyld_events function.
     */
    uint8_t saw_events[3] = { 0 };
    uint8_t *saw_mapping = &(saw_events[0]);
    uint8_t *saw_unmapping = &(saw_events[1]);
    uint8_t *saw_shared_cache = &(saw_events[2]);

    s = ktrace_session_create();
    T_ASSERT_NOTNULL(s, NULL);
    T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

    expect_dyld_events(s, "mapping", DBG_DYLD_UUID_MAP_A, map_uuid,
            MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping);
    expect_dyld_events(s, "unmapping", DBG_DYLD_UUID_UNMAP_A, unmap_uuid,
            UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping);
    expect_dyld_events(s, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A,
            sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache);

    ktrace_set_completion_handler(s, ^(void) {
        T_EXPECT_EQ(__builtin_popcount(*saw_mapping), DYLD_EVENTS, NULL);
        T_EXPECT_EQ(__builtin_popcount(*saw_unmapping), DYLD_EVENTS, NULL);
        T_EXPECT_EQ(__builtin_popcount(*saw_shared_cache), DYLD_EVENTS, NULL);
        ktrace_session_destroy(s);
        T_END;
    });

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

    info.load_addr = MAP_LOAD_ADDR;
    memcpy(info.uuid, map_uuid, sizeof(info.uuid));
    info.fsid = map_fsid;
    info.fsobjid = map_fsobjid;
    T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(),
            &info, 1), NULL);

    info.load_addr = UNMAP_LOAD_ADDR;
    memcpy(info.uuid, unmap_uuid, sizeof(info.uuid));
    info.fsid = unmap_fsid;
    info.fsobjid = unmap_fsobjid;
    T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(),
            &info, 1), NULL);

    info.load_addr = SC_LOAD_ADDR;
    memcpy(info.uuid, sc_uuid, sizeof(info.uuid));
    info.fsid = sc_fsid;
    info.fsobjid = sc_fsobjid;
    T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info(
            mach_task_self(), info, FALSE, FALSE), NULL);

    ktrace_end(s, 0);

    dispatch_main();
}

#define EXP_KERNEL_EVENTS 5U

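/*
 * The KERN_KDTEST sysctl (see assert_kdebug_test() below) asks the kernel to
 * emit a fixed run of DBG_BSD_KDEBUG_TEST events from its tracing macros.
 * Codes 0-4 come from development-only macros, 5-9 from the release macros,
 * and 10-14 from the "filtered" macros; the tests below check which of these
 * show up under different session configurations.  Each group's events carry
 * an increasing prefix of the arguments 1, 2, 3, 4 (the first event has all
 * four arguments zero, the fifth carries all four), which is what
 * expect_event() verifies.
 */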
static const uint32_t dev_evts[EXP_KERNEL_EVENTS] = {
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 0),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 1),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 2),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 3),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 4),
};

static const uint32_t rel_evts[EXP_KERNEL_EVENTS] = {
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 5),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 6),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 7),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 8),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 9),
};

static const uint32_t filt_evts[EXP_KERNEL_EVENTS] = {
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 10),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 11),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 12),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 13),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 14),
};

static bool
is_development_kernel(void)
{
    static dispatch_once_t is_development_once;
    static bool is_development;

    dispatch_once(&is_development_once, ^(void) {
        int dev;
        size_t dev_size = sizeof(dev);

        T_QUIET;
        T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.development", &dev,
                &dev_size, NULL, 0), NULL);
        is_development = (dev != 0);
    });

    return is_development;
}

static void
assert_kdebug_test(void)
{
    int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDTEST };
    T_ASSERT_POSIX_SUCCESS(
            sysctl(mib, sizeof(mib) / sizeof(mib[0]), NULL, NULL, NULL, 0),
            "KERN_KDTEST");
}

static void
expect_event(struct trace_point *tp, unsigned int *events,
        const uint32_t *event_ids, size_t event_ids_len)
{
    unsigned int event_idx = *events;
    bool event_found = false;
    size_t i;
    for (i = 0; i < event_ids_len; i++) {
        if (event_ids[i] == (tp->debugid & KDBG_EVENTID_MASK)) {
            T_LOG("found event 0x%x", tp->debugid);
            event_found = true;
        }
    }

    if (!event_found) {
        return;
    }

    *events += 1;
    for (i = 0; i < event_idx; i++) {
        T_QUIET; T_EXPECT_EQ(((uintptr_t *)&tp->arg1)[i], (uintptr_t)i + 1,
                NULL);
    }
    for (; i < 4; i++) {
        T_QUIET; T_EXPECT_EQ(((uintptr_t *)&tp->arg1)[i], (uintptr_t)0, NULL);
    }
}

static void
expect_release_event(struct trace_point *tp, unsigned int *events)
{
    expect_event(tp, events, rel_evts,
            sizeof(rel_evts) / sizeof(rel_evts[0]));
}

static void
expect_development_event(struct trace_point *tp, unsigned int *events)
{
    expect_event(tp, events, dev_evts,
            sizeof(dev_evts) / sizeof(dev_evts[0]));
}

static void
expect_filtered_event(struct trace_point *tp, unsigned int *events)
{
    expect_event(tp, events, filt_evts,
            sizeof(filt_evts) / sizeof(filt_evts[0]));
}

T_DECL(kernel_events, "ensure kernel macros work",
        T_META_ASROOT(true))
{
    ktrace_session_t s;

    s = ktrace_session_create();
    T_QUIET; T_ASSERT_NOTNULL(s, NULL);

    __block unsigned int dev_seen = 0;
    __block unsigned int rel_seen = 0;
    __block unsigned int filt_seen = 0;
    ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
            KDBG_EVENTID(DBG_BSD + 1, 0, 0),
            ^(struct trace_point *tp)
    {
        expect_development_event(tp, &dev_seen);
        expect_release_event(tp, &rel_seen);
        expect_filtered_event(tp, &filt_seen);
    });

    ktrace_set_completion_handler(s, ^(void) {
        /*
         * Development-only events are only filtered if running on an embedded
         * OS.
         */
        unsigned dev_exp;
#if TARGET_OS_EMBEDDED
        dev_exp = is_development_kernel() ? EXP_KERNEL_EVENTS : 0U;
#else
        dev_exp = EXP_KERNEL_EVENTS;
#endif

        T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS,
                "release and development events seen");
        T_EXPECT_EQ(dev_seen, dev_exp, "development-only events seen/not seen");
        T_EXPECT_EQ(filt_seen, EXP_KERNEL_EVENTS, "filter-only events seen");
        ktrace_session_destroy(s);
        T_END;
    });

    ktrace_filter_pid(s, getpid());

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
    assert_kdebug_test();

    ktrace_end(s, 0);

    dispatch_main();
}

T_DECL(kernel_events_filtered, "ensure that the filtered kernel macros work",
        T_META_ASROOT(true))
{
    ktrace_session_t s;

    s = ktrace_session_create();
    T_QUIET; T_ASSERT_NOTNULL(s, NULL);

    __block unsigned int dev_seen = 0;
    __block unsigned int rel_seen = 0;
    __block unsigned int filt_seen = 0;
    ktrace_events_all(s, ^(struct trace_point *tp) {
        expect_development_event(tp, &dev_seen);
        expect_release_event(tp, &rel_seen);
        /* to make sure no filtered events are emitted */
        expect_filtered_event(tp, &filt_seen);
    });

    ktrace_set_completion_handler(s, ^(void) {
        ktrace_session_destroy(s);

        T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, NULL);
        T_EXPECT_EQ(dev_seen, EXP_KERNEL_EVENTS, NULL);
        T_EXPECT_EQ(filt_seen, 0U, NULL);
        T_END;
    });

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
    assert_kdebug_test();

    ktrace_end(s, 0);

    dispatch_main();
}
591