#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <inttypes.h>
#include <ktrace/session.h>
#include <ktrace/private.h>
#include <mach/dyld_kernel.h>
#include <mach/host_info.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/task.h>
#include <os/assumes.h>
#include <sys/kdebug.h>
#include <sys/kdebug_signpost.h>
#include <sys/sysctl.h>

#define KDBG_TEST_MACROS 1
#define KDBG_TEST_OLD_TIMES 2

/*
 * Run the in-kernel kdebug test via the KERN_KDTEST sysctl.  The test
 * flavor is passed to the kernel through the sysctl's old-length argument.
 */
static void
assert_kdebug_test(unsigned int flavor)
{
    size_t size = flavor;
    int mib[] = { CTL_KERN, KERN_KDEBUG, KERN_KDTEST };
    T_ASSERT_POSIX_SUCCESS(
        sysctl(mib, sizeof(mib) / sizeof(mib[0]), NULL, &size, NULL, 0),
        "KERN_KDTEST sysctl");
}

#pragma mark kdebug syscalls

#define TRACE_DEBUGID (0xfedfed00U)

T_DECL(kdebug_trace_syscall, "test that kdebug_trace(2) emits correct events",
    T_META_ASROOT(true))
{
    ktrace_session_t s;
    __block int events_seen = 0;

    s = ktrace_session_create();
    os_assert(s != NULL);

    ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
    ktrace_events_single(s, TRACE_DEBUGID, ^void(struct trace_point *tp) {
        events_seen++;
        T_PASS("saw traced event");

        T_EXPECT_EQ(tp->arg1, 1UL, "argument 1 of traced event is correct");
        T_EXPECT_EQ(tp->arg2, 2UL, "argument 2 of traced event is correct");
        T_EXPECT_EQ(tp->arg3, 3UL, "argument 3 of traced event is correct");
        T_EXPECT_EQ(tp->arg4, 4UL, "argument 4 of traced event is correct");

        ktrace_end(s, 1);
    });

    ktrace_set_completion_handler(s, ^(void) {
        T_EXPECT_GE(events_seen, 1, NULL);
        ktrace_session_destroy(s);
        T_END;
    });

    ktrace_filter_pid(s, getpid());

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
    T_ASSERT_POSIX_SUCCESS(kdebug_trace(TRACE_DEBUGID, 1, 2, 3, 4), NULL);
    ktrace_end(s, 0);

    dispatch_main();
}

#define SIGNPOST_SINGLE_CODE (0x10U)
#define SIGNPOST_PAIRED_CODE (0x20U)

T_DECL(kdebug_signpost_syscall,
    "test that kdebug_signpost(2) emits correct events",
    T_META_ASROOT(true))
{
    ktrace_session_t s;
    __block int single_seen = 0;
    __block int paired_seen = 0;

    s = ktrace_session_create();
    T_ASSERT_NOTNULL(s, NULL);

    /* make sure to get enough events for the KDBUFWAIT to trigger */
    // ktrace_events_class(s, DBG_MACH, ^(__unused struct trace_point *tp){});
    ktrace_events_single(s,
        APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_SINGLE_CODE),
        ^void(struct trace_point *tp)
    {
        single_seen++;
        T_PASS("single signpost is traced");

        T_EXPECT_EQ(tp->arg1, 1UL, "argument 1 of single signpost is correct");
        T_EXPECT_EQ(tp->arg2, 2UL, "argument 2 of single signpost is correct");
        T_EXPECT_EQ(tp->arg3, 3UL, "argument 3 of single signpost is correct");
        T_EXPECT_EQ(tp->arg4, 4UL, "argument 4 of single signpost is correct");
    });

    ktrace_events_single_paired(s,
        APPSDBG_CODE(DBG_APP_SIGNPOST, SIGNPOST_PAIRED_CODE),
        ^void(struct trace_point *start, struct trace_point *end)
    {
        paired_seen++;
        T_PASS("paired signposts are traced");

        T_EXPECT_EQ(start->arg1, 5UL, "argument 1 of start signpost is correct");
        T_EXPECT_EQ(start->arg2, 6UL, "argument 2 of start signpost is correct");
        T_EXPECT_EQ(start->arg3, 7UL, "argument 3 of start signpost is correct");
        T_EXPECT_EQ(start->arg4, 8UL, "argument 4 of start signpost is correct");

        T_EXPECT_EQ(end->arg1, 9UL, "argument 1 of end signpost is correct");
        T_EXPECT_EQ(end->arg2, 10UL, "argument 2 of end signpost is correct");
        T_EXPECT_EQ(end->arg3, 11UL, "argument 3 of end signpost is correct");
        T_EXPECT_EQ(end->arg4, 12UL, "argument 4 of end signpost is correct");

        T_EXPECT_EQ(single_seen, 1,
            "signposts are traced in the correct order");

        ktrace_end(s, 1);
    });

    ktrace_set_completion_handler(s, ^(void) {
        if (single_seen == 0) {
            T_FAIL("did not see single tracepoint before timeout");
        }
        if (paired_seen == 0) {
            T_FAIL("did not see paired tracepoints before timeout");
        }
        ktrace_session_destroy(s);
        T_END;
    });

    ktrace_filter_pid(s, getpid());

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

    T_EXPECT_POSIX_SUCCESS(kdebug_signpost(
        SIGNPOST_SINGLE_CODE, 1, 2, 3, 4), NULL);
    T_EXPECT_POSIX_SUCCESS(kdebug_signpost_start(
        SIGNPOST_PAIRED_CODE, 5, 6, 7, 8), NULL);
    T_EXPECT_POSIX_SUCCESS(kdebug_signpost_end(
        SIGNPOST_PAIRED_CODE, 9, 10, 11, 12), NULL);
    ktrace_end(s, 0);

    dispatch_main();
}

#pragma mark kdebug behaviors

#define WRAPPING_EVENTS_COUNT (150000)
#define TRACE_ITERATIONS (5000)
#define WRAPPING_EVENTS_THRESHOLD (100)

T_DECL(wrapping,
    "ensure that wrapping traces lost events and no events prior to the wrap",
    T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
    ktrace_session_t s;
    __block int events = 0;
    int mib[4];
    size_t needed;
    kbufinfo_t buf_info;
    int wait_wrapping_secs = (WRAPPING_EVENTS_COUNT / TRACE_ITERATIONS) + 5;
    int current_secs = wait_wrapping_secs;

    /* use sysctls manually to bypass libktrace assumptions */

    mib[0] = CTL_KERN; mib[1] = KERN_KDEBUG; mib[2] = KERN_KDSETUP; mib[3] = 0;
    needed = 0;
    T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, NULL, &needed, NULL, 0),
        "KERN_KDSETUP");

    mib[2] = KERN_KDSETBUF; mib[3] = WRAPPING_EVENTS_COUNT;
    T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDSETBUF");

    mib[2] = KERN_KDENABLE; mib[3] = 1;
    T_ASSERT_POSIX_SUCCESS(sysctl(mib, 4, NULL, 0, NULL, 0), "KERN_KDENABLE");

    /* wrapping is on by default */

    /* wait until wrapped */
    T_LOG("waiting for trace to wrap");
    mib[2] = KERN_KDGETBUF;
    needed = sizeof(buf_info);
    do {
        sleep(1);
        for (int i = 0; i < TRACE_ITERATIONS; i++) {
            T_QUIET;
            T_ASSERT_POSIX_SUCCESS(kdebug_trace(0xfefe0000, 0, 0, 0, 0), NULL);
        }
        T_QUIET;
        T_ASSERT_POSIX_SUCCESS(sysctl(mib, 3, &buf_info, &needed, NULL, 0),
            NULL);
    } while (!(buf_info.flags & KDBG_WRAPPED) && --current_secs > 0);

    T_ASSERT_TRUE(buf_info.flags & KDBG_WRAPPED,
        "trace wrapped (after %d seconds within %d second timeout)",
        wait_wrapping_secs - current_secs, wait_wrapping_secs);

    s = ktrace_session_create();
    T_QUIET; T_ASSERT_NOTNULL(s, NULL);
    T_QUIET; T_ASSERT_POSIX_ZERO(ktrace_set_use_existing(s), NULL);

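    /*
     * Once the buffers have wrapped, the first event replayed from the
     * existing trace should be TRACE_LOST_EVENTS, and that marker should
     * not appear again afterwards.
     */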
    ktrace_events_all(s, ^void(struct trace_point *tp) {
        if (events == 0) {
            T_EXPECT_EQ(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
                "first event's debugid 0x%08x (%s) should be TRACE_LOST_EVENTS",
                tp->debugid,
                ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
        } else {
            T_QUIET;
            T_EXPECT_NE(tp->debugid, (unsigned int)TRACE_LOST_EVENTS,
                "event debugid 0x%08x (%s) should not be TRACE_LOST_EVENTS",
                tp->debugid,
                ktrace_name_for_eventid(s, tp->debugid & KDBG_EVENTID_MASK));
        }

        events++;
        if (events > WRAPPING_EVENTS_THRESHOLD) {
            ktrace_end(s, 1);
        }
    });

    ktrace_set_completion_handler(s, ^(void) {
        ktrace_session_destroy(s);
        T_END;
    });

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

    dispatch_main();
}

T_DECL(reject_old_events,
    "ensure that kdebug rejects events from before tracing began",
    T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
    __block uint64_t event_horizon_ts;
    __block int events = 0;

    ktrace_session_t s = ktrace_session_create();
    T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

    ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
        KDBG_EVENTID(DBG_BSD + 1, 0, 0),
        ^(struct trace_point *tp)
    {
        events++;
        T_EXPECT_GT(tp->timestamp, event_horizon_ts,
            "events in trace should be from after tracing began");
    });

    ktrace_set_completion_handler(s, ^{
        T_EXPECT_EQ(events, 2, "should see only two events");
        ktrace_session_destroy(s);
        T_END;
    });

    event_horizon_ts = mach_absolute_time();

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
    /* first, try an old event at the beginning of trace */
    assert_kdebug_test(KDBG_TEST_OLD_TIMES);
    /* after a good event has been traced, old events should be rejected */
    assert_kdebug_test(KDBG_TEST_OLD_TIMES);
    ktrace_end(s, 0);

    dispatch_main();
}

#define ORDERING_TIMEOUT_SEC 5

T_DECL(ascending_time_order,
    "ensure that kdebug events are in ascending order based on time",
    T_META_ASROOT(true), T_META_CHECK_LEAKS(false))
{
    __block uint64_t prev_ts = 0;
    __block uint32_t prev_debugid = 0;
    __block unsigned int prev_cpu = 0;
    __block bool in_order = true;

    ktrace_session_t s = ktrace_session_create();
    T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

    ktrace_events_all(s, ^(struct trace_point *tp) {
        if (tp->timestamp < prev_ts) {
            in_order = false;
            T_FAIL("found timestamps out of order");
            T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
                prev_ts, prev_debugid, prev_cpu);
            T_LOG("%" PRIu64 ": %#" PRIx32 " (cpu %d)",
                tp->timestamp, tp->debugid, tp->cpuid);
        }
        /* remember this event so an out-of-order successor can be reported */
        prev_ts = tp->timestamp;
        prev_debugid = tp->debugid;
        prev_cpu = tp->cpuid;
    });

    ktrace_set_completion_handler(s, ^{
        ktrace_session_destroy(s);
        T_EXPECT_TRUE(in_order, "event timestamps were in-order");
        T_END;
    });

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

    /* try to inject old timestamps into trace */
    assert_kdebug_test(KDBG_TEST_OLD_TIMES);

    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, ORDERING_TIMEOUT_SEC * NSEC_PER_SEC),
        dispatch_get_main_queue(), ^{
        T_LOG("ending test after timeout");
        ktrace_end(s, 1);
    });

    dispatch_main();
}

#pragma mark dyld tracing

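/*
 * Fake image info (UUIDs, filesystem IDs, and load addresses) registered
 * with the kernel below and expected to show up in the dyld tracepoints.
 */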
__attribute__((aligned(8)))
static const char map_uuid[16] = "map UUID";

__attribute__((aligned(8)))
static const char unmap_uuid[16] = "unmap UUID";

__attribute__((aligned(8)))
static const char sc_uuid[16] = "shared UUID";

static fsid_t map_fsid = { .val = { 42, 43 } };
static fsid_t unmap_fsid = { .val = { 44, 45 } };
static fsid_t sc_fsid = { .val = { 46, 47 } };

static fsobj_id_t map_fsobjid = { .fid_objno = 42, .fid_generation = 43 };
static fsobj_id_t unmap_fsobjid = { .fid_objno = 44, .fid_generation = 45 };
static fsobj_id_t sc_fsobjid = { .fid_objno = 46, .fid_generation = 47 };

#define MAP_LOAD_ADDR 0xabadcafe
#define UNMAP_LOAD_ADDR 0xfeedface
#define SC_LOAD_ADDR 0xfedfaced

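/*
 * Check one tracepoint's worth of dyld image info against the expected
 * values.  On LP64, the info is spread across two events: the first carries
 * the UUID, load address, and fsid, and the second carries the fsobj_id.
 * On 32-bit it takes three events.  `order` selects which event of the
 * sequence is being checked.
 */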
__unused
static void
expect_dyld_image_info(struct trace_point *tp, const uint64_t *exp_uuid,
    uint64_t exp_load_addr, fsid_t *exp_fsid, fsobj_id_t *exp_fsobjid,
    int order)
{
#if defined(__LP64__)
    if (order == 0) {
        uint64_t uuid[2];
        uint64_t load_addr;
        fsid_t fsid;

        uuid[0] = (uint64_t)tp->arg1;
        uuid[1] = (uint64_t)tp->arg2;
        load_addr = (uint64_t)tp->arg3;
        fsid.val[0] = (int32_t)(tp->arg4 & UINT32_MAX);
        fsid.val[1] = (int32_t)((uint64_t)tp->arg4 >> 32);

        T_QUIET; T_EXPECT_EQ(uuid[0], exp_uuid[0], NULL);
        T_QUIET; T_EXPECT_EQ(uuid[1], exp_uuid[1], NULL);
        T_QUIET; T_EXPECT_EQ(load_addr, exp_load_addr, NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
    } else if (order == 1) {
        fsobj_id_t fsobjid;

        fsobjid.fid_objno = (uint32_t)(tp->arg1 & UINT32_MAX);
        fsobjid.fid_generation = (uint32_t)((uint64_t)tp->arg1 >> 32);

        T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
        T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
            exp_fsobjid->fid_generation, NULL);
    } else {
        T_ASSERT_FAIL("unrecognized order of events %d", order);
    }
#else /* defined(__LP64__) */
    if (order == 0) {
        uint32_t uuid[4];

        uuid[0] = (uint32_t)tp->arg1;
        uuid[1] = (uint32_t)tp->arg2;
        uuid[2] = (uint32_t)tp->arg3;
        uuid[3] = (uint32_t)tp->arg4;

        T_QUIET; T_EXPECT_EQ(uuid[0], (uint32_t)exp_uuid[0], NULL);
        T_QUIET; T_EXPECT_EQ(uuid[1], (uint32_t)(exp_uuid[0] >> 32), NULL);
        T_QUIET; T_EXPECT_EQ(uuid[2], (uint32_t)exp_uuid[1], NULL);
        T_QUIET; T_EXPECT_EQ(uuid[3], (uint32_t)(exp_uuid[1] >> 32), NULL);
    } else if (order == 1) {
        uint32_t load_addr;
        fsid_t fsid;
        fsobj_id_t fsobjid;

        load_addr = (uint32_t)tp->arg1;
        fsid.val[0] = (int32_t)tp->arg2;
        fsid.val[1] = (int32_t)tp->arg3;
        fsobjid.fid_objno = (uint32_t)tp->arg4;

        T_QUIET; T_EXPECT_EQ(load_addr, (uint32_t)exp_load_addr, NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[0], exp_fsid->val[0], NULL);
        T_QUIET; T_EXPECT_EQ(fsid.val[1], exp_fsid->val[1], NULL);
        T_QUIET; T_EXPECT_EQ(fsobjid.fid_objno, exp_fsobjid->fid_objno, NULL);
    } else if (order == 2) {
        fsobj_id_t fsobjid;

        fsobjid.fid_generation = tp->arg1;

        T_QUIET; T_EXPECT_EQ(fsobjid.fid_generation,
            exp_fsobjid->fid_generation, NULL);
    } else {
        T_ASSERT_FAIL("unrecognized order of events %d", order);
    }
#endif /* defined(__LP64__) */
}

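/*
 * On LP64, each dyld image-info record is carried by two tracepoints; on
 * 32-bit it needs three, which start two codes after the base code.
 */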
#if defined(__LP64__)
#define DYLD_CODE_OFFSET (0)
#define DYLD_EVENTS (2)
#else
#define DYLD_CODE_OFFSET (2)
#define DYLD_EVENTS (3)
#endif

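/*
 * Register a handler for each tracepoint that makes up one dyld image-info
 * record and set a bit in *saw_events for every one that fires.
 */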
static void
expect_dyld_events(ktrace_session_t s, const char *name, uint32_t base_code,
    const char *exp_uuid, uint64_t exp_load_addr, fsid_t *exp_fsid,
    fsobj_id_t *exp_fsobjid, uint8_t *saw_events)
{
    for (int i = 0; i < DYLD_EVENTS; i++) {
        ktrace_events_single(s,
            KDBG_EVENTID(DBG_DYLD, DBG_DYLD_UUID,
            base_code + DYLD_CODE_OFFSET + (unsigned int)i),
            ^(struct trace_point *tp)
        {
            T_LOG("checking %s event %c", name, 'A' + i);
            expect_dyld_image_info(tp, (const void *)exp_uuid, exp_load_addr,
                exp_fsid, exp_fsobjid, i);
            *saw_events |= (1U << i);
        });
    }
}

T_DECL(dyld_events, "test that dyld registering libraries emits events",
    T_META_ASROOT(true))
{
    ktrace_session_t s;
    dyld_kernel_image_info_t info;

    /*
     * Use pointers instead of __block variables in order to use these variables
     * in the completion block below _and_ pass pointers to them to the
     * expect_dyld_events function.
     */
    uint8_t saw_events[3] = { 0 };
    uint8_t *saw_mapping = &(saw_events[0]);
    uint8_t *saw_unmapping = &(saw_events[1]);
    uint8_t *saw_shared_cache = &(saw_events[2]);

    s = ktrace_session_create();
    T_ASSERT_NOTNULL(s, NULL);
    T_ASSERT_POSIX_ZERO(ktrace_filter_pid(s, getpid()), NULL);

    expect_dyld_events(s, "mapping", DBG_DYLD_UUID_MAP_A, map_uuid,
        MAP_LOAD_ADDR, &map_fsid, &map_fsobjid, saw_mapping);
    expect_dyld_events(s, "unmapping", DBG_DYLD_UUID_UNMAP_A, unmap_uuid,
        UNMAP_LOAD_ADDR, &unmap_fsid, &unmap_fsobjid, saw_unmapping);
    expect_dyld_events(s, "shared cache", DBG_DYLD_UUID_SHARED_CACHE_A,
        sc_uuid, SC_LOAD_ADDR, &sc_fsid, &sc_fsobjid, saw_shared_cache);

    ktrace_set_completion_handler(s, ^(void) {
        T_EXPECT_EQ(__builtin_popcount(*saw_mapping), DYLD_EVENTS, NULL);
        T_EXPECT_EQ(__builtin_popcount(*saw_unmapping), DYLD_EVENTS, NULL);
        T_EXPECT_EQ(__builtin_popcount(*saw_shared_cache), DYLD_EVENTS, NULL);
        ktrace_session_destroy(s);
        T_END;
    });

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

    info.load_addr = MAP_LOAD_ADDR;
    memcpy(info.uuid, map_uuid, sizeof(info.uuid));
    info.fsid = map_fsid;
    info.fsobjid = map_fsobjid;
    T_EXPECT_MACH_SUCCESS(task_register_dyld_image_infos(mach_task_self(),
        &info, 1), NULL);

    info.load_addr = UNMAP_LOAD_ADDR;
    memcpy(info.uuid, unmap_uuid, sizeof(info.uuid));
    info.fsid = unmap_fsid;
    info.fsobjid = unmap_fsobjid;
    T_EXPECT_MACH_SUCCESS(task_unregister_dyld_image_infos(mach_task_self(),
        &info, 1), NULL);

    info.load_addr = SC_LOAD_ADDR;
    memcpy(info.uuid, sc_uuid, sizeof(info.uuid));
    info.fsid = sc_fsid;
    info.fsobjid = sc_fsobjid;
    T_EXPECT_MACH_SUCCESS(task_register_dyld_shared_cache_image_info(
        mach_task_self(), info, FALSE, FALSE), NULL);

    ktrace_end(s, 0);

    dispatch_main();
}

#pragma mark kdebug kernel macros

#define EXP_KERNEL_EVENTS 5U

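/*
 * Event IDs emitted by the in-kernel test (KDBG_TEST_MACROS); the three
 * groups are expected to come from the development-only, release, and
 * filtered kdebug macros, respectively.
 */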
static const uint32_t dev_evts[EXP_KERNEL_EVENTS] = {
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 0),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 1),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 2),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 3),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 4),
};

static const uint32_t rel_evts[EXP_KERNEL_EVENTS] = {
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 5),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 6),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 7),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 8),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 9),
};

static const uint32_t filt_evts[EXP_KERNEL_EVENTS] = {
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 10),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 11),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 12),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 13),
    BSDDBG_CODE(DBG_BSD_KDEBUG_TEST, 14),
};

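/* Cache whether the running kernel reports kern.development as non-zero. */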
static bool
is_development_kernel(void)
{
    static dispatch_once_t is_development_once;
    static bool is_development;

    dispatch_once(&is_development_once, ^(void) {
        int dev;
        size_t dev_size = sizeof(dev);

        T_QUIET;
        T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.development", &dev,
            &dev_size, NULL, 0), NULL);
        is_development = (dev != 0);
    });

    return is_development;
}

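/*
 * Match a tracepoint against one of the test event ID lists and, on a match,
 * check its arguments.  The Nth matching event (zero-indexed) is expected to
 * carry its first N arguments as 1..N, with the remaining arguments zero.
 */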
static void
expect_event(struct trace_point *tp, unsigned int *events,
    const uint32_t *event_ids, size_t event_ids_len)
{
    unsigned int event_idx = *events;
    bool event_found = false;
    size_t i;
    for (i = 0; i < event_ids_len; i++) {
        if (event_ids[i] == (tp->debugid & KDBG_EVENTID_MASK)) {
            T_LOG("found event 0x%x", tp->debugid);
            event_found = true;
        }
    }

    if (!event_found) {
        return;
    }

    *events += 1;
    for (i = 0; i < event_idx; i++) {
        T_QUIET; T_EXPECT_EQ(((uintptr_t *)&tp->arg1)[i], (uintptr_t)i + 1,
            NULL);
    }
    for (; i < 4; i++) {
        T_QUIET; T_EXPECT_EQ(((uintptr_t *)&tp->arg1)[i], (uintptr_t)0, NULL);
    }
}

static void
expect_release_event(struct trace_point *tp, unsigned int *events)
{
    expect_event(tp, events, rel_evts,
        sizeof(rel_evts) / sizeof(rel_evts[0]));
}

static void
expect_development_event(struct trace_point *tp, unsigned int *events)
{
    expect_event(tp, events, dev_evts,
        sizeof(dev_evts) / sizeof(dev_evts[0]));
}

static void
expect_filtered_event(struct trace_point *tp, unsigned int *events)
{
    expect_event(tp, events, filt_evts,
        sizeof(filt_evts) / sizeof(filt_evts[0]));
}

T_DECL(kernel_events, "ensure kernel macros work",
    T_META_ASROOT(true))
{
    ktrace_session_t s;

    s = ktrace_session_create();
    T_QUIET; T_ASSERT_NOTNULL(s, NULL);

    __block unsigned int dev_seen = 0;
    __block unsigned int rel_seen = 0;
    __block unsigned int filt_seen = 0;
    ktrace_events_range(s, KDBG_EVENTID(DBG_BSD, DBG_BSD_KDEBUG_TEST, 0),
        KDBG_EVENTID(DBG_BSD + 1, 0, 0),
        ^(struct trace_point *tp)
    {
        expect_development_event(tp, &dev_seen);
        expect_release_event(tp, &rel_seen);
        expect_filtered_event(tp, &filt_seen);
    });

    ktrace_set_completion_handler(s, ^(void) {
        /*
         * Development-only events are only filtered if running on an embedded
         * OS.
         */
        unsigned int dev_exp;
#if TARGET_OS_EMBEDDED
        dev_exp = is_development_kernel() ? EXP_KERNEL_EVENTS : 0U;
#else
        dev_exp = EXP_KERNEL_EVENTS;
#endif

        T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS,
            "release and development events seen");
        T_EXPECT_EQ(dev_seen, dev_exp, "development-only events seen/not seen");
        T_EXPECT_EQ(filt_seen, dev_exp, "filter-only events seen");
        ktrace_session_destroy(s);
        T_END;
    });

    ktrace_filter_pid(s, getpid());

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
    assert_kdebug_test(KDBG_TEST_MACROS);

    ktrace_end(s, 0);

    dispatch_main();
}

T_DECL(kernel_events_filtered, "ensure that the filtered kernel macros work",
    T_META_ASROOT(true))
{
    ktrace_session_t s;

    s = ktrace_session_create();
    T_QUIET; T_ASSERT_NOTNULL(s, NULL);

    __block unsigned int dev_seen = 0;
    __block unsigned int rel_seen = 0;
    __block unsigned int filt_seen = 0;
    ktrace_events_all(s, ^(struct trace_point *tp) {
        expect_development_event(tp, &dev_seen);
        expect_release_event(tp, &rel_seen);
        /* to make sure no filtered events are emitted */
        expect_filtered_event(tp, &filt_seen);
    });

    ktrace_set_completion_handler(s, ^(void) {
        ktrace_session_destroy(s);

        T_EXPECT_EQ(rel_seen, EXP_KERNEL_EVENTS, NULL);
#if defined(__arm__) || defined(__arm64__)
        T_EXPECT_EQ(dev_seen, is_development_kernel() ? EXP_KERNEL_EVENTS : 0U,
            NULL);
#else
        T_EXPECT_EQ(dev_seen, EXP_KERNEL_EVENTS, NULL);
#endif /* defined(__arm__) || defined(__arm64__) */
        T_EXPECT_EQ(filt_seen, 0U, NULL);
        T_END;
    });

    T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
    assert_kdebug_test(KDBG_TEST_MACROS);

    ktrace_end(s, 0);

    dispatch_main();
}