#ifdef T_NAMESPACE
#undef T_NAMESPACE
#endif /* defined(T_NAMESPACE) */

#include <darwintest.h>
#include <darwintest_utils.h>
#include <dispatch/dispatch.h>
#include <inttypes.h>
#include <ktrace/session.h>
#include <ktrace/private.h>
#include <System/sys/kdebug.h>
#include <kperf/kperf.h>
#include <kperfdata/kpdecode.h>
#include <os/assumes.h>
#include <stdint.h>
#include <sys/sysctl.h>

#include "kperf_helpers.h"

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.kperf"),
	T_META_CHECK_LEAKS(false));

#define MAX_CPUS 64
#define MAX_THREADS 64
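
/*
 * The per-CPU bookkeeping below (sample_missing, tids_on_cpu, tids_snap)
 * uses uint64_t bitmasks and fixed-size arrays, so MAX_CPUS must stay at or
 * below 64.
 */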

static volatile bool running_threads = true;

static void *
spinning_thread(void *semp)
{
	T_QUIET;
	T_ASSERT_NOTNULL(semp, "semaphore passed to thread should not be NULL");
	dispatch_semaphore_signal(*(dispatch_semaphore_t *)semp);

	while (running_threads) {
		;
	}
	return NULL;
}
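
/*
 * kdebug events observed by these tests: the PERF_STK_* values are the raw
 * event IDs kperf emits for kernel and user stack-sample headers, and the
 * PERF_TMR_* values are kperf's timer tracepoints.
 */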
#define PERF_STK_KHDR  UINT32_C(0x25020014)
#define PERF_STK_UHDR  UINT32_C(0x25020018)
#define PERF_TMR_FIRE  KDBG_EVENTID(DBG_PERF, 3, 0)
#define PERF_TMR_HNDLR KDBG_EVENTID(DBG_PERF, 3, 2)
#define PERF_TMR_PEND  KDBG_EVENTID(DBG_PERF, 3, 3)
#define PERF_TMR_SKIP  KDBG_EVENTID(DBG_PERF, 3, 4)

#define SCHED_HANDOFF KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, \
		MACH_STACK_HANDOFF)
#define SCHED_SWITCH  KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_SCHED)
#define SCHED_IDLE    KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_IDLE)

#define MP_CPUS_CALL UINT32_C(0x1900004)

#define DISPATCH_AFTER_EVENT UINT32_C(0xfefffffc)
#define TIMEOUT_SECS 10

#define TIMER_PERIOD_NS (1 * NSEC_PER_MSEC)

static void
reset_ktrace(void)
{
	kperf_reset();
}

/*
 * Ensure that kperf correctly IPIs CPUs that are actively scheduling, by
 * bringing up threads and checking that the threads on-core are sampled by
 * each timer fire.
 */

T_DECL(ipi_active_cpus,
    "make sure that kperf IPIs all active CPUs",
    T_META_ASROOT(true))
{
	int ncpus = dt_ncpu();
	T_QUIET;
	T_ASSERT_LT(ncpus, MAX_CPUS,
	    "only supports up to %d CPUs", MAX_CPUS);
	T_LOG("found %d CPUs", ncpus);

	int nthreads = ncpus - 1;
	T_QUIET;
	T_ASSERT_LT(nthreads, MAX_THREADS,
	    "only supports up to %d threads", MAX_THREADS);

	static pthread_t threads[MAX_THREADS];

	/*
	 * TODO: add options to write the trace to a file and reinterpret the
	 * file later.
	 */

	/*
	 * Create threads to bring up all of the CPUs.
	 */

	dispatch_semaphore_t thread_spinning = dispatch_semaphore_create(0);
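
	/*
	 * Wait for each spinner to signal the semaphore before creating the
	 * next one, so all nthreads are known to be running before tracing
	 * starts.
	 */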
	for (int i = 0; i < nthreads; i++) {
		T_QUIET;
		T_ASSERT_POSIX_ZERO(
		    pthread_create(&threads[i], NULL, &spinning_thread,
		    &thread_spinning), NULL);
		dispatch_semaphore_wait(thread_spinning, DISPATCH_TIME_FOREVER);
	}

	T_LOG("spun up %d thread%s", nthreads, nthreads == 1 ? "" : "s");

	ktrace_session_t s = ktrace_session_create();
	T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	dispatch_queue_t q = dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0);

	/*
	 * Only set the timeout after seeing an event that we traced ourselves.
	 * This guarantees the session is up and delivering events before the
	 * timeout clock starts.
	 */

	ktrace_events_single(s, DISPATCH_AFTER_EVENT,
	    ^(__unused struct trace_point *tp)
	{
		dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
		    TIMEOUT_SECS * NSEC_PER_SEC), q, ^{
			ktrace_end(s, 0);
		});
	});

	__block uint64_t nfires = 0;
	__block uint64_t nsamples = 0;
	static uint64_t idle_tids[MAX_CPUS] = { 0 };
	__block int nidles = 0;

	ktrace_set_completion_handler(s, ^{
		T_LOG("stopping threads");

		running_threads = false;

		for (int i = 0; i < nthreads; i++) {
			T_QUIET;
			T_ASSERT_POSIX_ZERO(pthread_join(threads[i], NULL), NULL);
		}

		for (int i = 0; i < nidles; i++) {
			T_LOG("CPU %d idle thread: %#" PRIx64, i, idle_tids[i]);
		}

		T_LOG("saw %" PRIu64 " timer fires, %" PRIu64 " samples, "
		    "%g samples/fire", nfires, nsamples,
		    (double)nsamples / (double)nfires);

		T_END;
	});

	/*
	 * Track which threads are running on each CPU.
	 */

	static uint64_t tids_on_cpu[MAX_CPUS] = { 0 };
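
	/*
	 * For context-switch and handoff events, arg2 is the thread ID of the
	 * thread going on-core (tp->threadid is the thread switching off);
	 * switches to known idle threads are ignored.
	 */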
	void (^switch_cb)(struct trace_point *) = ^(struct trace_point *tp) {
		uint64_t new_thread = tp->arg2;

		for (int i = 0; i < nidles; i++) {
			if (idle_tids[i] == new_thread) {
				return;
			}
		}

		tids_on_cpu[tp->cpuid] = new_thread;
	};

	ktrace_events_single(s, SCHED_SWITCH, switch_cb);
	ktrace_events_single(s, SCHED_HANDOFF, switch_cb);

	/*
	 * Determine the thread IDs of the idle threads on each CPU.
	 */

	ktrace_events_single(s, SCHED_IDLE, ^(struct trace_point *tp) {
		uint64_t idle_thread = tp->threadid;

		tids_on_cpu[tp->cpuid] = 0;

		for (int i = 0; i < nidles; i++) {
			if (idle_tids[i] == idle_thread) {
				return;
			}
		}

		idle_tids[nidles++] = idle_thread;
	});

	/*
	 * On each timer fire, go through all the cores and mark any threads
	 * that should be sampled.
	 */

	__block int last_fire_cpu = -1;
	__block uint64_t sample_missing = 0;
	static uint64_t tids_snap[MAX_CPUS] = { 0 };
	__block int nexpected = 0;
#if defined(__x86_64__)
	__block int xcall_from_cpu = -1;
#endif /* defined(__x86_64__) */
	__block uint64_t xcall_mask = 0;
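
	/*
	 * sample_missing has a bit set for each CPU that was running a
	 * non-idle thread at the previous fire but has not been sampled since;
	 * any bit still set when the next fire arrives is a missed sample.
	 */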
	ktrace_events_single(s, PERF_TMR_FIRE, ^(struct trace_point *tp) {
		int last_expected = nexpected;
		nfires++;

		nexpected = 0;
		for (int i = 0; i < ncpus; i++) {
			uint64_t i_bit = UINT64_C(1) << i;
			if (sample_missing & i_bit) {
				T_LOG("missed sample on CPU %d for thread %#llx from timer on CPU %d (xcall mask = %llx, expected %d samples)",
				    i, tids_snap[i], last_fire_cpu,
				    xcall_mask, last_expected);
				sample_missing &= ~i_bit;
			}

			if (tids_on_cpu[i] != 0) {
				tids_snap[i] = tids_on_cpu[i];
				sample_missing |= i_bit;
				nexpected++;
			}
		}

		T_QUIET;
		T_ASSERT_LT((int)tp->cpuid, ncpus, "timer fire should not occur on an IOP");
		last_fire_cpu = (int)tp->cpuid;
#if defined(__x86_64__)
		xcall_from_cpu = (int)tp->cpuid;
#endif /* defined(__x86_64__) */
	});

#if defined(__x86_64__)
	/*
	 * Watch for the cross-calls on Intel and make sure they match what
	 * kperf should be doing.
	 */

	ktrace_events_single(s, MP_CPUS_CALL, ^(struct trace_point *tp) {
		if (xcall_from_cpu != (int)tp->cpuid) {
			return;
		}

		xcall_mask = tp->arg1;
		xcall_from_cpu = -1;
	});
#endif /* defined(__x86_64__) */

	/*
	 * On the timer handler for each CPU, clear that CPU's bit in the
	 * missing-sample bitmap.
	 */

	ktrace_events_single(s, PERF_TMR_HNDLR, ^(struct trace_point *tp) {
		nsamples++;
		if ((int)tp->cpuid >= ncpus) {
			/* skip IOPs; they're not scheduling our threads */
			return;
		}

		sample_missing &= ~(UINT64_C(1) << tp->cpuid);
	});

	/*
	 * Configure kperf and ktrace.
	 */
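
	/*
	 * One action (1) samples kernel stacks; one timer (0) fires every
	 * TIMER_PERIOD_NS and triggers that action.
	 */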
	(void)kperf_action_count_set(1);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_KSTACK),
	    NULL);
	(void)kperf_timer_count_set(1);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");
	T_ATEND(reset_ktrace);

	T_ASSERT_POSIX_ZERO(ktrace_start(s,
	    dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)),
	    "start ktrace");

	kdebug_trace(DISPATCH_AFTER_EVENT, 0, 0, 0, 0);

	dispatch_main();
}

#pragma mark kdebug triggers

#define KDEBUG_TRIGGER_TIMEOUT_NS (10 * NSEC_PER_SEC)

#define NON_TRIGGER_CLASS    UINT8_C(0xfd)
#define NON_TRIGGER_SUBCLASS UINT8_C(0xff)
#define NON_TRIGGER_CODE     UINT8_C(0xff)

#define NON_TRIGGER_EVENT \
		(KDBG_EVENTID(NON_TRIGGER_CLASS, NON_TRIGGER_SUBCLASS, \
		NON_TRIGGER_CODE))
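
/*
 * NON_TRIGGER_EVENT is chosen so it matches none of the filters passed to
 * expect_kdebug_trigger() below; tracing it marks the end of the triggering
 * events.
 */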

static void
expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids,
    unsigned int n_debugids)
{
	__block int missing_kernel_stacks = 0;
	__block int missing_user_stacks = 0;
	ktrace_session_t s;
	kperf_kdebug_filter_t filter;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, NULL);
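
	/*
	 * Each triggering event increments the missing-stack counters and each
	 * stack header decrements them, so finishing at or below zero means
	 * every trigger produced at least one kernel and one user stack.
	 */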
	ktrace_events_single(s, PERF_STK_KHDR, ^(struct trace_point *tp) {
		missing_kernel_stacks--;
		T_LOG("saw kernel stack with %lu frames, flags = %#lx", tp->arg2,
		    tp->arg1);
	});
	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		missing_user_stacks--;
		T_LOG("saw user stack with %lu frames, flags = %#lx", tp->arg2,
		    tp->arg1);
	});

	for (unsigned int i = 0; i < n_debugids; i++) {
		ktrace_events_single(s, debugids[i], ^(struct trace_point *tp) {
			missing_kernel_stacks++;
			missing_user_stacks++;
			T_LOG("saw event with debugid 0x%" PRIx32, tp->debugid);
		});
	}

	ktrace_events_single(s, NON_TRIGGER_EVENT,
	    ^(__unused struct trace_point *tp)
	{
		ktrace_end(s, 0);
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_LE(missing_kernel_stacks, 0, NULL);
		T_EXPECT_LE(missing_user_stacks, 0, NULL);

		ktrace_session_destroy(s);
		T_END;
	});

	/* configure kperf */

	kperf_reset();

	(void)kperf_action_count_set(1);
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_KSTACK | KPERF_SAMPLER_USTACK), NULL);

	filter = kperf_kdebug_filter_create();
	T_ASSERT_NOTNULL(filter, NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_desc(filter, filter_desc),
	    NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
	kperf_kdebug_filter_destroy(filter);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* trace the triggering debugids */

	for (unsigned int i = 0; i < n_debugids; i++) {
		T_ASSERT_POSIX_SUCCESS(kdebug_trace(debugids[i], 0, 0, 0, 0), NULL);
	}

	T_ASSERT_POSIX_SUCCESS(kdebug_trace(NON_TRIGGER_EVENT, 0, 0, 0, 0), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, KDEBUG_TRIGGER_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		ktrace_end(s, 1);
	});
}

#define TRIGGER_CLASS     UINT8_C(0xfe)
#define TRIGGER_CLASS_END UINT8_C(0xfd)
#define TRIGGER_SUBCLASS  UINT8_C(0xff)
#define TRIGGER_CODE      UINT8_C(0)
#define TRIGGER_DEBUGID \
		(KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, TRIGGER_CODE))

T_DECL(kdebug_trigger_classes,
    "test that kdebug trigger samples on classes",
    T_META_ASROOT(true))
{
	const uint32_t class_debugids[] = {
		KDBG_EVENTID(TRIGGER_CLASS, 1, 1),
		KDBG_EVENTID(TRIGGER_CLASS, 2, 1),
		KDBG_EVENTID(TRIGGER_CLASS_END, 1, 1) | DBG_FUNC_END,
		KDBG_EVENTID(TRIGGER_CLASS_END, 2, 1) | DBG_FUNC_END,
	};
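
	/*
	 * In the filter description, "C" matches a whole class and a trailing
	 * "r" restricts the match to end (DBG_FUNC_END) events, which is why
	 * the TRIGGER_CLASS_END debugids above carry DBG_FUNC_END.
	 */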
	expect_kdebug_trigger("C0xfe,C0xfdr", class_debugids,
	    sizeof(class_debugids) / sizeof(class_debugids[0]));
	dispatch_main();
}

T_DECL(kdebug_trigger_subclasses,
    "test that kdebug trigger samples on subclasses",
    T_META_ASROOT(true))
{
	const uint32_t subclass_debugids[] = {
		KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 0),
		KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 1),
		KDBG_EVENTID(TRIGGER_CLASS_END, TRIGGER_SUBCLASS, 0) | DBG_FUNC_END,
		KDBG_EVENTID(TRIGGER_CLASS_END, TRIGGER_SUBCLASS, 1) | DBG_FUNC_END
	};

	expect_kdebug_trigger("S0xfeff,S0xfdffr", subclass_debugids,
	    sizeof(subclass_debugids) / sizeof(subclass_debugids[0]));
	dispatch_main();
}

T_DECL(kdebug_trigger_debugids,
    "test that kdebug trigger samples on debugids",
    T_META_ASROOT(true))
{
	const uint32_t debugids[] = {
		TRIGGER_DEBUGID
	};

	expect_kdebug_trigger("D0xfeff0000", debugids,
	    sizeof(debugids) / sizeof(debugids[0]));
	dispatch_main();
}

/*
 * TODO: set a single function-specifier filter and expect it not to trigger
 * on all events from that class.
 */

T_DECL(kdbg_callstacks,
    "test that kdbg_callstacks samples on syscalls",
    T_META_ASROOT(true))
{
	ktrace_session_t s;
	__block bool saw_user_stack = false;

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, NULL);

	/*
	 * Make sure BSD events are traced in order to trigger samples on
	 * syscalls.
	 */
	ktrace_events_class(s, DBG_BSD, ^void(__unused struct trace_point *tp) {});

	ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
		saw_user_stack = true;
		ktrace_end(s, 1);
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);

		T_EXPECT_TRUE(saw_user_stack,
		    "saw user stack after configuring kdbg_callstacks");
		T_END;
	});
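
	/*
	 * kperf_kdbg_callstacks_set() is deprecated; this test exercises the
	 * legacy interface on purpose, so suppress the warning.
	 */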
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	T_ASSERT_POSIX_SUCCESS(kperf_kdbg_callstacks_set(1), NULL);
#pragma clang diagnostic pop
	T_ATEND(kperf_reset);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
		ktrace_end(s, 1);
	});

	dispatch_main();
}

#pragma mark PET

#define STACKS_WAIT_DURATION_NS (3 * NSEC_PER_SEC)

static void
expect_stacks_traced(void (^cb)(void))
{
	ktrace_session_t s;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	__block unsigned int user_stacks = 0;
	__block unsigned int kernel_stacks = 0;

	ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
		user_stacks++;
	});
	ktrace_events_single(s, PERF_STK_KHDR, ^(__unused struct trace_point *tp) {
		kernel_stacks++;
	});

	ktrace_set_completion_handler(s, ^(void) {
		ktrace_session_destroy(s);
		T_EXPECT_GT(user_stacks, 0U, NULL);
		T_EXPECT_GT(kernel_stacks, 0U, NULL);
		cb();
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, STACKS_WAIT_DURATION_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		kperf_reset();
		ktrace_end(s, 0);
	});
}
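
/*
 * PET (profile every thread) wakes a dedicated sampling thread on each timer
 * fire to sample every thread in the system, not just the threads that were
 * on-core when the timer fired.
 */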
T_DECL(pet, "test that PET mode samples kernel and user stacks",
    T_META_ASROOT(true))
{
	configure_kperf_stacks_timer(-1, 10);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);

	expect_stacks_traced(^(void) {
		T_END;
	});

	dispatch_main();
}
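
/*
 * Lightweight PET skips the dedicated sampling thread and instead has
 * threads sample themselves as they go on-core, so only threads that
 * actually run are sampled.
 */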
T_DECL(lightweight_pet,
    "test that lightweight PET mode samples kernel and user stacks",
    T_META_ASROOT(true))
{
	int set = 1;

	configure_kperf_stacks_timer(-1, 10);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("kperf.lightweight_pet", NULL, NULL,
	    &set, sizeof(set)), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);

	expect_stacks_traced(^(void) {
		T_END;
	});

	dispatch_main();
}