#ifdef T_NAMESPACE
#undef T_NAMESPACE
#endif /* defined(T_NAMESPACE) */

#include <darwintest.h>
#include <darwintest_utils.h>
#include <dispatch/dispatch.h>
#include <inttypes.h>
#include <ktrace/session.h>
#include <ktrace/private.h>
#include <System/sys/kdebug.h>
#include <kperf/kpc.h>
#include <kperf/kperf.h>
#include <kperfdata/kpdecode.h>
#include <os/assumes.h>
#include <stdint.h>
#include <sys/sysctl.h>

#include "kperf_helpers.h"
#include "ktrace_helpers.h"

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.kperf"),
	T_META_CHECK_LEAKS(false),
	T_META_ASROOT(true));

#define MAX_CPUS    64
#define MAX_THREADS 64

volatile static bool running_threads = true;

static void *
spinning_thread(void *semp)
{
	T_QUIET;
	T_ASSERT_NOTNULL(semp, "semaphore passed to thread should not be NULL");
	dispatch_semaphore_signal(*(dispatch_semaphore_t *)semp);

	while (running_threads) {
		;
	}
	return NULL;
}

#define PERF_STK_KHDR   UINT32_C(0x25020014)
#define PERF_STK_UHDR   UINT32_C(0x25020018)
#define PERF_TMR_FIRE   KDBG_EVENTID(DBG_PERF, 3, 0)
#define PERF_TMR_HNDLR  KDBG_EVENTID(DBG_PERF, 3, 2)
#define PERF_TMR_PEND   KDBG_EVENTID(DBG_PERF, 3, 3)
#define PERF_TMR_SKIP   KDBG_EVENTID(DBG_PERF, 3, 4)
#define PERF_KPC_CONFIG KDBG_EVENTID(DBG_PERF, 6, 4)
#define PERF_KPC_REG    KDBG_EVENTID(DBG_PERF, 6, 5)
#define PERF_KPC_REG32  KDBG_EVENTID(DBG_PERF, 6, 7)
#define PERF_INSTR_DATA KDBG_EVENTID(DBG_PERF, 1, 17)
#define PERF_EVENT      KDBG_EVENTID(DBG_PERF, 0, 0)

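/*
 * kdebug event IDs pack class(8) | subclass(8) | code(14) | function(2).
 * As a compile-checked worked example of how the hard-coded stack-header
 * IDs above decompose:
 */
_Static_assert(PERF_STK_KHDR == KDBG_EVENTID(DBG_PERF, 2, 5),
    "PERF_STK_KHDR is DBG_PERF (0x25), subclass 2, code 5");
_Static_assert(PERF_STK_UHDR == KDBG_EVENTID(DBG_PERF, 2, 6),
    "PERF_STK_UHDR is DBG_PERF (0x25), subclass 2, code 6");
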
#define SCHED_HANDOFF KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, \
	    MACH_STACK_HANDOFF)
#define SCHED_SWITCH  KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_SCHED)
#define SCHED_IDLE    KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_IDLE)

#define MP_CPUS_CALL UINT32_C(0x1900004)

#define DISPATCH_AFTER_EVENT UINT32_C(0xfefffffc)
#define TIMEOUT_SECS 10

#define TIMER_PERIOD_NS (1 * NSEC_PER_MSEC)

/*
 * Ensure that kperf is correctly IPIing CPUs that are actively scheduling by
 * bringing up threads and ensuring that threads on-core are sampled by each
 * timer fire.
 */

T_DECL(ipi_active_cpus,
    "make sure that kperf IPIs all active CPUs")
{
	start_controlling_ktrace();

	int ncpus = dt_ncpu();
	T_QUIET;
	T_ASSERT_LT(ncpus, MAX_CPUS,
	    "only supports up to %d CPUs", MAX_CPUS);
	T_LOG("found %d CPUs", ncpus);

	int nthreads = ncpus - 1;
	T_QUIET;
	T_ASSERT_LT(nthreads, MAX_THREADS,
	    "only supports up to %d threads", MAX_THREADS);

	static pthread_t threads[MAX_THREADS];

	/*
	 * TODO Add options to write the trace to a file and reinterpret the
	 * file later.
	 */

	/*
	 * Create threads to bring up all of the CPUs.
	 */

	dispatch_semaphore_t thread_spinning = dispatch_semaphore_create(0);

	for (int i = 0; i < nthreads; i++) {
		T_QUIET;
		T_ASSERT_POSIX_ZERO(
			pthread_create(&threads[i], NULL, &spinning_thread,
			&thread_spinning), NULL);
		dispatch_semaphore_wait(thread_spinning, DISPATCH_TIME_FOREVER);
	}

	T_LOG("spun up %d thread%s", nthreads, nthreads == 1 ? "" : "s");

	ktrace_session_t s = ktrace_session_create();
	T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	dispatch_queue_t q = dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0);

	/*
	 * Only arm the timeout after seeing an event that we traced
	 * ourselves; this guarantees the session is live and delivering
	 * events before the clock starts.
	 */

	ktrace_events_single(s, DISPATCH_AFTER_EVENT,
	    ^(__unused struct trace_point *tp)
	{
		dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
		    TIMEOUT_SECS * NSEC_PER_SEC), q, ^{
			ktrace_end(s, 0);
		});
	});

	__block uint64_t nfires = 0;
	__block uint64_t nsamples = 0;
	static uint64_t idle_tids[MAX_CPUS] = { 0 };
	__block int nidles = 0;

	ktrace_set_completion_handler(s, ^{
		T_LOG("stopping threads");

		running_threads = false;

		for (int i = 0; i < nthreads; i++) {
			T_QUIET;
			T_ASSERT_POSIX_ZERO(pthread_join(threads[i], NULL), NULL);
		}

		for (int i = 0; i < nidles; i++) {
			T_LOG("idle thread %d: %#" PRIx64, i, idle_tids[i]);
		}

		T_LOG("saw %" PRIu64 " timer fires, %" PRIu64 " samples, "
		    "%g samples/fire", nfires, nsamples,
		    (double)nsamples / (double)nfires);

		T_END;
	});

	/*
	 * Track which threads are running on each CPU.
	 */
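
	/*
	 * Both the switch and handoff events carry the incoming thread's ID
	 * in arg2; switches to known idle threads are ignored below, since an
	 * idling CPU should not be expected to be sampled.
	 */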

	static uint64_t tids_on_cpu[MAX_CPUS] = { 0 };

	void (^switch_cb)(struct trace_point *) = ^(struct trace_point *tp) {
		uint64_t new_thread = tp->arg2;
		// uint64_t old_thread = tp->threadid;

		for (int i = 0; i < nidles; i++) {
			if (idle_tids[i] == new_thread) {
				return;
			}
		}

		tids_on_cpu[tp->cpuid] = new_thread;
	};

	ktrace_events_single(s, SCHED_SWITCH, switch_cb);
	ktrace_events_single(s, SCHED_HANDOFF, switch_cb);

	/*
	 * Determine the thread IDs of the idle threads on each CPU.
	 */

	ktrace_events_single(s, SCHED_IDLE, ^(struct trace_point *tp) {
		uint64_t idle_thread = tp->threadid;

		tids_on_cpu[tp->cpuid] = 0;

		for (int i = 0; i < nidles; i++) {
			if (idle_tids[i] == idle_thread) {
				return;
			}
		}

		idle_tids[nidles++] = idle_thread;
	});

	/*
	 * On each timer fire, go through all the cores and mark any threads
	 * that should be sampled.
	 */

	__block int last_fire_cpu = -1;
	__block uint64_t sample_missing = 0;
	static uint64_t tids_snap[MAX_CPUS] = { 0 };
	__block int nexpected = 0;
#if defined(__x86_64__)
	__block int xcall_from_cpu = -1;
#endif /* defined(__x86_64__) */
	__block uint64_t xcall_mask = 0;

	ktrace_events_single(s, PERF_TMR_FIRE, ^(struct trace_point *tp) {
		int last_expected = nexpected;
		nfires++;

		nexpected = 0;
		for (int i = 0; i < ncpus; i++) {
			uint64_t i_bit = UINT64_C(1) << i;
			if (sample_missing & i_bit) {
				T_LOG("missed sample on CPU %d for thread %#llx from timer on CPU %d (xcall mask = %llx, expected %d samples)",
				    i, tids_snap[i], last_fire_cpu,
				    xcall_mask, last_expected);
				sample_missing &= ~i_bit;
			}

			if (tids_on_cpu[i] != 0) {
				tids_snap[i] = tids_on_cpu[i];
				sample_missing |= i_bit;
				nexpected++;
			}
		}

		T_QUIET;
		T_ASSERT_LT((int)tp->cpuid, ncpus, "timer fire should not occur on an IOP");
		last_fire_cpu = (int)tp->cpuid;
#if defined(__x86_64__)
		xcall_from_cpu = (int)tp->cpuid;
#endif /* defined(__x86_64__) */
	});
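
	/*
	 * Worked example: with spinning threads on-core on CPUs 1 and 3 at
	 * fire time, sample_missing becomes 0b1010 and nexpected becomes 2;
	 * the PERF_TMR_HNDLR handler below clears each CPU's bit, so any bit
	 * still set at the next fire is a missed sample.
	 */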

#if defined(__x86_64__)
	/*
	 * Watch for the cross-calls on Intel and make sure they match what
	 * kperf should be doing.
	 */

	ktrace_events_single(s, MP_CPUS_CALL, ^(struct trace_point *tp) {
		if (xcall_from_cpu != (int)tp->cpuid) {
			return;
		}

		xcall_mask = tp->arg1;
		xcall_from_cpu = -1;
	});
#endif /* defined(__x86_64__) */

	/*
	 * On the timer handler event from each CPU, clear that CPU's bit in
	 * the missing-sample bitmap.
	 */

	ktrace_events_single(s, PERF_TMR_HNDLR, ^(struct trace_point *tp) {
		nsamples++;
		if ((int)tp->cpuid > ncpus) {
			/* skip IOPs; they're not scheduling our threads */
			return;
		}

		sample_missing &= ~(UINT64_C(1) << tp->cpuid);
	});

	/*
	 * Configure kperf and ktrace.
	 */
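
	/*
	 * kperf is programmed in two pieces: an action describing what to
	 * sample (action 1, kernel stacks) and a timer describing when to
	 * sample (timer 0, every TIMER_PERIOD_NS, triggering action 1).
	 */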

	(void)kperf_action_count_set(1);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_KSTACK),
	    NULL);
	(void)kperf_timer_count_set(1);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(s,
	    dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)),
	    "start ktrace");

	kdebug_trace(DISPATCH_AFTER_EVENT, 0, 0, 0, 0);

	dispatch_main();
}

#pragma mark kdebug triggers

#define KDEBUG_TRIGGER_TIMEOUT_NS (10 * NSEC_PER_SEC)

#define NON_TRIGGER_CLASS    UINT32_C(0xfd)
#define NON_TRIGGER_SUBCLASS UINT32_C(0xff)
#define NON_TRIGGER_CODE     UINT32_C(0xff)

#define NON_TRIGGER_EVENT \
	    (KDBG_EVENTID(NON_TRIGGER_CLASS, NON_TRIGGER_SUBCLASS, \
	    NON_TRIGGER_CODE))

static void
expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids,
    unsigned int n_debugids)
{
	__block int missing_kernel_stacks = 0;
	__block int missing_user_stacks = 0;
	ktrace_session_t s;
	kperf_kdebug_filter_t filter;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, NULL);

	ktrace_events_single(s, PERF_STK_KHDR, ^(struct trace_point *tp) {
		missing_kernel_stacks--;
		T_LOG("saw kernel stack with %" PRIu64 " frames, flags = %#"
		    PRIx64, tp->arg2, tp->arg1);
	});
	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		missing_user_stacks--;
		T_LOG("saw user stack with %" PRIu64 " frames, flags = %#"
		    PRIx64, tp->arg2, tp->arg1);
	});

	for (unsigned int i = 0; i < n_debugids; i++) {
		ktrace_events_single(s, debugids[i], ^(struct trace_point *tp) {
			missing_kernel_stacks++;
			missing_user_stacks++;
			T_LOG("saw event with debugid 0x%" PRIx32, tp->debugid);
		});
	}
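
	/*
	 * Each triggering event increments both "missing" counters and each
	 * observed stack decrements one, so a final count of zero or less
	 * means every trigger produced at least one stack of each kind.
	 */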

	ktrace_events_single(s, NON_TRIGGER_EVENT,
	    ^(__unused struct trace_point *tp)
	{
		ktrace_end(s, 0);
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_LE(missing_kernel_stacks, 0, NULL);
		T_EXPECT_LE(missing_user_stacks, 0, NULL);

		ktrace_session_destroy(s);
		T_END;
	});

	/* configure kperf */

	kperf_reset();

	(void)kperf_action_count_set(1);
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_KSTACK | KPERF_SAMPLER_USTACK), NULL);

	filter = kperf_kdebug_filter_create();
	T_ASSERT_NOTNULL(filter, NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_desc(filter, filter_desc),
	    NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
	kperf_kdebug_filter_destroy(filter);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* trace the triggering debugids */

	for (unsigned int i = 0; i < n_debugids; i++) {
		T_ASSERT_POSIX_SUCCESS(kdebug_trace(debugids[i], 0, 0, 0, 0), NULL);
	}

	T_ASSERT_POSIX_SUCCESS(kdebug_trace(NON_TRIGGER_EVENT, 0, 0, 0, 0), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, KDEBUG_TRIGGER_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		ktrace_end(s, 1);
	});
}

#define TRIGGER_CLASS     UINT32_C(0xfe)
#define TRIGGER_CLASS_END UINT32_C(0xfd)
#define TRIGGER_SUBCLASS  UINT32_C(0xff)
#define TRIGGER_CODE      UINT32_C(0)
#define TRIGGER_DEBUGID \
	    (KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, TRIGGER_CODE))
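
/*
 * TRIGGER_DEBUGID packs to 0xfeff0000 (class 0xfe, subclass 0xff, code 0),
 * which is exactly the "D0xfeff0000" debugid filter used below.  In the
 * filter descriptors, "C" matches a class, "S" a class/subclass pair, and
 * "D" an exact debugid; the trailing "r" on the 0xfd entries appears to
 * restrict matching to end ("return", DBG_FUNC_END) events, which is what
 * the *_CLASS_END debugids below exercise.
 */
_Static_assert(TRIGGER_DEBUGID == UINT32_C(0xfeff0000),
    "trigger debugid should match the debugid filter used below");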

T_DECL(kdebug_trigger_classes,
    "test that kdebug trigger samples on classes")
{
	start_controlling_ktrace();

	const uint32_t class_debugids[] = {
		KDBG_EVENTID(TRIGGER_CLASS, 1, 1),
		KDBG_EVENTID(TRIGGER_CLASS, 2, 1),
		KDBG_EVENTID(TRIGGER_CLASS_END, 1, 1) | DBG_FUNC_END,
		KDBG_EVENTID(TRIGGER_CLASS_END, 2, 1) | DBG_FUNC_END,
	};

	expect_kdebug_trigger("C0xfe,C0xfdr", class_debugids,
	    sizeof(class_debugids) / sizeof(class_debugids[0]));
	dispatch_main();
}

T_DECL(kdebug_trigger_subclasses,
    "test that kdebug trigger samples on subclasses")
{
	start_controlling_ktrace();

	const uint32_t subclass_debugids[] = {
		KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 0),
		KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 1),
		KDBG_EVENTID(TRIGGER_CLASS_END, TRIGGER_SUBCLASS, 0) | DBG_FUNC_END,
		KDBG_EVENTID(TRIGGER_CLASS_END, TRIGGER_SUBCLASS, 1) | DBG_FUNC_END
	};

	expect_kdebug_trigger("S0xfeff,S0xfdffr", subclass_debugids,
	    sizeof(subclass_debugids) / sizeof(subclass_debugids[0]));
	dispatch_main();
}

T_DECL(kdebug_trigger_debugids,
    "test that kdebug trigger samples on debugids")
{
	start_controlling_ktrace();

	const uint32_t debugids[] = {
		TRIGGER_DEBUGID
	};

	expect_kdebug_trigger("D0xfeff0000", debugids,
	    sizeof(debugids) / sizeof(debugids[0]));
	dispatch_main();
}

/*
 * TODO Set a single function specifier filter, and expect it not to trigger
 * on all events from that class.
 */

static void
reset_kperf(void)
{
	(void)kperf_reset();
}

T_DECL(kdbg_callstacks,
    "test that kdbg_callstacks samples on syscalls")
{
	start_controlling_ktrace();

	ktrace_session_t s;
	__block bool saw_user_stack = false;

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, NULL);

	/*
	 * Make sure BSD events are traced in order to trigger samples on syscalls.
	 */
	ktrace_events_class(s, DBG_BSD, ^void (__unused struct trace_point *tp) {});

	ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
		saw_user_stack = true;
		ktrace_end(s, 1);
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);

		T_EXPECT_TRUE(saw_user_stack,
		    "saw user stack after configuring kdbg_callstacks");
		T_END;
	});

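	/*
	 * kperf_kdbg_callstacks_set() is an older all-in-one interface for
	 * sampling stacks on system calls; it is deprecated (hence the
	 * pragmas below), with the explicit kdebug-filter configuration used
	 * in expect_kdebug_trigger() appearing to be its replacement.
	 */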
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	T_ASSERT_POSIX_SUCCESS(kperf_kdbg_callstacks_set(1), NULL);
#pragma clang diagnostic pop
	T_ATEND(reset_kperf);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
		ktrace_end(s, 1);
	});

	dispatch_main();
}

#pragma mark PET

#define STACKS_WAIT_DURATION_NS (3 * NSEC_PER_SEC)

static void
expect_stacks_traced(void (^cb)(void))
{
	ktrace_session_t s;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	__block unsigned int user_stacks = 0;
	__block unsigned int kernel_stacks = 0;

	ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
		user_stacks++;
	});
	ktrace_events_single(s, PERF_STK_KHDR, ^(__unused struct trace_point *tp) {
		kernel_stacks++;
	});

	ktrace_set_completion_handler(s, ^(void) {
		ktrace_session_destroy(s);
		T_EXPECT_GT(user_stacks, 0U, NULL);
		T_EXPECT_GT(kernel_stacks, 0U, NULL);
		cb();
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, STACKS_WAIT_DURATION_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		kperf_reset();
		ktrace_end(s, 0);
	});
}

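/*
 * PET ("profile every thread") samples every thread on each fire of the
 * designated timer, rather than only the CPUs the timer interrupted;
 * kperf_timer_pet_set(0) nominates timer 0 (set up by
 * configure_kperf_stacks_timer()) as the PET timer.
 */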
T_DECL(pet, "test that PET mode samples kernel and user stacks")
{
	start_controlling_ktrace();

	configure_kperf_stacks_timer(-1, 10);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);

	expect_stacks_traced(^(void) {
		T_END;
	});

	dispatch_main();
}

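/*
 * Lightweight PET avoids waking a dedicated sampler thread and instead
 * samples threads as they run (that description is an assumption based on
 * the mode's name; the test only checks that stacks still appear).  It is
 * enabled with the kperf.lightweight_pet sysctl on top of the same timer
 * setup.
 */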
T_DECL(lightweight_pet,
    "test that lightweight PET mode samples kernel and user stacks",
    T_META_ASROOT(true))
{
	start_controlling_ktrace();

	int set = 1;

	configure_kperf_stacks_timer(-1, 10);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("kperf.lightweight_pet", NULL, NULL,
	    &set, sizeof(set)), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);

	expect_stacks_traced(^(void) {
		T_END;
	});

	dispatch_main();
}

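/*
 * The stress variants rapidly reconfigure and reset kperf to shake out
 * races in sampler setup and teardown; they pass by simply completing
 * without wedging or panicking the system.
 */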
T_DECL(pet_stress, "repeatedly enable and disable PET mode")
{
	start_controlling_ktrace();

	int niters = 1000;
	while (niters--) {
		configure_kperf_stacks_timer(-1, 10);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);
		usleep(20);
		kperf_reset();
	}
}

T_DECL(timer_stress, "repeatedly enable and disable timers")
{
	start_controlling_ktrace();

	int niters = 1000;
	while (niters--) {
		configure_kperf_stacks_timer(-1, 1);
		usleep(20);
		kperf_reset();
	}
}

T_DECL(pmc_config_only, "shouldn't show PMC config events unless requested")
{
	start_controlling_ktrace();

	__block bool saw_kpc_config = false;
	__block bool saw_kpc_reg = false;

	ktrace_session_t s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace_session_create");

	/*
	 * Watch for any KPC configuration and register-value events; this
	 * test's configuration should never emit them.
	 */
	ktrace_events_single(s, PERF_KPC_CONFIG,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_config = true;
	});
	ktrace_events_single(s, PERF_KPC_REG,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_reg = true;
	});
	ktrace_events_single(s, PERF_KPC_REG32,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_reg = true;
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_EXPECT_FALSE(saw_kpc_config,
		    "should see no KPC configs without sampler enabled");
		T_EXPECT_FALSE(saw_kpc_reg,
		    "should see no KPC registers without sampler enabled");
		T_END;
	});

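	/*
	 * Program one configurable counter directly through kpc (0x02 is
	 * presumably just a convenient valid event selector) and start it
	 * counting.  The kperf action below samples per-CPU counter values,
	 * but nothing requests that the counter configuration itself be
	 * logged, so no config or register events should appear.
	 */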
	uint32_t nconfigs = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	uint64_t *config = calloc(nconfigs, sizeof(*config));
	config[0] = 0x02;
	int ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);
	T_ASSERT_POSIX_SUCCESS(ret, "configured kpc");
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kpc_set_counting(KPC_CLASS_CONFIGURABLE_MASK),
	    "kpc_set_counting");

	(void)kperf_action_count_set(1);
	T_ATEND(reset_kperf);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_PMC_CPU),
	    NULL);

	(void)kperf_timer_count_set(1);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
		ktrace_end(s, 1);
	});

	dispatch_main();
}

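/*
 * Per-thread instruction and cycle counts are supplied by the kernel's
 * monotonic counters subsystem, which only exists on some hardware, so
 * consult its "supported" sysctl before running.
 */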
static void
skip_if_monotonic_unsupported(void)
{
	int r;
	int supported = 0;
	size_t supported_size = sizeof(supported);

	r = sysctlbyname("kern.monotonic.supported", &supported, &supported_size,
	    NULL, 0);
	if (r < 0) {
		T_WITH_ERRNO;
		T_SKIP("could not find \"kern.monotonic.supported\" sysctl");
	}

	if (!supported) {
		T_SKIP("monotonic is not supported on this platform");
	}
}

#define INSTRS_CYCLES_UPPER 500
#define INSTRS_CYCLES_LOWER 50

T_DECL(instrs_cycles, "ensure instructions and cycles are sampled")
{
	skip_if_monotonic_unsupported();

	start_controlling_ktrace();

	ktrace_session_t sess = ktrace_session_create();

	__block uint64_t ninstrs_cycles = 0;
	__block uint64_t nzeroes = 0;
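
	/*
	 * Each PERF_INSTR_DATA event carries one thread's sampled counts,
	 * with the instruction count in arg1; a zero instruction count for a
	 * running thread indicates a bad sample, so those are tallied in
	 * nzeroes and expected to be absent.
	 */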
	ktrace_events_single(sess, PERF_INSTR_DATA,
	    ^(struct trace_point *tp) {
		ninstrs_cycles++;
		if (tp->arg1 == 0) {
			T_LOG("%llx (%s)", tp->threadid, tp->command);
			nzeroes++;
		}
		if (ninstrs_cycles >= INSTRS_CYCLES_UPPER) {
			ktrace_end(sess, 1);
		}
	});

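	/* Collect buffered events frequently (interval assumed to be in ms). */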
	ktrace_set_collection_interval(sess, 200);

	ktrace_set_completion_handler(sess, ^{
		T_EXPECT_GE(ninstrs_cycles, (uint64_t)INSTRS_CYCLES_LOWER,
		    "saw enough instructions and cycles events");
		T_EXPECT_EQ(nzeroes, UINT64_C(0),
		    "saw no events with 0 instructions");
		T_END;
	});

	(void)kperf_action_count_set(1);
	T_ATEND(reset_kperf);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_TH_INSTRS_CYCLES), NULL);

	(void)kperf_timer_count_set(1);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(sess, dispatch_get_main_queue()),
	    NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
		ktrace_end(sess, 1);
	});

	dispatch_main();
}