/* apple/xnu: tests/kperf.c */
#ifdef T_NAMESPACE
#undef T_NAMESPACE
#endif /* defined(T_NAMESPACE) */

#include <darwintest.h>
#include <darwintest_utils.h>
#include <dispatch/dispatch.h>
#include <inttypes.h>
#include <ktrace/session.h>
#include <ktrace/private.h>
#include <System/sys/kdebug.h>
#include <kperf/kpc.h>
#include <kperf/kperf.h>
#include <kperfdata/kpdecode.h>
#include <os/assumes.h>
#include <stdint.h>
#include <sys/sysctl.h>

#include "kperf_helpers.h"
#include "ktrace_helpers.h"

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.kperf"),
	T_META_CHECK_LEAKS(false),
	T_META_ASROOT(true));

#define MAX_CPUS 64
#define MAX_THREADS 64

static volatile bool running_threads = true;

static void *
spinning_thread(void *semp)
{
	T_QUIET;
	T_ASSERT_NOTNULL(semp, "semaphore passed to thread should not be NULL");
	dispatch_semaphore_signal(*(dispatch_semaphore_t *)semp);

	while (running_threads) {
		;
	}
	return NULL;
}

#define PERF_STK_KHDR   UINT32_C(0x25020014)
#define PERF_STK_UHDR   UINT32_C(0x25020018)
#define PERF_TMR_FIRE   KDBG_EVENTID(DBG_PERF, 3, 0)
#define PERF_TMR_HNDLR  KDBG_EVENTID(DBG_PERF, 3, 2)
#define PERF_TMR_PEND   KDBG_EVENTID(DBG_PERF, 3, 3)
#define PERF_TMR_SKIP   KDBG_EVENTID(DBG_PERF, 3, 4)
#define PERF_KPC_CONFIG KDBG_EVENTID(DBG_PERF, 6, 4)
#define PERF_KPC_REG    KDBG_EVENTID(DBG_PERF, 6, 5)
#define PERF_KPC_REG32  KDBG_EVENTID(DBG_PERF, 6, 7)
#define PERF_INSTR_DATA KDBG_EVENTID(DBG_PERF, 1, 17)
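
/*
 * A minimal decoding sketch, not exercised by any test below, assuming the
 * standard kdebug debugid layout (class in bits 24-31, subclass in bits
 * 16-23, code in bits 2-15).  PERF_STK_KHDR, for example, is class 0x25
 * (DBG_PERF), subclass 0x02, code 0x05.
 */
__attribute__((unused))
static void
decode_debugid_example(void)
{
	uint32_t debugid = PERF_STK_KHDR;
	T_LOG("class = %#x, subclass = %#x, code = %#x",
	    (unsigned int)KDBG_EXTRACT_CLASS(debugid),
	    (unsigned int)KDBG_EXTRACT_SUBCLASS(debugid),
	    (unsigned int)KDBG_EXTRACT_CODE(debugid));
}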

#define SCHED_HANDOFF KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, \
    MACH_STACK_HANDOFF)
#define SCHED_SWITCH  KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_SCHED)
#define SCHED_IDLE    KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_IDLE)

#define MP_CPUS_CALL UINT32_C(0x1900004)

#define DISPATCH_AFTER_EVENT UINT32_C(0xfefffffc)
#define TIMEOUT_SECS 10

#define TIMER_PERIOD_NS (1 * NSEC_PER_MSEC)

/*
 * Ensure that kperf correctly IPIs CPUs that are actively scheduling: spin
 * up threads to occupy the CPUs and verify that each timer fire samples the
 * threads that were on-core.
 */
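/*
 * In outline: SCHED_SWITCH and SCHED_HANDOFF events maintain tids_on_cpu[],
 * the thread last seen running on each CPU; each PERF_TMR_FIRE snapshots
 * that array into a bitmap of expected samples; each PERF_TMR_HNDLR clears
 * its CPU's bit.  Any bit still set at the next fire is a missed sample.
 */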

T_DECL(ipi_active_cpus,
    "make sure that kperf IPIs all active CPUs")
{
	start_controlling_ktrace();

	int ncpus = dt_ncpu();
	T_QUIET;
	T_ASSERT_LT(ncpus, MAX_CPUS,
	    "only supports up to %d CPUs", MAX_CPUS);
	T_LOG("found %d CPUs", ncpus);

	int nthreads = ncpus - 1;
	T_QUIET;
	T_ASSERT_LT(nthreads, MAX_THREADS,
	    "only supports up to %d threads", MAX_THREADS);

	static pthread_t threads[MAX_THREADS];

	/*
	 * TODO: Add an option to write the trace to a file and reinterpret it
	 * from that file.
	 */

	/*
	 * Create threads to bring up all of the CPUs.
	 */

	dispatch_semaphore_t thread_spinning = dispatch_semaphore_create(0);

	for (int i = 0; i < nthreads; i++) {
		T_QUIET;
		T_ASSERT_POSIX_ZERO(
			pthread_create(&threads[i], NULL, &spinning_thread,
			    &thread_spinning), NULL);
		dispatch_semaphore_wait(thread_spinning, DISPATCH_TIME_FOREVER);
	}

	T_LOG("spun up %d thread%s", nthreads, nthreads == 1 ? "" : "s");

	ktrace_session_t s = ktrace_session_create();
	T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	dispatch_queue_t q = dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0);

	/*
	 * Only arm the timeout after seeing an event that this test itself
	 * traced, which guarantees tracing is live and a few events will be
	 * collected before the session ends.
	 */

	ktrace_events_single(s, DISPATCH_AFTER_EVENT,
	    ^(__unused struct trace_point *tp)
	{
		dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
		    TIMEOUT_SECS * NSEC_PER_SEC), q, ^{
			ktrace_end(s, 0);
		});
	});

	__block uint64_t nfires = 0;
	__block uint64_t nsamples = 0;
	static uint64_t idle_tids[MAX_CPUS] = { 0 };
	__block int nidles = 0;

	ktrace_set_completion_handler(s, ^{
		T_LOG("stopping threads");

		running_threads = false;

		for (int i = 0; i < nthreads; i++) {
			T_QUIET;
			T_ASSERT_POSIX_ZERO(pthread_join(threads[i], NULL), NULL);
		}

		for (int i = 0; i < nidles; i++) {
			T_LOG("idle thread %d: %#" PRIx64, i, idle_tids[i]);
		}

		T_LOG("saw %" PRIu64 " timer fires, %" PRIu64 " samples, "
		    "%g samples/fire", nfires, nsamples,
		    (double)nsamples / (double)nfires);

		T_END;
	});

	/*
	 * Track which threads are running on each CPU.
	 */

	static uint64_t tids_on_cpu[MAX_CPUS] = { 0 };

	void (^switch_cb)(struct trace_point *) = ^(struct trace_point *tp) {
		uint64_t new_thread = tp->arg2;
		/* the thread being switched off is in tp->threadid, unused here */

		for (int i = 0; i < nidles; i++) {
			if (idle_tids[i] == new_thread) {
				return;
			}
		}

		tids_on_cpu[tp->cpuid] = new_thread;
	};

	ktrace_events_single(s, SCHED_SWITCH, switch_cb);
	ktrace_events_single(s, SCHED_HANDOFF, switch_cb);

	/*
	 * Determine the thread IDs of the idle threads on each CPU.
	 */

	ktrace_events_single(s, SCHED_IDLE, ^(struct trace_point *tp) {
		uint64_t idle_thread = tp->threadid;

		tids_on_cpu[tp->cpuid] = 0;

		for (int i = 0; i < nidles; i++) {
			if (idle_tids[i] == idle_thread) {
				return;
			}
		}

		idle_tids[nidles++] = idle_thread;
	});

	/*
	 * On each timer fire, report any samples still missing from the last
	 * fire and record which threads should be sampled this time.
	 */
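	/*
	 * A worked example, assuming two CPUs: a fire while threads A and B
	 * are on-core sets sample_missing to 0b11 and snapshots both threads;
	 * the handlers on CPUs 0 and 1 then clear bits 0 and 1.  Any bit
	 * still set at the next fire is logged as a missed sample.
	 */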

	__block int last_fire_cpu = -1;
	__block uint64_t sample_missing = 0;
	static uint64_t tids_snap[MAX_CPUS] = { 0 };
	__block int nexpected = 0;
#if defined(__x86_64__)
	__block int xcall_from_cpu = -1;
#endif /* defined(__x86_64__) */
	__block uint64_t xcall_mask = 0;

	ktrace_events_single(s, PERF_TMR_FIRE, ^(struct trace_point *tp) {
		int last_expected = nexpected;
		nfires++;

		nexpected = 0;
		for (int i = 0; i < ncpus; i++) {
			uint64_t i_bit = UINT64_C(1) << i;
			if (sample_missing & i_bit) {
				T_LOG("missed sample on CPU %d for thread %#llx from timer on CPU %d (xcall mask = %llx, expected %d samples)",
				    i, tids_snap[i], last_fire_cpu,
				    xcall_mask, last_expected);
				sample_missing &= ~i_bit;
			}

			if (tids_on_cpu[i] != 0) {
				tids_snap[i] = tids_on_cpu[i];
				sample_missing |= i_bit;
				nexpected++;
			}
		}

		T_QUIET;
		T_ASSERT_LT((int)tp->cpuid, ncpus, "timer fire should not occur on an IOP");
		last_fire_cpu = (int)tp->cpuid;
#if defined(__x86_64__)
		xcall_from_cpu = (int)tp->cpuid;
#endif /* defined(__x86_64__) */
	});

#if defined(__x86_64__)
	/*
	 * Watch for cross-calls on Intel and make sure they match what kperf
	 * should be doing.
	 */

	ktrace_events_single(s, MP_CPUS_CALL, ^(struct trace_point *tp) {
		if (xcall_from_cpu != (int)tp->cpuid) {
			return;
		}

		xcall_mask = tp->arg1;
		xcall_from_cpu = -1;
	});
#endif /* defined(__x86_64__) */

	/*
	 * In the timer handler on each CPU, clear that CPU's bit in the
	 * missing-sample bitmap.
	 */

	ktrace_events_single(s, PERF_TMR_HNDLR, ^(struct trace_point *tp) {
		nsamples++;
		if ((int)tp->cpuid >= ncpus) {
			/* skip IOPs; they're not scheduling our threads */
			return;
		}

		sample_missing &= ~(UINT64_C(1) << tp->cpuid);
	});

	/*
	 * Configure kperf and ktrace.
	 */

	(void)kperf_action_count_set(1);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_KSTACK),
	    NULL);
	(void)kperf_timer_count_set(1);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(s,
	    dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)),
	    "start ktrace");

	kdebug_trace(DISPATCH_AFTER_EVENT, 0, 0, 0, 0);

	dispatch_main();
}

#pragma mark kdebug triggers
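
/*
 * kperf can be configured to take a sample whenever a matching kdebug event
 * is emitted.  The tests below install a kdebug filter, emit the trigger
 * events themselves with kdebug_trace(), and expect a kernel and a user
 * stack for each trigger.
 */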

#define KDEBUG_TRIGGER_TIMEOUT_NS (10 * NSEC_PER_SEC)

#define NON_TRIGGER_CLASS    UINT32_C(0xfd)
#define NON_TRIGGER_SUBCLASS UINT32_C(0xff)
#define NON_TRIGGER_CODE     UINT32_C(0xff)

#define NON_TRIGGER_EVENT \
	(KDBG_EVENTID(NON_TRIGGER_CLASS, NON_TRIGGER_SUBCLASS, \
	NON_TRIGGER_CODE))

static void
expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids,
    unsigned int n_debugids)
{
	__block int missing_kernel_stacks = 0;
	__block int missing_user_stacks = 0;
	ktrace_session_t s;
	kperf_kdebug_filter_t filter;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, NULL);

	ktrace_events_single(s, PERF_STK_KHDR, ^(struct trace_point *tp) {
		missing_kernel_stacks--;
		T_LOG("saw kernel stack with %" PRIu64 " frames, flags = %#"
		    PRIx64, tp->arg2, tp->arg1);
	});
	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		missing_user_stacks--;
		T_LOG("saw user stack with %" PRIu64 " frames, flags = %#"
		    PRIx64, tp->arg2, tp->arg1);
	});

	for (unsigned int i = 0; i < n_debugids; i++) {
		ktrace_events_single(s, debugids[i], ^(struct trace_point *tp) {
			missing_kernel_stacks++;
			missing_user_stacks++;
			T_LOG("saw event with debugid 0x%" PRIx32, tp->debugid);
		});
	}

	ktrace_events_single(s, NON_TRIGGER_EVENT,
	    ^(__unused struct trace_point *tp)
	{
		ktrace_end(s, 0);
	});

	ktrace_set_completion_handler(s, ^{
		T_EXPECT_LE(missing_kernel_stacks, 0, NULL);
		T_EXPECT_LE(missing_user_stacks, 0, NULL);

		ktrace_session_destroy(s);
		T_END;
	});

	/* configure kperf */

	kperf_reset();

	(void)kperf_action_count_set(1);
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_KSTACK | KPERF_SAMPLER_USTACK), NULL);

	filter = kperf_kdebug_filter_create();
	T_ASSERT_NOTNULL(filter, NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_desc(filter, filter_desc),
	    NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
	kperf_kdebug_filter_destroy(filter);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* trace the triggering debugids */

	for (unsigned int i = 0; i < n_debugids; i++) {
		T_ASSERT_POSIX_SUCCESS(kdebug_trace(debugids[i], 0, 0, 0, 0), NULL);
	}

	T_ASSERT_POSIX_SUCCESS(kdebug_trace(NON_TRIGGER_EVENT, 0, 0, 0, 0), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, KDEBUG_TRIGGER_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		ktrace_end(s, 1);
	});
}

#define TRIGGER_CLASS     UINT32_C(0xfe)
#define TRIGGER_CLASS_END UINT32_C(0xfd)
#define TRIGGER_SUBCLASS  UINT32_C(0xff)
#define TRIGGER_CODE      UINT32_C(0)
#define TRIGGER_DEBUGID \
	(KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, TRIGGER_CODE))

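/*
 * As used below, the filter descriptor strings appear to work as follows:
 * "C0xfe" matches every event in class 0xfe, "S0xfeff" matches every event
 * in class 0xfe, subclass 0xff, and "D0xfeff0000" matches a single exact
 * debugid; a trailing "r" restricts a specifier to end (DBG_FUNC_END)
 * events.
 */
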
T_DECL(kdebug_trigger_classes,
    "test that kdebug trigger samples on classes")
{
	start_controlling_ktrace();

	const uint32_t class_debugids[] = {
		KDBG_EVENTID(TRIGGER_CLASS, 1, 1),
		KDBG_EVENTID(TRIGGER_CLASS, 2, 1),
		KDBG_EVENTID(TRIGGER_CLASS_END, 1, 1) | DBG_FUNC_END,
		KDBG_EVENTID(TRIGGER_CLASS_END, 2, 1) | DBG_FUNC_END,
	};

	expect_kdebug_trigger("C0xfe,C0xfdr", class_debugids,
	    sizeof(class_debugids) / sizeof(class_debugids[0]));
	dispatch_main();
}

T_DECL(kdebug_trigger_subclasses,
    "test that kdebug trigger samples on subclasses")
{
	start_controlling_ktrace();

	const uint32_t subclass_debugids[] = {
		KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 0),
		KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 1),
		KDBG_EVENTID(TRIGGER_CLASS_END, TRIGGER_SUBCLASS, 0) | DBG_FUNC_END,
		KDBG_EVENTID(TRIGGER_CLASS_END, TRIGGER_SUBCLASS, 1) | DBG_FUNC_END,
	};

	expect_kdebug_trigger("S0xfeff,S0xfdffr", subclass_debugids,
	    sizeof(subclass_debugids) / sizeof(subclass_debugids[0]));
	dispatch_main();
}

T_DECL(kdebug_trigger_debugids,
    "test that kdebug trigger samples on debugids")
{
	start_controlling_ktrace();

	const uint32_t debugids[] = {
		TRIGGER_DEBUGID,
	};

	expect_kdebug_trigger("D0xfeff0000", debugids,
	    sizeof(debugids) / sizeof(debugids[0]));
	dispatch_main();
}

/*
 * TODO: Set a single function specifier filter and expect it not to trigger
 * on all events from that class.
 */

/* kperf_reset() returns an int, so wrap it for use with T_ATEND. */
static void
reset_kperf(void)
{
	(void)kperf_reset();
}

T_DECL(kdbg_callstacks,
    "test that kdbg_callstacks samples on syscalls")
{
	start_controlling_ktrace();

	ktrace_session_t s;
	__block bool saw_user_stack = false;

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, NULL);

	/*
	 * Make sure BSD events are traced in order to trigger samples on syscalls.
	 */
	ktrace_events_class(s, DBG_BSD, ^void (__unused struct trace_point *tp) {});

	ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
		saw_user_stack = true;
		ktrace_end(s, 1);
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);

		T_EXPECT_TRUE(saw_user_stack,
		    "saw user stack after configuring kdbg_callstacks");
		T_END;
	});

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	T_ASSERT_POSIX_SUCCESS(kperf_kdbg_callstacks_set(1), NULL);
#pragma clang diagnostic pop
	T_ATEND(reset_kperf);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
		ktrace_end(s, 1);
	});

	dispatch_main();
}

#pragma mark PET
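
/*
 * PET (profile every thread) samples all threads on each timer fire, not
 * just the on-core ones, so the tests below expect both user and kernel
 * stacks within the sampling window.  Lightweight PET, enabled through the
 * kperf.lightweight_pet sysctl below, is (roughly) a cheaper variant in
 * which threads sample themselves as they are scheduled, instead of a
 * dedicated sampler walking every thread.
 */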

#define STACKS_WAIT_DURATION_NS (3 * NSEC_PER_SEC)

static void
expect_stacks_traced(void (^cb)(void))
{
	ktrace_session_t s;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	__block unsigned int user_stacks = 0;
	__block unsigned int kernel_stacks = 0;

	ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
		user_stacks++;
	});
	ktrace_events_single(s, PERF_STK_KHDR, ^(__unused struct trace_point *tp) {
		kernel_stacks++;
	});

	ktrace_set_completion_handler(s, ^(void) {
		ktrace_session_destroy(s);
		T_EXPECT_GT(user_stacks, 0U, NULL);
		T_EXPECT_GT(kernel_stacks, 0U, NULL);
		cb();
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, STACKS_WAIT_DURATION_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		kperf_reset();
		ktrace_end(s, 0);
	});
}

T_DECL(pet, "test that PET mode samples kernel and user stacks")
{
	start_controlling_ktrace();

	configure_kperf_stacks_timer(-1, 10);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);

	expect_stacks_traced(^(void) {
		T_END;
	});

	dispatch_main();
}

T_DECL(lightweight_pet,
    "test that lightweight PET mode samples kernel and user stacks",
    T_META_ASROOT(true))
{
	start_controlling_ktrace();

	int set = 1;

	configure_kperf_stacks_timer(-1, 10);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("kperf.lightweight_pet", NULL, NULL,
	    &set, sizeof(set)), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);

	expect_stacks_traced(^(void) {
		T_END;
	});

	dispatch_main();
}

T_DECL(pet_stress, "repeatedly enable and disable PET mode")
{
	start_controlling_ktrace();

	int niters = 1000;
	while (niters--) {
		configure_kperf_stacks_timer(-1, 10);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);
		usleep(20);
		kperf_reset();
	}
}

T_DECL(timer_stress, "repeatedly enable and disable timers")
{
	start_controlling_ktrace();

	int niters = 1000;
	while (niters--) {
		configure_kperf_stacks_timer(-1, 1);
		usleep(20);
		kperf_reset();
	}
}

T_DECL(pmc_config_only, "shouldn't show PMC config events unless requested")
{
	start_controlling_ktrace();

	__block bool saw_kpc_config = false;
	__block bool saw_kpc_reg = false;

	ktrace_session_t s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace_session_create");

	/*
	 * Watch for KPC configuration and register events, which should not
	 * appear unless explicitly requested.
	 */
	ktrace_events_single(s, PERF_KPC_CONFIG,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_config = true;
	});
	ktrace_events_single(s, PERF_KPC_REG,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_reg = true;
	});
	ktrace_events_single(s, PERF_KPC_REG32,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_reg = true;
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_EXPECT_FALSE(saw_kpc_config,
		    "should see no KPC configs without sampler enabled");
		T_EXPECT_FALSE(saw_kpc_reg,
		    "should see no KPC registers without sampler enabled");
		T_END;
	});

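	/*
	 * Program one configurable counter directly through kpc so there is
	 * real PMC configuration state that kperf could, in principle, log.
	 * The event selector value written below appears to be arbitrary.
	 */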
	uint32_t nconfigs = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	uint64_t *config = calloc(nconfigs, sizeof(*config));
	config[0] = 0x02;
	int ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);
	T_ASSERT_POSIX_SUCCESS(ret, "configured kpc");
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kpc_set_counting(KPC_CLASS_CONFIGURABLE_MASK),
	    "kpc_set_counting");

	(void)kperf_action_count_set(1);
	T_ATEND(reset_kperf);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_PMC_CPU),
	    NULL);

	(void)kperf_timer_count_set(1);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
		ktrace_end(s, 1);
	});

	dispatch_main();
}

static void
skip_if_monotonic_unsupported(void)
{
	int r;
	int supported = 0;
	size_t supported_size = sizeof(supported);

	r = sysctlbyname("kern.monotonic.supported", &supported, &supported_size,
	    NULL, 0);
	if (r < 0) {
		T_WITH_ERRNO;
		T_SKIP("could not find \"kern.monotonic.supported\" sysctl");
	}

	if (!supported) {
		T_SKIP("monotonic is not supported on this platform");
	}
}

#define INSTRS_CYCLES_UPPER 500
#define INSTRS_CYCLES_LOWER 50

T_DECL(instrs_cycles, "ensure instructions and cycles are sampled")
{
	skip_if_monotonic_unsupported();

	start_controlling_ktrace();

	ktrace_session_t sess = ktrace_session_create();
	T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(sess, "ktrace_session_create");

	__block uint64_t ninstrs_cycles = 0;
	__block uint64_t nzeroes = 0;
	ktrace_events_single(sess, PERF_INSTR_DATA,
	    ^(struct trace_point *tp) {
		ninstrs_cycles++;
		if (tp->arg1 == 0) {
			/* no instructions were counted for this sample */
			T_LOG("%llx (%s)", tp->threadid, tp->command);
			nzeroes++;
		}
		if (ninstrs_cycles >= INSTRS_CYCLES_UPPER) {
			ktrace_end(sess, 1);
		}
	});

	ktrace_set_collection_interval(sess, 200);

	ktrace_set_completion_handler(sess, ^{
		T_EXPECT_GE(ninstrs_cycles, (uint64_t)INSTRS_CYCLES_LOWER,
		    "saw enough instructions and cycles events");
		T_EXPECT_EQ(nzeroes, UINT64_C(0),
		    "saw no events with 0 instructions");
		T_END;
	});

	(void)kperf_action_count_set(1);
	T_ATEND(reset_kperf);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_TH_INSTRS_CYCLES), NULL);

	(void)kperf_timer_count_set(1);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_QUIET;
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(sess, dispatch_get_main_queue()),
	    NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
		ktrace_end(sess, 1);
	});

	dispatch_main();
}