#ifdef T_NAMESPACE
#undef T_NAMESPACE
#endif /* defined(T_NAMESPACE) */
#include <darwintest.h>
#include <darwintest_utils.h>
#include <dispatch/dispatch.h>

#include <ktrace/session.h>
#include <ktrace/private.h>
#include <System/sys/kdebug.h>
#include <kperf/kpc.h>
#include <kperf/kperf.h>
#include <kperfdata/kpdecode.h>
#include <os/assumes.h>

#include <sys/sysctl.h>

#include "kperf_helpers.h"
#include "ktrace_helpers.h"
T_GLOBAL_META(
	T_META_NAMESPACE("xnu.kperf"),
	T_META_CHECK_LEAKS(false));
/* Bounds for the static per-CPU and per-thread arrays used below. */
#define MAX_CPUS    64
#define MAX_THREADS 64
volatile static bool running_threads = true;
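/*
 * Keep a core busy: signal the provided semaphore once the thread is running,
 * then spin until the test clears running_threads.
 */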
static void *
spinning_thread(void *semp)
{
	T_ASSERT_NOTNULL(semp, "semaphore passed to thread should not be NULL");
	dispatch_semaphore_signal(*(dispatch_semaphore_t *)semp);

	while (running_threads) {
		;
	}
	return NULL;
}
#define PERF_STK_KHDR   UINT32_C(0x25020014)
#define PERF_STK_UHDR   UINT32_C(0x25020018)
#define PERF_TMR_FIRE   KDBG_EVENTID(DBG_PERF, 3, 0)
#define PERF_TMR_HNDLR  KDBG_EVENTID(DBG_PERF, 3, 2)
#define PERF_TMR_PEND   KDBG_EVENTID(DBG_PERF, 3, 3)
#define PERF_TMR_SKIP   KDBG_EVENTID(DBG_PERF, 3, 4)
#define PERF_KPC_CONFIG KDBG_EVENTID(DBG_PERF, 6, 4)
#define PERF_KPC_REG    KDBG_EVENTID(DBG_PERF, 6, 5)
#define PERF_KPC_REG32  KDBG_EVENTID(DBG_PERF, 6, 7)
#define PERF_INSTR_DATA KDBG_EVENTID(DBG_PERF, 1, 17)
#define SCHED_HANDOFF KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, \
		MACH_STACK_HANDOFF)
#define SCHED_SWITCH  KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_SCHED)
#define SCHED_IDLE    KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_IDLE)
#define MP_CPUS_CALL UINT32_C(0x1900004)

#define DISPATCH_AFTER_EVENT UINT32_C(0xfefffffc)
#define TIMEOUT_SECS 10

#define TIMER_PERIOD_NS (1 * NSEC_PER_MSEC)
/*
 * Ensure that kperf is correctly IPIing CPUs that are actively scheduling by
 * bringing up threads and ensuring that threads on-core are sampled by each
 * timer fire.
 */
T_DECL(ipi_active_cpus,
    "make sure that kperf IPIs all active CPUs")
{
	start_controlling_ktrace();

	int ncpus = dt_ncpu();
	T_ASSERT_LT(ncpus, MAX_CPUS,
	    "only supports up to %d CPUs", MAX_CPUS);
	T_LOG("found %d CPUs", ncpus);

	int nthreads = ncpus - 1;
	T_ASSERT_LT(nthreads, MAX_THREADS,
	    "only supports up to %d threads", MAX_THREADS);

	static pthread_t threads[MAX_THREADS];
	/*
	 * TODO options to write this to a file and reinterpret a file...
	 */

	/*
	 * Create threads to bring up all of the CPUs.
	 */
	dispatch_semaphore_t thread_spinning = dispatch_semaphore_create(0);

	for (int i = 0; i < nthreads; i++) {
		T_ASSERT_POSIX_ZERO(pthread_create(&threads[i], NULL, &spinning_thread,
		    &thread_spinning), NULL);
		dispatch_semaphore_wait(thread_spinning, DISPATCH_TIME_FOREVER);
	}

	T_LOG("spun up %d thread%s", nthreads, nthreads == 1 ? "" : "s");
	ktrace_session_t s = ktrace_session_create();
	T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	dispatch_queue_t q = dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0);
	/*
	 * Only set the timeout after we've seen an event that was traced by us.
	 * This helps set a reasonable timeout after we're guaranteed to get a
	 * sample.
	 */
	ktrace_events_single(s, DISPATCH_AFTER_EVENT,
	    ^(__unused struct trace_point *tp)
	{
		dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
		    TIMEOUT_SECS * NSEC_PER_SEC), q, ^{
			ktrace_end(s, 0);
		});
	});
	__block uint64_t nfires = 0;
	__block uint64_t nsamples = 0;
	static uint64_t idle_tids[MAX_CPUS] = { 0 };
	__block int nidles = 0;
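	/* When tracing ends, stop the spinners and report what was sampled. */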
	ktrace_set_completion_handler(s, ^{
		T_LOG("stopping threads");

		running_threads = false;

		for (int i = 0; i < nthreads; i++) {
			T_ASSERT_POSIX_ZERO(pthread_join(threads[i], NULL), NULL);
		}

		for (int i = 0; i < nidles; i++) {
			T_LOG("CPU %d idle thread: %#" PRIx64, i, idle_tids[i]);
		}

		T_LOG("saw %" PRIu64 " timer fires, %" PRIu64 " samples, "
		    "%g samples/fire", nfires, nsamples,
		    (double)nsamples / (double)nfires);

		T_END;
	});
	/*
	 * Track which threads are running on each CPU.
	 */
	static uint64_t tids_on_cpu[MAX_CPUS] = { 0 };

	void (^switch_cb)(struct trace_point *) = ^(struct trace_point *tp) {
		uint64_t new_thread = tp->arg2;
		// uint64_t old_thread = tp->threadid;

		for (int i = 0; i < nidles; i++) {
			if (idle_tids[i] == new_thread) {
				return;
			}
		}

		tids_on_cpu[tp->cpuid] = new_thread;
	};

	ktrace_events_single(s, SCHED_SWITCH, switch_cb);
	ktrace_events_single(s, SCHED_HANDOFF, switch_cb);
	/*
	 * Determine the thread IDs of the idle threads on each CPU.
	 */
	ktrace_events_single(s, SCHED_IDLE, ^(struct trace_point *tp) {
		uint64_t idle_thread = tp->threadid;

		tids_on_cpu[tp->cpuid] = 0;

		for (int i = 0; i < nidles; i++) {
			if (idle_tids[i] == idle_thread) {
				return;
			}
		}

		idle_tids[nidles++] = idle_thread;
	});
	/*
	 * On each timer fire, go through all the cores and mark any threads
	 * that should be sampled.
	 */
	__block int last_fire_cpu = -1;
	__block uint64_t sample_missing = 0;
	static uint64_t tids_snap[MAX_CPUS] = { 0 };
	__block int nexpected = 0;
#if defined(__x86_64__)
	__block int xcall_from_cpu = -1;
#endif /* defined(__x86_64__) */
	__block uint64_t xcall_mask = 0;
	ktrace_events_single(s, PERF_TMR_FIRE, ^(struct trace_point *tp) {
		int last_expected = nexpected;
		nfires++;

		nexpected = 0;
		for (int i = 0; i < ncpus; i++) {
			uint64_t i_bit = UINT64_C(1) << i;
			if (sample_missing & i_bit) {
				T_LOG("missed sample on CPU %d for thread %#llx from timer on CPU %d (xcall mask = %llx, expected %d samples)",
				    tp->cpuid, tids_snap[i], last_fire_cpu,
				    xcall_mask, last_expected);
				sample_missing &= ~i_bit;
			}

			if (tids_on_cpu[i] != 0) {
				tids_snap[i] = tids_on_cpu[i];
				sample_missing |= i_bit;
				nexpected++;
			}
		}

		T_ASSERT_LT((int)tp->cpuid, ncpus, "timer fire should not occur on an IOP");
		last_fire_cpu = (int)tp->cpuid;
#if defined(__x86_64__)
		xcall_from_cpu = (int)tp->cpuid;
#endif /* defined(__x86_64__) */
	});
#if defined(__x86_64__)
	/*
	 * Watch for the cross-call on Intel, make sure they match what kperf
	 * should be doing.
	 */
	ktrace_events_single(s, MP_CPUS_CALL, ^(struct trace_point *tp) {
		if (xcall_from_cpu != (int)tp->cpuid) {
			return;
		}

		xcall_mask = tp->arg1;
	});
#endif /* defined(__x86_64__) */
	/*
	 * On the timer handler for each CPU, unset the missing sample bitmap.
	 */
	ktrace_events_single(s, PERF_TMR_HNDLR, ^(struct trace_point *tp) {
		nsamples++;
		if ((int)tp->cpuid > ncpus) {
			/* skip IOPs; they're not scheduling our threads */
			return;
		}

		sample_missing &= ~(UINT64_C(1) << tp->cpuid);
	});
	/*
	 * Configure kperf and ktrace.
	 */
	(void)kperf_action_count_set(1);
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_KSTACK),
	    NULL);

	(void)kperf_timer_count_set(1);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(s,
	    dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)),
	    NULL);

	kdebug_trace(DISPATCH_AFTER_EVENT, 0, 0, 0, 0);

	dispatch_main();
}
#pragma mark kdebug triggers

#define KDEBUG_TRIGGER_TIMEOUT_NS (10 * NSEC_PER_SEC)
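/*
 * An event that none of the kdebug filters below should match; tracing it
 * marks the end of the events emitted by a test.
 */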
#define NON_TRIGGER_CLASS    UINT32_C(0xfd)
#define NON_TRIGGER_SUBCLASS UINT32_C(0xff)
#define NON_TRIGGER_CODE     UINT32_C(0xff)

#define NON_TRIGGER_EVENT \
	(KDBG_EVENTID(NON_TRIGGER_CLASS, NON_TRIGGER_SUBCLASS, \
	NON_TRIGGER_CODE))
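/*
 * Set up a kperf kdebug filter from filter_desc, trace each of the given
 * debugids, and expect a kernel and user stack sample for each one (and none
 * for the non-trigger event).
 */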
static void
expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids,
    unsigned int n_debugids)
{
	__block int missing_kernel_stacks = 0;
	__block int missing_user_stacks = 0;
	ktrace_session_t s;
	kperf_kdebug_filter_t filter;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, NULL);
	ktrace_events_single(s, PERF_STK_KHDR, ^(struct trace_point *tp) {
		missing_kernel_stacks--;
		T_LOG("saw kernel stack with %" PRIu64 " frames, flags = %#"
		    PRIx64, tp->arg2, tp->arg1);
	});
	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		missing_user_stacks--;
		T_LOG("saw user stack with %" PRIu64 " frames, flags = %#"
		    PRIx64, tp->arg2, tp->arg1);
	});
	for (unsigned int i = 0; i < n_debugids; i++) {
		ktrace_events_single(s, debugids[i], ^(struct trace_point *tp) {
			missing_kernel_stacks++;
			missing_user_stacks++;
			T_LOG("saw event with debugid 0x%" PRIx32, tp->debugid);
		});
	}

	ktrace_events_single(s, NON_TRIGGER_EVENT,
	    ^(__unused struct trace_point *tp)
	{
		ktrace_end(s, 0);
	});
	ktrace_set_completion_handler(s, ^{
		T_EXPECT_LE(missing_kernel_stacks, 0, NULL);
		T_EXPECT_LE(missing_user_stacks, 0, NULL);

		ktrace_session_destroy(s);
		T_END;
	});
	/* configure kperf */

	(void)kperf_action_count_set(1);
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_KSTACK | KPERF_SAMPLER_USTACK), NULL);

	filter = kperf_kdebug_filter_create();
	T_ASSERT_NOTNULL(filter, NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_desc(filter, filter_desc),
	    NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
	kperf_kdebug_filter_destroy(filter);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
	/* trace the triggering debugids */

	for (unsigned int i = 0; i < n_debugids; i++) {
		T_ASSERT_POSIX_SUCCESS(kdebug_trace(debugids[i], 0, 0, 0, 0), NULL);
	}

	T_ASSERT_POSIX_SUCCESS(kdebug_trace(NON_TRIGGER_EVENT, 0, 0, 0, 0), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, KDEBUG_TRIGGER_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		ktrace_end(s, 1);
	});
}
#define TRIGGER_CLASS     UINT32_C(0xfe)
#define TRIGGER_CLASS_END UINT32_C(0xfd)
#define TRIGGER_SUBCLASS  UINT32_C(0xff)
#define TRIGGER_CODE      UINT32_C(0)
#define TRIGGER_DEBUGID \
	(KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, TRIGGER_CODE))
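/*
 * The filter descriptors below select events by class (C), subclass (S), or
 * exact debugid (D); a trailing 'r' restricts the match to end (return)
 * events, which is why the *_END debugids in the expected lists carry
 * DBG_FUNC_END.
 */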
T_DECL(kdebug_trigger_classes,
    "test that kdebug trigger samples on classes")
{
	start_controlling_ktrace();

	const uint32_t class_debugids[] = {
		KDBG_EVENTID(TRIGGER_CLASS, 1, 1),
		KDBG_EVENTID(TRIGGER_CLASS, 2, 1),
		KDBG_EVENTID(TRIGGER_CLASS_END, 1, 1) | DBG_FUNC_END,
		KDBG_EVENTID(TRIGGER_CLASS_END, 2, 1) | DBG_FUNC_END,
	};

	expect_kdebug_trigger("C0xfe,C0xfdr", class_debugids,
	    sizeof(class_debugids) / sizeof(class_debugids[0]));
	dispatch_main();
}
T_DECL(kdebug_trigger_subclasses,
    "test that kdebug trigger samples on subclasses")
{
	start_controlling_ktrace();

	const uint32_t subclass_debugids[] = {
		KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 0),
		KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 1),
		KDBG_EVENTID(TRIGGER_CLASS_END, TRIGGER_SUBCLASS, 0) | DBG_FUNC_END,
		KDBG_EVENTID(TRIGGER_CLASS_END, TRIGGER_SUBCLASS, 1) | DBG_FUNC_END
	};

	expect_kdebug_trigger("S0xfeff,S0xfdffr", subclass_debugids,
	    sizeof(subclass_debugids) / sizeof(subclass_debugids[0]));
	dispatch_main();
}
T_DECL(kdebug_trigger_debugids,
    "test that kdebug trigger samples on debugids")
{
	start_controlling_ktrace();

	const uint32_t debugids[] = {
		TRIGGER_DEBUGID,
	};

	expect_kdebug_trigger("D0xfeff0000", debugids,
	    sizeof(debugids) / sizeof(debugids[0]));
	dispatch_main();
}
/*
 * TODO Set a single function specifier filter, expect not to trigger on all
 * events from that class.
 */
T_DECL(kdbg_callstacks,
    "test that the kdbg_callstacks samples on syscalls")
{
	start_controlling_ktrace();

	ktrace_session_t s;
	__block bool saw_user_stack = false;

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, NULL);

	/*
	 * Make sure BSD events are traced in order to trigger samples on syscalls.
	 */
	ktrace_events_class(s, DBG_BSD, ^void (__unused struct trace_point *tp) {});
	ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
		saw_user_stack = true;
		ktrace_end(s, 1);
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);

		T_EXPECT_TRUE(saw_user_stack,
		    "saw user stack after configuring kdbg_callstacks");
		T_END;
	});
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	T_ASSERT_POSIX_SUCCESS(kperf_kdbg_callstacks_set(1), NULL);
#pragma clang diagnostic pop
	T_ATEND(reset_kperf);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
		ktrace_end(s, 1);
	});

	dispatch_main();
}
#define STACKS_WAIT_DURATION_NS (3 * NSEC_PER_SEC)
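/*
 * Start sampling with the current kperf configuration and expect at least one
 * user and one kernel stack in the trace before invoking cb.
 */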
static void
expect_stacks_traced(void (^cb)(void))
{
	ktrace_session_t s;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	__block unsigned int user_stacks = 0;
	__block unsigned int kernel_stacks = 0;

	ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
		user_stacks++;
	});
	ktrace_events_single(s, PERF_STK_KHDR, ^(__unused struct trace_point *tp) {
		kernel_stacks++;
	});

	ktrace_set_completion_handler(s, ^(void) {
		ktrace_session_destroy(s);
		T_EXPECT_GT(user_stacks, 0U, NULL);
		T_EXPECT_GT(kernel_stacks, 0U, NULL);
		cb();
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, STACKS_WAIT_DURATION_NS),
	    dispatch_get_main_queue(), ^(void)
	{
		ktrace_end(s, 1);
	});
}
T_DECL(pet, "test that PET mode samples kernel and user stacks")
{
	start_controlling_ktrace();

	configure_kperf_stacks_timer(-1, 10);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);

	expect_stacks_traced(^(void) {
		T_END;
	});

	dispatch_main();
}
T_DECL(lightweight_pet,
    "test that lightweight PET mode samples kernel and user stacks",
    T_META_ASROOT(true))
{
	start_controlling_ktrace();

	int set = 1;

	configure_kperf_stacks_timer(-1, 10);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("kperf.lightweight_pet", NULL, NULL,
	    &set, sizeof(set)), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);

	expect_stacks_traced(^(void) {
		T_END;
	});

	dispatch_main();
}
T_DECL(pet_stress, "repeatedly enable and disable PET mode")
{
	start_controlling_ktrace();

	configure_kperf_stacks_timer(-1, 10);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);
}
T_DECL(timer_stress, "repeatedly enable and disable timers")
{
	start_controlling_ktrace();

	configure_kperf_stacks_timer(-1, 1);
}
T_DECL(pmc_config_only, "shouldn't show PMC config events unless requested")
{
	start_controlling_ktrace();

	__block bool saw_kpc_config = false;
	__block bool saw_kpc_reg = false;

	ktrace_session_t s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace_session_create");
	/*
	 * Watch for any KPC configuration or register events.
	 */
	ktrace_events_single(s, PERF_KPC_CONFIG,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_config = true;
	});
	ktrace_events_single(s, PERF_KPC_REG,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_reg = true;
	});
	ktrace_events_single(s, PERF_KPC_REG32,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_reg = true;
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_EXPECT_FALSE(saw_kpc_config,
		    "should see no KPC configs without sampler enabled");
		T_EXPECT_FALSE(saw_kpc_reg,
		    "should see no KPC registers without sampler enabled");
		T_END;
	});
	uint32_t nconfigs = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	uint64_t *config = calloc(nconfigs, sizeof(*config));

	int ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);
	T_ASSERT_POSIX_SUCCESS(ret, "configured kpc");

	T_ASSERT_POSIX_SUCCESS(kpc_set_counting(KPC_CLASS_CONFIGURABLE_MASK),
	    NULL);

	(void)kperf_action_count_set(1);
	T_ATEND(reset_kperf);

	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_PMC_CPU),
	    NULL);

	(void)kperf_timer_count_set(1);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
		ktrace_end(s, 1);
	});

	dispatch_main();
}
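/*
 * Skip the test when the kern.monotonic.supported sysctl is missing or
 * reports no support for monotonic counters.
 */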
static void
skip_if_monotonic_unsupported(void)
{
	int r;
	int supported = 0;
	size_t supported_size = sizeof(supported);

	r = sysctlbyname("kern.monotonic.supported", &supported, &supported_size,
	    NULL, 0);
	if (r < 0) {
		T_SKIP("could not find \"kern.monotonic.supported\" sysctl");
	}

	if (!supported) {
		T_SKIP("monotonic is not supported on this platform");
	}
}
#define INSTRS_CYCLES_UPPER 500
#define INSTRS_CYCLES_LOWER 50
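/*
 * End the trace once INSTRS_CYCLES_UPPER samples have been seen, and require
 * at least INSTRS_CYCLES_LOWER of them for the test to pass.
 */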
T_DECL(instrs_cycles, "ensure instructions and cycles are sampled")
{
	skip_if_monotonic_unsupported();

	start_controlling_ktrace();

	ktrace_session_t sess = ktrace_session_create();

	__block uint64_t ninstrs_cycles = 0;
	__block uint64_t nzeroes = 0;
	ktrace_events_single(sess, PERF_INSTR_DATA,
	    ^(__unused struct trace_point *tp) {
		if (tp->arg1 == 0) {
			T_LOG("%llx (%s)\n", tp->threadid, tp->command);
			nzeroes++;
		}
		if (ninstrs_cycles >= INSTRS_CYCLES_UPPER) {
			ktrace_end(sess, 1);
		}
		ninstrs_cycles++;
	});
	ktrace_set_collection_interval(sess, 200);

	ktrace_set_completion_handler(sess, ^{
		T_EXPECT_GE(ninstrs_cycles, (uint64_t)INSTRS_CYCLES_LOWER,
		    "saw enough instructions and cycles events");
		T_EXPECT_EQ(nzeroes, UINT64_C(0),
		    "saw no events with 0 instructions");
		T_END;
	});
	(void)kperf_action_count_set(1);
	T_ATEND(reset_kperf);

	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_TH_INSTRS_CYCLES), NULL);

	(void)kperf_timer_count_set(1);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(sess, dispatch_get_main_queue()),
	    NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
		ktrace_end(sess, 1);
	});

	dispatch_main();
}