#endif /* defined(T_NAMESPACE) */

#include <darwintest.h>
#include <darwintest_utils.h>
#include <dispatch/dispatch.h>
#include <inttypes.h>
#include <ktrace/session.h>
#include <ktrace/private.h>
#include <System/sys/kdebug.h>
#include <kperf/kpc.h>
#include <kperf/kperf.h>
#include <kperfdata/kpdecode.h>
#include <os/assumes.h>
#include <stdint.h>
#include <sys/sysctl.h>

#include "kperf_helpers.h"
#include "ktrace_helpers.h"

	T_META_NAMESPACE("xnu.kperf"),
	T_META_CHECK_LEAKS(false),

#define MAX_THREADS 64
volatile static bool running_threads = true;
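/*
 * Worker for the IPI test below: each spinning thread signals the semaphore
 * it is handed and then busy-waits on running_threads, keeping a CPU busy so
 * that every active CPU has a samplable thread on-core.
 */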
static void *
spinning_thread(void *semp)
{
	T_ASSERT_NOTNULL(semp, "semaphore passed to thread should not be NULL");
	dispatch_semaphore_signal(*(dispatch_semaphore_t *)semp);

	while (running_threads) {
#define PERF_STK_KHDR   UINT32_C(0x25020014)
#define PERF_STK_UHDR   UINT32_C(0x25020018)
#define PERF_TMR_FIRE   KDBG_EVENTID(DBG_PERF, 3, 0)
#define PERF_TMR_HNDLR  KDBG_EVENTID(DBG_PERF, 3, 2)
#define PERF_TMR_PEND   KDBG_EVENTID(DBG_PERF, 3, 3)
#define PERF_TMR_SKIP   KDBG_EVENTID(DBG_PERF, 3, 4)
#define PERF_KPC_CONFIG KDBG_EVENTID(DBG_PERF, 6, 4)
#define PERF_KPC_REG    KDBG_EVENTID(DBG_PERF, 6, 5)
#define PERF_KPC_REG32  KDBG_EVENTID(DBG_PERF, 6, 7)
#define PERF_INSTR_DATA KDBG_EVENTID(DBG_PERF, 1, 17)
#define PERF_EVENT      KDBG_EVENTID(DBG_PERF, 0, 0)
#define SCHED_HANDOFF KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, \
		MACH_STACK_HANDOFF)
#define SCHED_SWITCH  KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_SCHED)
#define SCHED_IDLE    KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_IDLE)

#define MP_CPUS_CALL UINT32_C(0x1900004)

#define DISPATCH_AFTER_EVENT UINT32_C(0xfefffffc)
#define TIMEOUT_SECS 10

#define TIMER_PERIOD_NS (1 * NSEC_PER_MSEC)
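/*
 * For reference: KDBG_EVENTID(class, subclass, code) packs a debugid as
 * (class << 24) | (subclass << 16) | (code << 2), so the raw constants above
 * (e.g. 0x25020014 for PERF_STK_KHDR) are just pre-encoded DBG_PERF (0x25)
 * events.  See <sys/kdebug.h> for the authoritative macros.
 */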
/*
 * Ensure that kperf is correctly IPIing CPUs that are actively scheduling by
 * bringing up threads and ensuring that threads on-core are sampled by each
 * timer fire.
 */
T_DECL(ipi_active_cpus,
    "make sure that kperf IPIs all active CPUs")
{
	start_controlling_ktrace();

	int ncpus = dt_ncpu();
	T_ASSERT_LT(ncpus, MAX_CPUS,
	    "only supports up to %d CPUs", MAX_CPUS);
	T_LOG("found %d CPUs", ncpus);

	int nthreads = ncpus - 1;
	T_ASSERT_LT(nthreads, MAX_THREADS,
	    "only supports up to %d threads", MAX_THREADS);

	static pthread_t threads[MAX_THREADS];
	/*
	 * TODO options to write this to a file and reinterpret a file...
	 */

	/*
	 * Create threads to bring up all of the CPUs.
	 */
	dispatch_semaphore_t thread_spinning = dispatch_semaphore_create(0);

	for (int i = 0; i < nthreads; i++) {
		T_ASSERT_POSIX_ZERO(pthread_create(&threads[i], NULL,
		    &spinning_thread, &thread_spinning), NULL);
		dispatch_semaphore_wait(thread_spinning, DISPATCH_TIME_FOREVER);
	}
111 T_LOG("spun up %d thread%s", nthreads
, nthreads
== 1 ? "" : "s");
	ktrace_session_t s = ktrace_session_create();
	T_WITH_ERRNO; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	dispatch_queue_t q = dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0);
	/*
	 * Only set the timeout after we've seen an event that was traced by us.
	 * This helps set a reasonable timeout after we're guaranteed to get a
	 * sample.
	 */
	ktrace_events_single(s, DISPATCH_AFTER_EVENT,
	    ^(__unused struct trace_point *tp)
	{
		dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
		    TIMEOUT_SECS * NSEC_PER_SEC), q, ^{
	__block uint64_t nfires = 0;
	__block uint64_t nsamples = 0;
	static uint64_t idle_tids[MAX_CPUS] = { 0 };
	__block int nidles = 0;
	ktrace_set_completion_handler(s, ^{
		T_LOG("stopping threads");

		running_threads = false;

		for (int i = 0; i < nthreads; i++) {
			T_ASSERT_POSIX_ZERO(pthread_join(threads[i], NULL), NULL);
		}

		for (int i = 0; i < nidles; i++) {
			T_LOG("CPU %d idle thread: %#" PRIx64, i, idle_tids[i]);
		}

		T_LOG("saw %" PRIu64 " timer fires, %" PRIu64 " samples, "
		    "%g samples/fire", nfires, nsamples,
		    (double)nsamples / (double)nfires);
	/*
	 * Track which threads are running on each CPU.
	 */
	static uint64_t tids_on_cpu[MAX_CPUS] = { 0 };
	void (^switch_cb)(struct trace_point *) = ^(struct trace_point *tp) {
		uint64_t new_thread = tp->arg2;
		// uint64_t old_thread = tp->threadid;

		for (int i = 0; i < nidles; i++) {
			if (idle_tids[i] == new_thread) {
				return;
			}
		}

		tids_on_cpu[tp->cpuid] = new_thread;
	};
	ktrace_events_single(s, SCHED_SWITCH, switch_cb);
	ktrace_events_single(s, SCHED_HANDOFF, switch_cb);
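	/*
	 * Both context switches and stack handoffs run switch_cb, so
	 * tids_on_cpu tracks whichever thread most recently went on-core on
	 * each CPU, with idle threads filtered out above.
	 */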
	/*
	 * Determine the thread IDs of the idle threads on each CPU.
	 */
	ktrace_events_single(s, SCHED_IDLE, ^(struct trace_point *tp) {
		uint64_t idle_thread = tp->threadid;

		tids_on_cpu[tp->cpuid] = 0;

		for (int i = 0; i < nidles; i++) {
			if (idle_tids[i] == idle_thread) {
				return;
			}
		}

		idle_tids[nidles++] = idle_thread;
	});
	/*
	 * On each timer fire, go through all the cores and mark any threads
	 * that should be sampled.
	 */
	__block int last_fire_cpu = -1;
	__block uint64_t sample_missing = 0;
	static uint64_t tids_snap[MAX_CPUS] = { 0 };
	__block int nexpected = 0;
#if defined(__x86_64__)
	__block int xcall_from_cpu = -1;
#endif /* defined(__x86_64__) */
	__block uint64_t xcall_mask = 0;
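	/*
	 * The bookkeeping below works as a bitmap: each timer fire snapshots
	 * the CPUs that had a non-idle thread on-core into sample_missing and
	 * tids_snap, each CPU's timer handler clears its own bit, and any bit
	 * still set at the next fire is reported as a missed sample.
	 */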
	ktrace_events_single(s, PERF_TMR_FIRE, ^(struct trace_point *tp) {
		int last_expected = nexpected;

		for (int i = 0; i < ncpus; i++) {
			uint64_t i_bit = UINT64_C(1) << i;
			if (sample_missing & i_bit) {
				T_LOG("missed sample on CPU %d for thread %#llx from timer on CPU %d (xcall mask = %llx, expected %d samples)",
				    tp->cpuid, tids_snap[i], last_fire_cpu,
				    xcall_mask, last_expected);
				sample_missing &= ~i_bit;
			}

			if (tids_on_cpu[i] != 0) {
				tids_snap[i] = tids_on_cpu[i];
				sample_missing |= i_bit;
			}
		}

		T_ASSERT_LT((int)tp->cpuid, ncpus,
		    "timer fire should not occur on an IOP");
		last_fire_cpu = (int)tp->cpuid;
#if defined(__x86_64__)
		xcall_from_cpu = (int)tp->cpuid;
#endif /* defined(__x86_64__) */
	});
#if defined(__x86_64__)
	/*
	 * Watch for the cross-call on Intel, make sure they match what kperf
	 * expects.
	 */
	ktrace_events_single(s, MP_CPUS_CALL, ^(struct trace_point *tp) {
		if (xcall_from_cpu != (int)tp->cpuid) {
			return;
		}

		xcall_mask = tp->arg1;
	});
#endif /* defined(__x86_64__) */
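	/*
	 * arg1 of MP_CPUS_CALL (presumably the mask of CPUs being cross-called)
	 * is captured as xcall_mask so the missed-sample log above can report
	 * which CPUs kperf asked to interrupt from the last timer fire.
	 */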
	/*
	 * On the timer handler for each CPU, unset the missing sample bitmap.
	 */
	ktrace_events_single(s, PERF_TMR_HNDLR, ^(struct trace_point *tp) {
		if ((int)tp->cpuid > ncpus) {
			/* skip IOPs; they're not scheduling our threads */
			return;
		}

		sample_missing &= ~(UINT64_C(1) << tp->cpuid);
	});
	/*
	 * Configure kperf and ktrace.
	 */
	(void)kperf_action_count_set(1);
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_KSTACK),
	    NULL);

	(void)kperf_timer_count_set(1);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(s,
	    dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)), NULL);

	kdebug_trace(DISPATCH_AFTER_EVENT, 0, 0, 0, 0);
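	/*
	 * Emitting DISPATCH_AFTER_EVENT here arms the TIMEOUT_SECS timeout
	 * registered at the top of the test, so the session only ends after
	 * tracing has demonstrably started.
	 */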
#pragma mark kdebug triggers

#define KDEBUG_TRIGGER_TIMEOUT_NS (10 * NSEC_PER_SEC)

#define NON_TRIGGER_CLASS    UINT32_C(0xfd)
#define NON_TRIGGER_SUBCLASS UINT32_C(0xff)
#define NON_TRIGGER_CODE     UINT32_C(0xff)

#define NON_TRIGGER_EVENT \
		(KDBG_EVENTID(NON_TRIGGER_CLASS, NON_TRIGGER_SUBCLASS, \
		NON_TRIGGER_CODE))
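/*
 * The NON_TRIGGER_* event deliberately falls outside every trigger filter
 * used below; tracing it after the trigger debugids gives each run a final
 * marker that should not itself produce a kperf sample.
 */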
static void
expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids,
    unsigned int n_debugids)
{
	__block int missing_kernel_stacks = 0;
	__block int missing_user_stacks = 0;
	kperf_kdebug_filter_t filter;

	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, NULL);
	ktrace_events_single(s, PERF_STK_KHDR, ^(struct trace_point *tp) {
		missing_kernel_stacks--;
		T_LOG("saw kernel stack with %" PRIu64 " frames, flags = %#"
		    PRIx64, tp->arg2, tp->arg1);
	});
	ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
		missing_user_stacks--;
		T_LOG("saw user stack with %" PRIu64 " frames, flags = %#"
		    PRIx64, tp->arg2, tp->arg1);
	});
	for (unsigned int i = 0; i < n_debugids; i++) {
		ktrace_events_single(s, debugids[i], ^(struct trace_point *tp) {
			missing_kernel_stacks++;
			missing_user_stacks++;
			T_LOG("saw event with debugid 0x%" PRIx32, tp->debugid);
		});
	}
	ktrace_events_single(s, NON_TRIGGER_EVENT,
	    ^(__unused struct trace_point *tp)
	ktrace_set_completion_handler(s, ^{
		T_EXPECT_LE(missing_kernel_stacks, 0, NULL);
		T_EXPECT_LE(missing_user_stacks, 0, NULL);

		ktrace_session_destroy(s);
	/* configure kperf */

	(void)kperf_action_count_set(1);
	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_KSTACK | KPERF_SAMPLER_USTACK), NULL);

	filter = kperf_kdebug_filter_create();
	T_ASSERT_NOTNULL(filter, NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_desc(filter, filter_desc),
	    NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
	kperf_kdebug_filter_destroy(filter);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);
	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	/* trace the triggering debugids */

	for (unsigned int i = 0; i < n_debugids; i++) {
		T_ASSERT_POSIX_SUCCESS(kdebug_trace(debugids[i], 0, 0, 0, 0), NULL);
	}

	T_ASSERT_POSIX_SUCCESS(kdebug_trace(NON_TRIGGER_EVENT, 0, 0, 0, 0), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, KDEBUG_TRIGGER_TIMEOUT_NS),
	    dispatch_get_main_queue(), ^(void)
#define TRIGGER_CLASS     UINT32_C(0xfe)
#define TRIGGER_CLASS_END UINT32_C(0xfd)
#define TRIGGER_SUBCLASS  UINT32_C(0xff)
#define TRIGGER_CODE      UINT32_C(0)
#define TRIGGER_DEBUGID \
		(KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, TRIGGER_CODE))
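/*
 * The filter descriptors handed to expect_kdebug_trigger() below follow the
 * kperf kdebug filter syntax: "C<class>" matches a whole class, "S<csc>" a
 * class/subclass pair, and "D<debugid>" a single debugid; the trailing "r"
 * appears to limit the preceding specifier to end (DBG_FUNC_END) events,
 * matching the DBG_FUNC_END debugids traced for TRIGGER_CLASS_END.
 */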
T_DECL(kdebug_trigger_classes,
    "test that kdebug trigger samples on classes")
{
	start_controlling_ktrace();

	const uint32_t class_debugids[] = {
		KDBG_EVENTID(TRIGGER_CLASS, 1, 1),
		KDBG_EVENTID(TRIGGER_CLASS, 2, 1),
		KDBG_EVENTID(TRIGGER_CLASS_END, 1, 1) | DBG_FUNC_END,
		KDBG_EVENTID(TRIGGER_CLASS_END, 2, 1) | DBG_FUNC_END,
	};

	expect_kdebug_trigger("C0xfe,C0xfdr", class_debugids,
	    sizeof(class_debugids) / sizeof(class_debugids[0]));
T_DECL(kdebug_trigger_subclasses,
    "test that kdebug trigger samples on subclasses")
{
	start_controlling_ktrace();

	const uint32_t subclass_debugids[] = {
		KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 0),
		KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 1),
		KDBG_EVENTID(TRIGGER_CLASS_END, TRIGGER_SUBCLASS, 0) | DBG_FUNC_END,
		KDBG_EVENTID(TRIGGER_CLASS_END, TRIGGER_SUBCLASS, 1) | DBG_FUNC_END
	};

	expect_kdebug_trigger("S0xfeff,S0xfdffr", subclass_debugids,
	    sizeof(subclass_debugids) / sizeof(subclass_debugids[0]));
T_DECL(kdebug_trigger_debugids,
    "test that kdebug trigger samples on debugids")
{
	start_controlling_ktrace();

	const uint32_t debugids[] = {
		TRIGGER_DEBUGID,
	};

	expect_kdebug_trigger("D0xfeff0000", debugids,
	    sizeof(debugids) / sizeof(debugids[0]));
/*
 * TODO Set a single function specifier filter and expect it not to trigger on
 * all events from that class.
 */
T_DECL(kdbg_callstacks,
    "test that kdbg_callstacks samples on syscalls")
{
	start_controlling_ktrace();

	__block bool saw_user_stack = false;

	s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, NULL);

	/*
	 * Make sure BSD events are traced in order to trigger samples on syscalls.
	 */
	ktrace_events_class(s, DBG_BSD, ^void (__unused struct trace_point *tp) {});

	ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
		saw_user_stack = true;

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);

		T_EXPECT_TRUE(saw_user_stack,
		    "saw user stack after configuring kdbg_callstacks");
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
	T_ASSERT_POSIX_SUCCESS(kperf_kdbg_callstacks_set(1), NULL);
#pragma clang diagnostic pop
	T_ATEND(reset_kperf);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
#define STACKS_WAIT_DURATION_NS (3 * NSEC_PER_SEC)

static void
expect_stacks_traced(void (^cb)(void))
{
	s = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(s, "ktrace_session_create");

	__block unsigned int user_stacks = 0;
	__block unsigned int kernel_stacks = 0;

	ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
		user_stacks++;
	});
	ktrace_events_single(s, PERF_STK_KHDR, ^(__unused struct trace_point *tp) {
		kernel_stacks++;
	});

	ktrace_set_completion_handler(s, ^(void) {
		ktrace_session_destroy(s);
		T_EXPECT_GT(user_stacks, 0U, NULL);
		T_EXPECT_GT(kernel_stacks, 0U, NULL);
		cb();
	});

	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, STACKS_WAIT_DURATION_NS),
	    dispatch_get_main_queue(), ^(void)
T_DECL(pet, "test that PET mode samples kernel and user stacks")
{
	start_controlling_ktrace();

	configure_kperf_stacks_timer(-1, 10);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);
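	/*
	 * kperf_timer_pet_set(0) designates timer 0 as the PET
	 * (profile-every-thread) timer, which samples all threads on each fire
	 * rather than only the threads currently on-core.
	 */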
	expect_stacks_traced(^(void) {
T_DECL(lightweight_pet,
    "test that lightweight PET mode samples kernel and user stacks",

	start_controlling_ktrace();

	configure_kperf_stacks_timer(-1, 10);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("kperf.lightweight_pet", NULL, NULL,
	    &set, sizeof(set)), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);
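	/*
	 * Lightweight PET is toggled through the kperf.lightweight_pet sysctl
	 * on top of the same PET timer; it presumably defers each thread's
	 * sample to the next time that thread runs instead of walking every
	 * thread from the timer.
	 */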
	expect_stacks_traced(^(void) {

T_DECL(pet_stress, "repeatedly enable and disable PET mode")
{
	start_controlling_ktrace();

	configure_kperf_stacks_timer(-1, 10);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);
T_DECL(timer_stress, "repeatedly enable and disable timers")
{
	start_controlling_ktrace();

	configure_kperf_stacks_timer(-1, 1);
T_DECL(pmc_config_only, "shouldn't show PMC config events unless requested")
{
	start_controlling_ktrace();

	__block bool saw_kpc_config = false;
	__block bool saw_kpc_reg = false;

	ktrace_session_t s = ktrace_session_create();
	T_ASSERT_NOTNULL(s, "ktrace_session_create");

	/*
	 * Make sure BSD events are traced in order to trigger samples on syscalls.
	 */
	ktrace_events_single(s, PERF_KPC_CONFIG,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_config = true;
	});
	ktrace_events_single(s, PERF_KPC_REG,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_reg = true;
	});
	ktrace_events_single(s, PERF_KPC_REG32,
	    ^(__unused struct trace_point *tp) {
		saw_kpc_reg = true;
	});

	ktrace_set_completion_handler(s, ^{
		ktrace_session_destroy(s);
		T_EXPECT_FALSE(saw_kpc_config,
		    "should see no KPC configs without sampler enabled");
		T_EXPECT_FALSE(saw_kpc_reg,
		    "should see no KPC registers without sampler enabled");
	uint32_t nconfigs = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
	uint64_t *config = calloc(nconfigs, sizeof(*config));

	int ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);
	T_ASSERT_POSIX_SUCCESS(ret, "configured kpc");
	T_ASSERT_POSIX_SUCCESS(kpc_set_counting(KPC_CLASS_CONFIGURABLE_MASK),
	(void)kperf_action_count_set(1);
	T_ATEND(reset_kperf);

	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_PMC_CPU),
	    NULL);

	(void)kperf_timer_count_set(1);

	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);

	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {
static void
skip_if_monotonic_unsupported(void)
{
	int supported = 0;
	size_t supported_size = sizeof(supported);

	int r = sysctlbyname("kern.monotonic.supported", &supported,
	    &supported_size, NULL, 0);
	if (r < 0) {
		T_SKIP("could not find \"kern.monotonic.supported\" sysctl");
	}

	if (!supported) {
		T_SKIP("monotonic is not supported on this platform");
	}
}
#define INSTRS_CYCLES_UPPER 500
#define INSTRS_CYCLES_LOWER 50
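/*
 * The PERF_INSTR_DATA handler below appears to stop collecting once
 * INSTRS_CYCLES_UPPER events have been seen, and the completion handler
 * requires at least INSTRS_CYCLES_LOWER of them; nzeroes counts any samples
 * that reported zero instructions, which the test expects never to happen.
 */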
T_DECL(instrs_cycles, "ensure instructions and cycles are sampled")
{
	skip_if_monotonic_unsupported();

	start_controlling_ktrace();

	ktrace_session_t sess = ktrace_session_create();

	__block uint64_t ninstrs_cycles = 0;
	__block uint64_t nzeroes = 0;
	ktrace_events_single(sess, PERF_INSTR_DATA,
	    ^(__unused struct trace_point *tp) {
		T_LOG("%llx (%s)\n", tp->threadid, tp->command);

		if (ninstrs_cycles >= INSTRS_CYCLES_UPPER) {
	ktrace_set_collection_interval(sess, 200);

	ktrace_set_completion_handler(sess, ^{
		T_EXPECT_GE(ninstrs_cycles, (uint64_t)INSTRS_CYCLES_LOWER,
		    "saw enough instructions and cycles events");
		T_EXPECT_EQ(nzeroes, UINT64_C(0),
		    "saw no events with 0 instructions");
	(void)kperf_action_count_set(1);
	T_ATEND(reset_kperf);

	T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
	    KPERF_SAMPLER_TH_INSTRS_CYCLES), NULL);

	(void)kperf_timer_count_set(1);

	T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
	    kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);

	T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");

	T_ASSERT_POSIX_ZERO(ktrace_start(sess, dispatch_get_main_queue()),
	dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
	    dispatch_get_main_queue(), ^(void) {