#include <ktrace/session.h>
#include <ktrace/private.h>
#include <System/sys/kdebug.h>
+#include <kperf/kpc.h>
#include <kperf/kperf.h>
#include <kperfdata/kpdecode.h>
#include <os/assumes.h>
#include <sys/sysctl.h>
#include "kperf_helpers.h"
+#include "ktrace_helpers.h"
T_GLOBAL_META(
- T_META_NAMESPACE("xnu.kperf"),
- T_META_CHECK_LEAKS(false));
+ T_META_NAMESPACE("xnu.kperf"),
+ T_META_CHECK_LEAKS(false),
+ T_META_ASROOT(true));
#define MAX_CPUS 64
#define MAX_THREADS 64
T_ASSERT_NOTNULL(semp, "semaphore passed to thread should not be NULL");
dispatch_semaphore_signal(*(dispatch_semaphore_t *)semp);
- while (running_threads);
+ while (running_threads) {
+ ;
+ }
return NULL;
}
-#define PERF_STK_KHDR UINT32_C(0x25020014)
-#define PERF_STK_UHDR UINT32_C(0x25020018)
-#define PERF_TMR_FIRE KDBG_EVENTID(DBG_PERF, 3, 0)
-#define PERF_TMR_HNDLR KDBG_EVENTID(DBG_PERF, 3, 2)
-#define PERF_TMR_PEND KDBG_EVENTID(DBG_PERF, 3, 3)
-#define PERF_TMR_SKIP KDBG_EVENTID(DBG_PERF, 3, 4)
+#define PERF_STK_KHDR UINT32_C(0x25020014)
+#define PERF_STK_UHDR UINT32_C(0x25020018)
+#define PERF_TMR_FIRE KDBG_EVENTID(DBG_PERF, 3, 0)
+#define PERF_TMR_HNDLR KDBG_EVENTID(DBG_PERF, 3, 2)
+#define PERF_TMR_PEND KDBG_EVENTID(DBG_PERF, 3, 3)
+#define PERF_TMR_SKIP KDBG_EVENTID(DBG_PERF, 3, 4)
+#define PERF_KPC_CONFIG KDBG_EVENTID(DBG_PERF, 6, 4)
+#define PERF_KPC_REG KDBG_EVENTID(DBG_PERF, 6, 5)
+#define PERF_KPC_REG32 KDBG_EVENTID(DBG_PERF, 6, 7)
+#define PERF_INSTR_DATA KDBG_EVENTID(DBG_PERF, 1, 17)
#define SCHED_HANDOFF KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, \
- MACH_STACK_HANDOFF)
+ MACH_STACK_HANDOFF)
#define SCHED_SWITCH KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_SCHED)
#define SCHED_IDLE KDBG_EVENTID(DBG_MACH, DBG_MACH_SCHED, MACH_IDLE)
#define TIMER_PERIOD_NS (1 * NSEC_PER_MSEC)
-static void
-reset_ktrace(void)
-{
- kperf_reset();
-}
-
/*
* Ensure that kperf is correctly IPIing CPUs that are actively scheduling by
* bringing up threads and ensuring that threads on-core are sampled by each
*/
T_DECL(ipi_active_cpus,
- "make sure that kperf IPIs all active CPUs",
- T_META_ASROOT(true))
+ "make sure that kperf IPIs all active CPUs")
{
+ start_controlling_ktrace();
+
int ncpus = dt_ncpu();
T_QUIET;
T_ASSERT_LT(ncpus, MAX_CPUS,
- "only supports up to %d CPUs", MAX_CPUS);
+ "only supports up to %d CPUs", MAX_CPUS);
T_LOG("found %d CPUs", ncpus);
int nthreads = ncpus - 1;
T_QUIET;
T_ASSERT_LT(nthreads, MAX_THREADS,
- "only supports up to %d threads", MAX_THREADS);
+ "only supports up to %d threads", MAX_THREADS);
static pthread_t threads[MAX_THREADS];
for (int i = 0; i < nthreads; i++) {
T_QUIET;
T_ASSERT_POSIX_ZERO(
- pthread_create(&threads[i], NULL, &spinning_thread,
- &thread_spinning), NULL);
+ pthread_create(&threads[i], NULL, &spinning_thread,
+ &thread_spinning), NULL);
dispatch_semaphore_wait(thread_spinning, DISPATCH_TIME_FOREVER);
}
*/
ktrace_events_single(s, DISPATCH_AFTER_EVENT,
- ^(__unused struct trace_point *tp)
+ ^(__unused struct trace_point *tp)
{
dispatch_after(dispatch_time(DISPATCH_TIME_NOW,
- TIMEOUT_SECS * NSEC_PER_SEC), q, ^{
+ TIMEOUT_SECS * NSEC_PER_SEC), q, ^{
ktrace_end(s, 0);
});
});
running_threads = false;
for (int i = 0; i < nthreads; i++) {
- T_QUIET;
- T_ASSERT_POSIX_ZERO(pthread_join(threads[i], NULL), NULL);
+ T_QUIET;
+ T_ASSERT_POSIX_ZERO(pthread_join(threads[i], NULL), NULL);
}
for (int i = 0; i < nidles; i++) {
- T_LOG("CPU %d idle thread: %#" PRIx64, i, idle_tids[i]);
+ T_LOG("CPU %d idle thread: %#" PRIx64, i, idle_tids[i]);
}
T_LOG("saw %" PRIu64 " timer fires, %" PRIu64 " samples, "
- "%g samples/fire", nfires, nsamples,
- (double)nsamples / (double)nfires);
+ "%g samples/fire", nfires, nsamples,
+ (double)nsamples / (double)nfires);
T_END;
});
tids_on_cpu[tp->cpuid] = 0;
for (int i = 0; i < nidles; i++) {
- if (idle_tids[i] == idle_thread) {
- return;
+ if (idle_tids[i] == idle_thread) {
+ return;
}
}
nexpected = 0;
for (int i = 0; i < ncpus; i++) {
- uint64_t i_bit = UINT64_C(1) << i;
- if (sample_missing & i_bit) {
- T_LOG("missed sample on CPU %d for thread %#llx from timer on CPU %d (xcall mask = %llx, expected %d samples)",
- tp->cpuid, tids_snap[i], last_fire_cpu,
- xcall_mask, last_expected);
- sample_missing &= ~i_bit;
+ uint64_t i_bit = UINT64_C(1) << i;
+ if (sample_missing & i_bit) {
+ T_LOG("missed sample on CPU %d for thread %#llx from timer on CPU %d (xcall mask = %llx, expected %d samples)",
+ tp->cpuid, tids_snap[i], last_fire_cpu,
+ xcall_mask, last_expected);
+ sample_missing &= ~i_bit;
}
- if (tids_on_cpu[i] != 0) {
- tids_snap[i] = tids_on_cpu[i];
- sample_missing |= i_bit;
- nexpected++;
+ if (tids_on_cpu[i] != 0) {
+ tids_snap[i] = tids_on_cpu[i];
+ sample_missing |= i_bit;
+ nexpected++;
}
}
ktrace_events_single(s, MP_CPUS_CALL, ^(struct trace_point *tp) {
if (xcall_from_cpu != (int)tp->cpuid) {
- return;
+ return;
}
xcall_mask = tp->arg1;
ktrace_events_single(s, PERF_TMR_HNDLR, ^(struct trace_point *tp) {
nsamples++;
if ((int)tp->cpuid > ncpus) {
- /* skip IOPs; they're not scheduling our threads */
- return;
+ /* skip IOPs; they're not scheduling our threads */
+ return;
}
sample_missing &= ~(UINT64_C(1) << tp->cpuid);
(void)kperf_action_count_set(1);
T_QUIET;
T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_KSTACK),
- NULL);
+ NULL);
(void)kperf_timer_count_set(1);
T_QUIET;
T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
- kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
+ kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
T_QUIET;
T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);
T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");
- T_ATEND(reset_ktrace);
T_ASSERT_POSIX_ZERO(ktrace_start(s,
- dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)),
- "start ktrace");
+ dispatch_get_global_queue(QOS_CLASS_USER_INITIATED, 0)),
+ "start ktrace");
kdebug_trace(DISPATCH_AFTER_EVENT, 0, 0, 0, 0);
#define KDEBUG_TRIGGER_TIMEOUT_NS (10 * NSEC_PER_SEC)
-#define NON_TRIGGER_CLASS UINT8_C(0xfd)
-#define NON_TRIGGER_SUBCLASS UINT8_C(0xff)
-#define NON_TRIGGER_CODE UINT8_C(0xff)
+#define NON_TRIGGER_CLASS UINT32_C(0xfd)
+#define NON_TRIGGER_SUBCLASS UINT32_C(0xff)
+#define NON_TRIGGER_CODE UINT32_C(0xff)
#define NON_TRIGGER_EVENT \
- (KDBG_EVENTID(NON_TRIGGER_CLASS, NON_TRIGGER_SUBCLASS, \
- NON_TRIGGER_CODE))
+ (KDBG_EVENTID(NON_TRIGGER_CLASS, NON_TRIGGER_SUBCLASS, \
+ NON_TRIGGER_CODE))
static void
expect_kdebug_trigger(const char *filter_desc, const uint32_t *debugids,
- unsigned int n_debugids)
+ unsigned int n_debugids)
{
__block int missing_kernel_stacks = 0;
__block int missing_user_stacks = 0;
T_QUIET; T_ASSERT_NOTNULL(s, NULL);
ktrace_events_single(s, PERF_STK_KHDR, ^(struct trace_point *tp) {
- missing_kernel_stacks--;
- T_LOG("saw kernel stack with %lu frames, flags = %#lx", tp->arg2,
- tp->arg1);
- });
+ missing_kernel_stacks--;
+ T_LOG("saw kernel stack with %" PRIu64 " frames, flags = %#"
+ PRIx64, tp->arg2, tp->arg1);
+ });
ktrace_events_single(s, PERF_STK_UHDR, ^(struct trace_point *tp) {
- missing_user_stacks--;
- T_LOG("saw user stack with %lu frames, flags = %#lx", tp->arg2,
- tp->arg1);
- });
+ missing_user_stacks--;
+ T_LOG("saw user stack with %" PRIu64 " frames, flags = %#"
+ PRIx64, tp->arg2, tp->arg1);
+ });
for (unsigned int i = 0; i < n_debugids; i++) {
ktrace_events_single(s, debugids[i], ^(struct trace_point *tp) {
- missing_kernel_stacks++;
- missing_user_stacks++;
- T_LOG("saw event with debugid 0x%" PRIx32, tp->debugid);
- });
+ missing_kernel_stacks++;
+ missing_user_stacks++;
+ T_LOG("saw event with debugid 0x%" PRIx32, tp->debugid);
+ });
}
ktrace_events_single(s, NON_TRIGGER_EVENT,
- ^(__unused struct trace_point *tp)
- {
- ktrace_end(s, 0);
- });
+ ^(__unused struct trace_point *tp)
+ {
+ ktrace_end(s, 0);
+ });
ktrace_set_completion_handler(s, ^{
- T_EXPECT_LE(missing_kernel_stacks, 0, NULL);
- T_EXPECT_LE(missing_user_stacks, 0, NULL);
+ T_EXPECT_LE(missing_kernel_stacks, 0, NULL);
+ T_EXPECT_LE(missing_user_stacks, 0, NULL);
- ktrace_session_destroy(s);
- T_END;
- });
+ ktrace_session_destroy(s);
+ T_END;
+ });
/* configure kperf */
(void)kperf_action_count_set(1);
T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
- KPERF_SAMPLER_KSTACK | KPERF_SAMPLER_USTACK), NULL);
+ KPERF_SAMPLER_KSTACK | KPERF_SAMPLER_USTACK), NULL);
filter = kperf_kdebug_filter_create();
T_ASSERT_NOTNULL(filter, NULL);
T_ASSERT_POSIX_SUCCESS(kperf_kdebug_action_set(1), NULL);
T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_add_desc(filter, filter_desc),
- NULL);
+ NULL);
T_ASSERT_POSIX_SUCCESS(kperf_kdebug_filter_set(filter), NULL);
kperf_kdebug_filter_destroy(filter);
T_ASSERT_POSIX_SUCCESS(kdebug_trace(NON_TRIGGER_EVENT, 0, 0, 0, 0), NULL);
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, KDEBUG_TRIGGER_TIMEOUT_NS),
- dispatch_get_main_queue(), ^(void)
- {
- ktrace_end(s, 1);
- });
+ dispatch_get_main_queue(), ^(void)
+ {
+ ktrace_end(s, 1);
+ });
}
-#define TRIGGER_CLASS UINT8_C(0xfe)
-#define TRIGGER_CLASS_END UINT8_C(0xfd)
-#define TRIGGER_SUBCLASS UINT8_C(0xff)
-#define TRIGGER_CODE UINT8_C(0)
+#define TRIGGER_CLASS UINT32_C(0xfe)
+#define TRIGGER_CLASS_END UINT32_C(0xfd)
+#define TRIGGER_SUBCLASS UINT32_C(0xff)
+#define TRIGGER_CODE UINT32_C(0)
#define TRIGGER_DEBUGID \
- (KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, TRIGGER_CODE))
+ (KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, TRIGGER_CODE))
T_DECL(kdebug_trigger_classes,
- "test that kdebug trigger samples on classes",
- T_META_ASROOT(true))
+ "test that kdebug trigger samples on classes")
{
+ start_controlling_ktrace();
+
const uint32_t class_debugids[] = {
KDBG_EVENTID(TRIGGER_CLASS, 1, 1),
KDBG_EVENTID(TRIGGER_CLASS, 2, 1),
};
expect_kdebug_trigger("C0xfe,C0xfdr", class_debugids,
- sizeof(class_debugids) / sizeof(class_debugids[0]));
+ sizeof(class_debugids) / sizeof(class_debugids[0]));
dispatch_main();
}
T_DECL(kdebug_trigger_subclasses,
- "test that kdebug trigger samples on subclasses",
- T_META_ASROOT(true))
+ "test that kdebug trigger samples on subclasses")
{
+ start_controlling_ktrace();
+
const uint32_t subclass_debugids[] = {
KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 0),
KDBG_EVENTID(TRIGGER_CLASS, TRIGGER_SUBCLASS, 1),
};
expect_kdebug_trigger("S0xfeff,S0xfdffr", subclass_debugids,
- sizeof(subclass_debugids) / sizeof(subclass_debugids[0]));
+ sizeof(subclass_debugids) / sizeof(subclass_debugids[0]));
dispatch_main();
}
T_DECL(kdebug_trigger_debugids,
- "test that kdebug trigger samples on debugids",
- T_META_ASROOT(true))
+ "test that kdebug trigger samples on debugids")
{
+ start_controlling_ktrace();
+
const uint32_t debugids[] = {
TRIGGER_DEBUGID
};
expect_kdebug_trigger("D0xfeff0000", debugids,
- sizeof(debugids) / sizeof(debugids[0]));
+ sizeof(debugids) / sizeof(debugids[0]));
dispatch_main();
}
* events from that class.
*/
/*
 * kperf_reset returns a status code, but T_ATEND handlers must have a
 * void(void) signature -- this wrapper discards the result, since there is
 * nothing useful to do if cleanup fails at test exit.
 */
static void
reset_kperf(void)
{
	(void)kperf_reset();
}

T_DECL(kdbg_callstacks,
- "test that the kdbg_callstacks samples on syscalls",
- T_META_ASROOT(true))
+ "test that the kdbg_callstacks samples on syscalls")
{
+ start_controlling_ktrace();
+
ktrace_session_t s;
__block bool saw_user_stack = false;
/*
* Make sure BSD events are traced in order to trigger samples on syscalls.
*/
- ktrace_events_class(s, DBG_BSD, ^void(__unused struct trace_point *tp) {});
+ ktrace_events_class(s, DBG_BSD, ^void (__unused struct trace_point *tp) {});
ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
saw_user_stack = true;
ktrace_session_destroy(s);
T_EXPECT_TRUE(saw_user_stack,
- "saw user stack after configuring kdbg_callstacks");
+ "saw user stack after configuring kdbg_callstacks");
T_END;
});
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
T_ASSERT_POSIX_SUCCESS(kperf_kdbg_callstacks_set(1), NULL);
#pragma clang diagnostic pop
- T_ATEND(kperf_reset);
+ T_ATEND(reset_kperf);
T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
- dispatch_get_main_queue(), ^(void) {
+ dispatch_get_main_queue(), ^(void) {
ktrace_end(s, 1);
});
__block unsigned int kernel_stacks = 0;
ktrace_events_single(s, PERF_STK_UHDR, ^(__unused struct trace_point *tp) {
- user_stacks++;
- });
+ user_stacks++;
+ });
ktrace_events_single(s, PERF_STK_KHDR, ^(__unused struct trace_point *tp) {
- kernel_stacks++;
- });
+ kernel_stacks++;
+ });
ktrace_set_completion_handler(s, ^(void) {
- ktrace_session_destroy(s);
- T_EXPECT_GT(user_stacks, 0U, NULL);
- T_EXPECT_GT(kernel_stacks, 0U, NULL);
- cb();
- });
+ ktrace_session_destroy(s);
+ T_EXPECT_GT(user_stacks, 0U, NULL);
+ T_EXPECT_GT(kernel_stacks, 0U, NULL);
+ cb();
+ });
T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), NULL);
T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, STACKS_WAIT_DURATION_NS),
- dispatch_get_main_queue(), ^(void)
- {
- kperf_reset();
- ktrace_end(s, 0);
- });
+ dispatch_get_main_queue(), ^(void)
+ {
+ kperf_reset();
+ ktrace_end(s, 0);
+ });
}
-T_DECL(pet, "test that PET mode samples kernel and user stacks",
- T_META_ASROOT(true))
+T_DECL(pet, "test that PET mode samples kernel and user stacks")
{
+ start_controlling_ktrace();
+
configure_kperf_stacks_timer(-1, 10);
T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);
expect_stacks_traced(^(void) {
- T_END;
- });
+ T_END;
+ });
dispatch_main();
}
/*
 * Same as the pet test, but with the kperf.lightweight_pet sysctl enabled
 * before the timer is started.
 *
 * Note: the per-test T_META_ASROOT(true) is dropped -- T_GLOBAL_META
 * already applies it to every test in this file.
 */
T_DECL(lightweight_pet,
    "test that lightweight PET mode samples kernel and user stacks")
{
	start_controlling_ktrace();

	int set = 1;
	configure_kperf_stacks_timer(-1, 10);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("kperf.lightweight_pet", NULL, NULL,
	    &set, sizeof(set)), NULL);
	T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);

	expect_stacks_traced(^(void) {
		T_END;
	});
	dispatch_main();
}
+
+T_DECL(pet_stress, "repeatedly enable and disable PET mode")
+{
+ start_controlling_ktrace();
+
+ int niters = 1000;
+ while (niters--) {
+ configure_kperf_stacks_timer(-1, 10);
+ T_QUIET; T_ASSERT_POSIX_SUCCESS(kperf_timer_pet_set(0), NULL);
+ usleep(20);
+ kperf_reset();
+ }
+ ;
+}
+
+T_DECL(timer_stress, "repeatedly enable and disable timers")
+{
+ start_controlling_ktrace();
+
+ int niters = 1000;
+ while (niters--) {
+ configure_kperf_stacks_timer(-1, 1);
+ usleep(20);
+ kperf_reset();
+ }
+ ;
+}
+
+T_DECL(pmc_config_only, "shouldn't show PMC config events unless requested")
+{
+ start_controlling_ktrace();
+
+ __block bool saw_kpc_config = false;
+ __block bool saw_kpc_reg = false;
+
+ ktrace_session_t s = ktrace_session_create();
+ T_ASSERT_NOTNULL(s, "ktrace_session_create");
+
+ /*
+ * Make sure BSD events are traced in order to trigger samples on syscalls.
+ */
+ ktrace_events_single(s, PERF_KPC_CONFIG,
+ ^(__unused struct trace_point *tp) {
+ saw_kpc_config = true;
+ });
+ ktrace_events_single(s, PERF_KPC_REG,
+ ^(__unused struct trace_point *tp) {
+ saw_kpc_reg = true;
+ });
+ ktrace_events_single(s, PERF_KPC_REG32,
+ ^(__unused struct trace_point *tp) {
+ saw_kpc_reg = true;
+ });
+
+ ktrace_set_completion_handler(s, ^{
+ ktrace_session_destroy(s);
+ T_EXPECT_FALSE(saw_kpc_config,
+ "should see no KPC configs without sampler enabled");
+ T_EXPECT_FALSE(saw_kpc_reg,
+ "should see no KPC registers without sampler enabled");
+ T_END;
+ });
+
+ uint32_t nconfigs = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
+ uint64_t *config = calloc(nconfigs, sizeof(*config));
+ config[0] = 0x02;
+ int ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);
+ T_ASSERT_POSIX_SUCCESS(ret, "configured kpc");
+ T_QUIET;
+ T_ASSERT_POSIX_SUCCESS(kpc_set_counting(KPC_CLASS_CONFIGURABLE_MASK),
+ "kpc_set_counting");
+
+ (void)kperf_action_count_set(1);
+ T_ATEND(reset_kperf);
+ T_QUIET;
+ T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1, KPERF_SAMPLER_PMC_CPU),
+ NULL);
+
+ (void)kperf_timer_count_set(1);
+ T_QUIET;
+ T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
+ kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
+ T_QUIET;
+ T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);
+
+ T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");
+
+ T_ASSERT_POSIX_ZERO(ktrace_start(s, dispatch_get_main_queue()), NULL);
+
+ dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
+ dispatch_get_main_queue(), ^(void) {
+ ktrace_end(s, 1);
+ });
+
+ dispatch_main();
+}
+
/*
 * Skip the current test if the kernel does not advertise monotonic
 * (per-thread counter) support via the kern.monotonic.supported sysctl.
 */
static void
skip_if_monotonic_unsupported(void)
{
	int monotonic_supported = 0;
	size_t monotonic_supported_size = sizeof(monotonic_supported);

	if (sysctlbyname("kern.monotonic.supported", &monotonic_supported,
	    &monotonic_supported_size, NULL, 0) < 0) {
		T_WITH_ERRNO;
		T_SKIP("could not find \"kern.monotonic.supported\" sysctl");
	}

	if (!monotonic_supported) {
		T_SKIP("monotonic is not supported on this platform");
	}
}
+
/*
 * Bounds on the instructions/cycles events collected by instrs_cycles:
 * tracing ends once UPPER events are seen, and at least LOWER are required
 * for the test to pass.
 */
#define INSTRS_CYCLES_UPPER 500
#define INSTRS_CYCLES_LOWER 50
+
+T_DECL(instrs_cycles, "ensure instructions and cycles are sampled")
+{
+ skip_if_monotonic_unsupported();
+
+ start_controlling_ktrace();
+
+ ktrace_session_t sess = ktrace_session_create();
+
+ __block uint64_t ninstrs_cycles = 0;
+ __block uint64_t nzeroes = 0;
+ ktrace_events_single(sess, PERF_INSTR_DATA,
+ ^(__unused struct trace_point *tp) {
+ ninstrs_cycles++;
+ if (tp->arg1 == 0) {
+ T_LOG("%llx (%s)\n", tp->threadid, tp->command);
+ nzeroes++;
+ }
+ if (ninstrs_cycles >= INSTRS_CYCLES_UPPER) {
+ ktrace_end(sess, 1);
+ }
+ });
+
+ ktrace_set_collection_interval(sess, 200);
+
+ ktrace_set_completion_handler(sess, ^{
+ T_EXPECT_GE(ninstrs_cycles, (uint64_t)INSTRS_CYCLES_LOWER,
+ "saw enough instructions and cycles events");
+ T_EXPECT_EQ(nzeroes, UINT64_C(0),
+ "saw no events with 0 instructions");
+ T_END;
+ });
+
+ (void)kperf_action_count_set(1);
+ T_ATEND(reset_kperf);
+ T_QUIET;
+ T_ASSERT_POSIX_SUCCESS(kperf_action_samplers_set(1,
+ KPERF_SAMPLER_TH_INSTRS_CYCLES), NULL);
+
+ (void)kperf_timer_count_set(1);
+ T_QUIET;
+ T_ASSERT_POSIX_SUCCESS(kperf_timer_period_set(0,
+ kperf_ns_to_ticks(TIMER_PERIOD_NS)), NULL);
+ T_QUIET;
+ T_ASSERT_POSIX_SUCCESS(kperf_timer_action_set(0, 1), NULL);
+
+ T_ASSERT_POSIX_SUCCESS(kperf_sample_set(1), "start kperf sampling");
+
+ T_ASSERT_POSIX_ZERO(ktrace_start(sess, dispatch_get_main_queue()),
+ NULL);
+
+ dispatch_after(dispatch_time(DISPATCH_TIME_NOW, 10 * NSEC_PER_SEC),
+ dispatch_get_main_queue(), ^(void) {
+ ktrace_end(sess, 1);
+ });
+
+ dispatch_main();
+}
+