// Copyright (c) 2018-2020 Apple Inc. All rights reserved.

#include <darwintest.h>
#include <dispatch/dispatch.h>
#include <errno.h>
#include <inttypes.h>
#include <ktrace/config.h>
#include <ktrace/session.h>
#include <libproc.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/kdebug.h>
#include <sys/resource.h>
#include <sys/sysctl.h>
#include <time.h>
#include <unistd.h>

#include <kperf/kpc.h>
#include <kperf/kperf.h>

#include "ktrace_helpers.h"
#include "kperf_helpers.h"

T_GLOBAL_META(
    T_META_NAMESPACE("xnu.ktrace"),
    T_META_ASROOT(true),
    T_META_CHECK_LEAKS(false));

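// A snapshot of the machine's PMC topology: the number of logical CPUs and
// the number of fixed and configurable counters on each of them.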
struct machine {
    unsigned int ncpus;
    unsigned int nfixed;
    unsigned int nconfig;
};

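// Skip the test if the kernel reports that performance counters are
// unavailable, or if the kern.monotonic.supported sysctl is missing.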
static void
skip_if_unsupported(void)
{
    int r;
    int supported = 0;
    size_t supported_size = sizeof(supported);

    r = sysctlbyname("kern.monotonic.supported", &supported, &supported_size,
        NULL, 0);
    if (r < 0) {
        T_WITH_ERRNO;
        T_SKIP("could not find \"kern.monotonic.supported\" sysctl");
    }

    if (!supported) {
        T_SKIP("PMCs are not supported on this platform");
    }
}

static struct rusage_info_v4 pre_ru = {};

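// Enable counting for both the fixed and configurable classes, and take a
// baseline rusage snapshot so tests can compare against the kernel's own
// cycle accounting for the process.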
static void
start_kpc(void)
{
    T_SETUPBEGIN;

    kpc_classmask_t classes = KPC_CLASS_FIXED_MASK |
        KPC_CLASS_CONFIGURABLE_MASK;
    int ret = kpc_set_counting(classes);
    T_ASSERT_POSIX_SUCCESS(ret, "started counting");

    ret = proc_pid_rusage(getpid(), RUSAGE_INFO_V4, (rusage_info_t *)&pre_ru);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "got rusage information");

    kpc_classmask_t classes_on = kpc_get_counting();
    T_QUIET;
    T_ASSERT_EQ(classes, classes_on, "classes counting is correct");

    T_SETUPEND;
}

static void kpc_reset_atend(void);

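// The cycles event to program into the configurable counters: event 0x02
// (CORE_CYCLE) on ARM; on Intel, event code 0x3c with what appear to be the
// PERFEVTSEL-style user (0x10000) and kernel (0x20000) enable bits.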
#if defined(__arm__) || defined(__arm64__)
#define CYCLES_EVENT 0x02
#else // defined(__arm__) || defined(__arm64__)
#define CYCLES_EVENT (0x10000 | 0x20000 | 0x3c)
#endif // !defined(__arm__) && !defined(__arm64__)

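// Force ownership of all the counters, record the machine's PMC topology in
// mch, and optionally program every configurable counter: with the cycles
// event normally, or with zeroes when resetting.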
static void
prepare_kpc(struct machine *mch, bool config, bool reset)
{
    T_SETUPBEGIN;

    if (!reset) {
        T_ATEND(kpc_reset_atend);
    }

    size_t ncpus_sz = sizeof(mch->ncpus);
    int ret = sysctlbyname("hw.logicalcpu_max", &mch->ncpus, &ncpus_sz,
        NULL, 0);
    T_QUIET;
    T_ASSERT_POSIX_SUCCESS(ret, "sysctlbyname(hw.logicalcpu_max)");
    T_QUIET;
    T_ASSERT_GT(mch->ncpus, 0, "must have some number of CPUs");

    ret = kpc_force_all_ctrs_set(1);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kpc_force_all_ctrs_set(1)");

    int forcing = 0;
    ret = kpc_force_all_ctrs_get(&forcing);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kpc_force_all_ctrs_get");
    T_QUIET; T_ASSERT_EQ(forcing, 1, "counters must be forced");

    mch->nfixed = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
    mch->nconfig = kpc_get_counter_count(KPC_CLASS_CONFIGURABLE_MASK);

    T_LOG("machine: ncpus = %d, nfixed = %d, nconfig = %d", mch->ncpus,
        mch->nfixed, mch->nconfig);

    if (config) {
        uint32_t nconfigs = kpc_get_config_count(
            KPC_CLASS_CONFIGURABLE_MASK);
        uint64_t *configs = calloc(nconfigs, sizeof(*configs));
        T_QUIET; T_ASSERT_NOTNULL(configs, "allocated config words");

        for (unsigned int i = 0; i < nconfigs; i++) {
            configs[i] = reset ? 0 : CYCLES_EVENT;
        }

        ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, configs);
        T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kpc_set_config");
        free(configs);
    }

    T_SETUPEND;
}

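// T_ATEND handler: zero the configurable counters' configuration words and
// clear any PMI periods a test left behind.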
static void
kpc_reset_atend(void)
{
    struct machine mch = {};
    prepare_kpc(&mch, true, true);
    uint64_t *periods = calloc(mch.nconfig, sizeof(*periods));
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(periods, "allocate periods array");

    int ret = kpc_set_period(KPC_CLASS_CONFIGURABLE_MASK, periods);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kpc_set_period");
    free(periods);
}

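// Busy-wait until the int pointed to by arg becomes non-zero, keeping a CPU
// busy so its counters keep advancing.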
static void *
spin(void *arg)
{
    while (*(volatile int *)arg == 0) {
        ;
    }

    return NULL;
}

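// Start one thread per logical CPU running func(arg); the caller joins them
// later with end_threads().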
static pthread_t *
start_threads(const struct machine *mch, void *(*func)(void *), void *arg)
{
    T_SETUPBEGIN;

    pthread_t *threads = calloc(mch->ncpus, sizeof(*threads));
    T_QUIET; T_ASSERT_NOTNULL(threads, "allocated array of threads");
    for (unsigned int i = 0; i < mch->ncpus; i++) {
        int error = pthread_create(&threads[i], NULL, func, arg);
        T_QUIET; T_ASSERT_POSIX_ZERO(error, "pthread_create");
    }

    T_SETUPEND;

    return threads;
}

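// Join and free the threads created by start_threads().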
static void
end_threads(const struct machine *mch, pthread_t *threads)
{
    for (unsigned int i = 0; i < mch->ncpus; i++) {
        int error = pthread_join(threads[i], NULL);
        T_QUIET; T_ASSERT_POSIX_ZERO(error, "joined thread %d", i);
    }
    free(threads);
}

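// Per-counter statistics: the first and most recent values observed, the
// number of checks, and how often the counter read zero, failed to advance,
// or went backwards.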
struct tally {
    uint64_t firstvalue;
    uint64_t lastvalue;
    uint64_t nchecks;
    uint64_t nzero;
    uint64_t nstuck;
    uint64_t ndecrease;
};

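// Fold one snapshot of per-CPU counter values into the tallies, noting any
// zero, stuck, or decreasing readings.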
static void
check_counters(unsigned int ncpus, unsigned int nctrs, struct tally *tallies,
    uint64_t *counts)
{
    for (unsigned int i = 0; i < ncpus; i++) {
        for (unsigned int j = 0; j < nctrs; j++) {
            unsigned int ctr = i * nctrs + j;
            struct tally *tly = &tallies[ctr];
            uint64_t count = counts[ctr];

            if (count == 0) {
                tly->nzero++;
            }
            if (tly->lastvalue == count) {
                tly->nstuck++;
            }
            if (tly->lastvalue > count) {
                tly->ndecrease++;
            }
            tly->lastvalue = count;
            if (tly->nchecks == 0) {
                tly->firstvalue = count;
            }
            tly->nchecks++;
        }
    }
}

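// Log each counter's tally and expect that every counter was checked and was
// never zero, stuck, or decreasing.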
static void
check_tally(const char *name, unsigned int ncpus, unsigned int nctrs,
    struct tally *tallies)
{
    for (unsigned int i = 0; i < ncpus; i++) {
        for (unsigned int j = 0; j < nctrs; j++) {
            unsigned int ctr = i * nctrs + j;
            struct tally *tly = &tallies[ctr];

            T_LOG("CPU %2u PMC %u: nchecks = %llu, last value = %llx, "
                "delta = %llu, nstuck = %llu", i, j,
                tly->nchecks, tly->lastvalue, tly->lastvalue - tly->firstvalue,
                tly->nstuck);
            T_QUIET; T_EXPECT_GT(tly->nchecks, 0ULL,
                "checked that CPU %d %s counter %d values", i, name, j);
            T_QUIET; T_EXPECT_EQ(tly->nzero, 0ULL,
                "CPU %d %s counter %d value was zero", i, name, j);
            T_QUIET; T_EXPECT_EQ(tly->nstuck, 0ULL,
                "CPU %d %s counter %d value was stuck", i, name, j);
            T_QUIET; T_EXPECT_EQ(tly->ndecrease, 0ULL,
                "CPU %d %s counter %d value decreased", i, name, j);
        }
    }
}

#define TESTDUR_NS (5 * NSEC_PER_SEC)

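// Spin a thread on every CPU, then poll the per-CPU configurable counters
// for TESTDUR_NS, checking that each one advances monotonically.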
T_DECL(kpc_cpu_direct_configurable,
    "test that configurable counters return monotonically increasing values")
{
    skip_if_unsupported();

    struct machine mch = {};
    prepare_kpc(&mch, true, false);

    int until = 0;
    pthread_t *threads = start_threads(&mch, spin, &until);
    start_kpc();

    T_SETUPBEGIN;

    uint64_t startns = clock_gettime_nsec_np(CLOCK_MONOTONIC);
    uint64_t *counts = kpc_counterbuf_alloc();
    T_QUIET; T_ASSERT_NOTNULL(counts, "allocated space for counter values");
    memset(counts, 0, sizeof(*counts) * mch.ncpus * (mch.nfixed + mch.nconfig));
    struct tally *tly = calloc(mch.ncpus * mch.nconfig, sizeof(*tly));
    T_QUIET; T_ASSERT_NOTNULL(tly, "allocated space for tallies");

    T_SETUPEND;

    int n = 0;
    while (clock_gettime_nsec_np(CLOCK_MONOTONIC) - startns < TESTDUR_NS) {
        int ret = kpc_get_cpu_counters(true,
            KPC_CLASS_CONFIGURABLE_MASK, NULL, counts);
        T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kpc_get_cpu_counters");

        check_counters(mch.ncpus, mch.nconfig, tly, counts);

        usleep(10000);
        n++;
        if (n % 100 == 0) {
            T_LOG("checked %d times", n);
        }
    }

    check_tally("config", mch.ncpus, mch.nconfig, tly);

    until = 1;
    end_threads(&mch, threads);
}

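// Read the current thread's fixed counters twice and check that every
// counter is non-zero and strictly increasing between the two reads.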
T_DECL(kpc_thread_direct_instrs_cycles,
    "test that fixed thread counters return monotonically increasing values")
{
    int err;
    uint32_t ctrs_cnt;
    uint64_t *ctrs_a;
    uint64_t *ctrs_b;

    skip_if_unsupported();

    T_SETUPBEGIN;

    ctrs_cnt = kpc_get_counter_count(KPC_CLASS_FIXED_MASK);
    if (ctrs_cnt == 0) {
        T_SKIP("no fixed counters available");
    }
    T_LOG("device has %" PRIu32 " fixed counters", ctrs_cnt);

    T_QUIET; T_ASSERT_POSIX_SUCCESS(kpc_force_all_ctrs_set(1), NULL);
    T_ASSERT_POSIX_SUCCESS(kpc_set_counting(KPC_CLASS_FIXED_MASK),
        "kpc_set_counting");
    T_ASSERT_POSIX_SUCCESS(kpc_set_thread_counting(KPC_CLASS_FIXED_MASK),
        "kpc_set_thread_counting");

    T_SETUPEND;

    ctrs_a = malloc(ctrs_cnt * sizeof(uint64_t));
    T_QUIET; T_ASSERT_NOTNULL(ctrs_a, NULL);

    err = kpc_get_thread_counters(0, ctrs_cnt, ctrs_a);
    T_ASSERT_POSIX_SUCCESS(err, "kpc_get_thread_counters");

    for (uint32_t i = 0; i < ctrs_cnt; i++) {
        T_LOG("checking counter %d with value %" PRIu64 " > 0", i, ctrs_a[i]);
        T_QUIET;
        T_EXPECT_GT(ctrs_a[i], UINT64_C(0), "counter %d is non-zero", i);
    }

    ctrs_b = malloc(ctrs_cnt * sizeof(uint64_t));
    T_QUIET; T_ASSERT_NOTNULL(ctrs_b, NULL);

    err = kpc_get_thread_counters(0, ctrs_cnt, ctrs_b);
    T_ASSERT_POSIX_SUCCESS(err, "kpc_get_thread_counters");

    for (uint32_t i = 0; i < ctrs_cnt; i++) {
        T_LOG("checking counter %d with value %" PRIu64
            " > previous value %" PRIu64, i, ctrs_b[i], ctrs_a[i]);
        T_QUIET;
        T_EXPECT_GT(ctrs_b[i], UINT64_C(0), "counter %d is non-zero", i);
        T_QUIET; T_EXPECT_LT(ctrs_a[i], ctrs_b[i],
            "counter %d is increasing", i);
    }

    free(ctrs_a);
    free(ctrs_b);
}

#define PMI_TEST_DURATION_NS (15 * NSEC_PER_SEC)
#define PERIODIC_CPU_COUNT_MS (250)
#define NTIMESLICES (72)
#define PMI_PERIOD (50ULL * 1000 * 1000)
#define END_EVENT KDBG_EVENTID(0xfe, 0xfe, 0)

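// Per-CPU state for the PMI test: the last counter value seen, the worst
// skid past the programmed period, and a histogram of PMIs per time slice.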
struct cpu {
    uint64_t prev_count, max_skid;
    unsigned int timeslices[NTIMESLICES];
};

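// Program the first configurable counter to fire a PMI every PMI_PERIOD
// cycles and trigger a kperf sample, while a timer also reads the counters
// directly; a ktrace session checks the PMIs' spacing, skid, and per-CPU
// coverage over the duration of the test.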
T_DECL(kpc_pmi_configurable,
    "test that PMIs don't interfere with sampling counters in kperf")
{
    skip_if_unsupported();

    start_controlling_ktrace();
    struct machine mch = {};
    prepare_kpc(&mch, true, false);

    T_SETUPBEGIN;

    uint64_t *periods = calloc(mch.nconfig, sizeof(*periods));
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(periods, "allocate periods array");
    periods[0] = PMI_PERIOD;

    int ret = kpc_set_period(KPC_CLASS_CONFIGURABLE_MASK, periods);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kpc_set_period");
    free(periods);

    int32_t *actions = calloc(mch.nconfig, sizeof(*actions));
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(actions, "allocate actions array");
    actions[0] = 1;
    ret = kpc_set_actionid(KPC_CLASS_CONFIGURABLE_MASK, actions);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kpc_set_actionid");
    free(actions);

    (void)kperf_action_count_set(1);
    ret = kperf_action_samplers_set(1, KPERF_SAMPLER_TINFO);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "kperf_action_samplers_set");

    ktrace_config_t ktconfig = ktrace_config_create_current();
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(ktconfig, "create current config");
    ret = ktrace_config_print_description(ktconfig, stdout);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "print config description");

    struct cpu *cpus = calloc(mch.ncpus, sizeof(*cpus));
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(cpus, "allocate CPUs array");

    __block unsigned int nsamples = 0;
    __block uint64_t first_ns = 0;
    __block uint64_t last_ns = 0;

    ktrace_session_t sess = ktrace_session_create();
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(sess, "ktrace_session_create");

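    // For each PMI tracepoint: arg1 packs the counter's configuration word
    // in its low 32 bits, the counter index in the next 16 bits, and flags
    // in the top 16; arg2 is the counter value and arg3 the interrupted PC.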
    ktrace_events_single(sess, PERF_KPC_PMI, ^(struct trace_point *tp) {
        // Only the start of each PMI event is interesting.
        if (tp->debugid & DBG_FUNC_END) {
            return;
        }

        uint64_t cur_ns = 0;
        int cret = ktrace_convert_timestamp_to_nanoseconds(sess,
            tp->timestamp, &cur_ns);
        T_QUIET; T_ASSERT_POSIX_ZERO(cret, "convert timestamp");

        uint64_t desc = tp->arg1;
        uint64_t config = desc & UINT32_MAX;
        T_QUIET; T_EXPECT_EQ(config & UINT8_MAX,
            (uint64_t)CYCLES_EVENT & UINT8_MAX,
            "PMI argument matches configuration");
        __unused uint64_t counter = (desc >> 32) & UINT16_MAX;
        __unused uint64_t flags = desc >> 48;

        uint64_t count = tp->arg2;
        if (first_ns == 0) {
            first_ns = cur_ns;
        }
        struct cpu *cpu = &cpus[tp->cpuid];

        // Track the worst-case distance past the programmed period.
        if (cpu->prev_count != 0) {
            uint64_t delta = count - cpu->prev_count;
            uint64_t skid = delta - PMI_PERIOD;
            if (skid > cpu->max_skid) {
                cpu->max_skid = skid;
            }
        }
        cpu->prev_count = count;

        __unused uint64_t pc = tp->arg3;

        // Bucket the PMI into one of NTIMESLICES slices of the test window.
        double slice = (double)(cur_ns - first_ns) / PMI_TEST_DURATION_NS *
            NTIMESLICES;
        if (slice < NTIMESLICES) {
            cpu->timeslices[(unsigned int)slice] += 1;
        }

        nsamples++;
    });

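    // The test emits END_EVENT after the timeout; record its timestamp and
    // end the ktrace session.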
    ktrace_events_single(sess, END_EVENT, ^(struct trace_point *tp) {
        int cret = ktrace_convert_timestamp_to_nanoseconds(sess,
            tp->timestamp, &last_ns);
        T_QUIET; T_ASSERT_POSIX_ZERO(cret, "convert timestamp");

        ktrace_end(sess, 1);
    });

    uint64_t *counts = kpc_counterbuf_alloc();
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(counts,
        "allocated counter values array");
    memset(counts, 0, sizeof(*counts) * mch.ncpus * (mch.nfixed + mch.nconfig));
    struct tally *tly = calloc(mch.ncpus * (mch.nconfig + mch.nfixed),
        sizeof(*tly));
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(tly, "allocated tallies array");

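    // While PMIs are firing, also read the counters directly every
    // PERIODIC_CPU_COUNT_MS to check that sampling doesn't disturb them.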
    dispatch_source_t cpu_count_timer = dispatch_source_create(
        DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_get_main_queue());
    dispatch_source_set_timer(cpu_count_timer, dispatch_time(DISPATCH_TIME_NOW,
        PERIODIC_CPU_COUNT_MS * NSEC_PER_MSEC),
        PERIODIC_CPU_COUNT_MS * NSEC_PER_MSEC, 0);
    dispatch_source_set_cancel_handler(cpu_count_timer, ^{
        dispatch_release(cpu_count_timer);
    });

    __block uint64_t first_check_ns = 0;
    __block uint64_t last_check_ns = 0;

    dispatch_source_set_event_handler(cpu_count_timer, ^{
        int cret = kpc_get_cpu_counters(true,
            KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK, NULL, counts);
        T_QUIET; T_ASSERT_POSIX_SUCCESS(cret, "kpc_get_cpu_counters");

        if (!first_check_ns) {
            first_check_ns = clock_gettime_nsec_np(CLOCK_MONOTONIC);
        } else {
            last_check_ns = clock_gettime_nsec_np(CLOCK_MONOTONIC);
        }
        check_counters(mch.ncpus, mch.nfixed + mch.nconfig, tly, counts);
    });

    int stop = 0;
    (void)start_threads(&mch, spin, &stop);

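    // Once tracing ends: verify the directly-read counter tallies, then walk
    // each CPU's time-slice histogram to check that PMIs arrived
    // continuously on every CPU for the whole test.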
    ktrace_set_completion_handler(sess, ^{
        dispatch_cancel(cpu_count_timer);

        check_tally("config", mch.ncpus, mch.nfixed + mch.nconfig, tly);

        struct rusage_info_v4 post_ru = {};
        int ruret = proc_pid_rusage(getpid(), RUSAGE_INFO_V4,
            (rusage_info_t *)&post_ru);
        T_QUIET; T_ASSERT_POSIX_SUCCESS(ruret, "got rusage information");

        T_LOG("saw %llu cycles in process", post_ru.ri_cycles - pre_ru.ri_cycles);
        uint64_t total = 0;

        unsigned int nsamplecpus = 0;
        char sample_slices[NTIMESLICES + 1];
        sample_slices[NTIMESLICES] = '\0';
        for (unsigned int i = 0; i < mch.ncpus; i++) {
            memset(sample_slices, '.', sizeof(sample_slices) - 1);

            struct cpu *cpu = &cpus[i];
            unsigned int nsampleslices = 0, ncpusamples = 0,
                last_contiguous = 0;
            bool seen_empty = false;
            for (unsigned int j = 0; j < NTIMESLICES; j++) {
                unsigned int nslice = cpu->timeslices[j];
                ncpusamples += nslice;
                if (nslice > 0) {
                    nsampleslices++;
                    sample_slices[j] = '*';
                } else {
                    seen_empty = true;
                }
                if (!seen_empty) {
                    last_contiguous = j;
                }
            }
            unsigned int ctr = i * (mch.nfixed + mch.nconfig) + mch.nfixed;
            uint64_t delta = tly[ctr].lastvalue - tly[ctr].firstvalue;
            T_LOG("%g GHz", (double)delta / (last_check_ns - first_check_ns));
            total += delta;
            T_LOG("CPU %2u: %4u/%u, %6u/%llu, max skid = %llu (%.1f%%), "
                "last contiguous = %u", i,
                nsampleslices, NTIMESLICES, ncpusamples, delta / PMI_PERIOD,
                cpu->max_skid, (double)cpu->max_skid / PMI_PERIOD * 100,
                last_contiguous);
            T_LOG("%s", sample_slices);
            if (nsampleslices > 0) {
                nsamplecpus++;
            }
            T_EXPECT_EQ(last_contiguous, NTIMESLICES - 1,
                "CPU %2u: saw samples in each time slice", i);
        }
        T_LOG("kpc reported %llu total cycles", total);
        T_LOG("saw %u sample events, across %u/%u cpus", nsamples, nsamplecpus,
            mch.ncpus);
        T_END;
    });

    int dbglvl = 3;
    ret = sysctlbyname("kperf.debug_level", NULL, NULL, &dbglvl,
        sizeof(dbglvl));
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "set kperf debug level");
    ret = kperf_sample_set(1);
    T_ASSERT_POSIX_SUCCESS(ret, "kperf_sample_set");

    start_kpc();

    int error = ktrace_start(sess, dispatch_get_main_queue());
    T_ASSERT_POSIX_ZERO(error, "started tracing");

    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, PMI_TEST_DURATION_NS),
        dispatch_get_main_queue(), ^{
        T_LOG("ending tracing after timeout");
        kdebug_trace(END_EVENT, 0, 0, 0, 0);
    });

    dispatch_activate(cpu_count_timer);

    T_SETUPEND;

    dispatch_main();
}

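// The previous value of the kpc.disable_whitelist sysctl, restored by
// whitelist_atend() when the test finishes.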
static int g_prev_disablewl = 0;

static void
whitelist_atend(void)
{
    int ret = sysctlbyname("kpc.disable_whitelist", NULL, NULL,
        &g_prev_disablewl, sizeof(g_prev_disablewl));
    if (ret < 0) {
        T_LOG("failed to reset whitelist: %d (%s)", errno, strerror(errno));
    }
}

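// Enforce the event whitelist and check that whitelisted events are allowed
// (even with non-event bits set), while arbitrary events fail with EPERM.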
T_DECL(kpc_whitelist, "ensure kpc's whitelist is filled out")
{
    // This policy only applies to arm64 devices.
#if defined(__arm64__)
    // Start enforcing the whitelist.
    int set = 0;
    size_t getsz = sizeof(g_prev_disablewl);
    int ret = sysctlbyname("kpc.disable_whitelist", &g_prev_disablewl, &getsz,
        &set, sizeof(set));
    if (ret < 0 && errno == ENOENT) {
        T_SKIP("kpc not running with a whitelist, or RELEASE kernel");
    }

    T_ASSERT_POSIX_SUCCESS(ret, "started enforcing the event whitelist");
    T_ATEND(whitelist_atend);

    uint32_t nconfigs = kpc_get_config_count(KPC_CLASS_CONFIGURABLE_MASK);
    uint64_t *config = calloc(nconfigs, sizeof(*config));
    T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(config, "allocated config words");

    // Check that events in the whitelist are allowed. CORE_CYCLE (0x2) is
    // always present in the whitelist.
    config[0] = 0x02;
    ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);
    T_ASSERT_POSIX_SUCCESS(ret, "configured kpc to count cycles");

    // Check that non-event bits are ignored by the whitelist.
    config[0] = 0x102;
    ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);
    T_ASSERT_POSIX_SUCCESS(ret,
        "configured kpc to count cycles with non-event bits set");

    // Check that configurations of non-whitelisted events fail.
    config[0] = 0xfe;
    ret = kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);
    T_ASSERT_POSIX_FAILURE(ret, EPERM,
        "shouldn't allow arbitrary events with whitelist enabled");

    // Clean up the configuration.
    config[0] = 0;
    (void)kpc_set_config(KPC_CLASS_CONFIGURABLE_MASK, config);

    free(config);
#else /* defined(__arm64__) */
    T_SKIP("kpc whitelist is only enforced on arm64");
#endif /* !defined(__arm64__) */
}