/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/mp.h>
#include <sys/errno.h>
#include <kperf/buffer.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <kern/monotonic.h>

/* Fixed counter mask -- three counters, each with OS and USER */
#define IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS (0x333)
#define IA32_FIXED_CTR_ENABLE_ALL_PMI (0x888)

#define IA32_PERFEVTSEL_PMI (1ull << 20)
#define IA32_PERFEVTSEL_EN (1ull << 22)

/* Non-serialising */
#define USE_RDPMC

#define RDPMC_FIXED_COUNTER_SELECTOR (1ULL << 30)

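/*
 * Layout note (per the Intel SDM; not spelled out in the original source):
 * IA32_FIXED_CTR_CTRL gives each fixed counter a 4-bit field, where bit 0
 * enables ring-0 (OS) counting, bit 1 enables ring-3 (USER) counting, and
 * bit 3 requests a PMI on overflow. With three fixed counters:
 *
 *	(0x3 << 0) | (0x3 << 4) | (0x3 << 8) == 0x333	all counters, all rings
 *	(0x8 << 0) | (0x8 << 4) | (0x8 << 8) == 0x888	PMI on every overflow
 */
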
/* track the last config we enabled */
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;

/* PMC / MSR accesses */

static uint64_t
IA32_FIXED_CTR_CTRL(void)
{
	return rdmsr64(MSR_IA32_PERF_FIXED_CTR_CTRL);
}

#ifdef FIXED_COUNTER_RELOAD
static void
wrIA32_FIXED_CTRx(uint32_t ctr, uint64_t value)
{
	/* standard C forbids returning an expression from a void function */
	wrmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr, value);
}
#endif
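
/*
 * kpc_reload_fixed() below also reads a fixed counter via IA32_FIXED_CTRx(),
 * which this file never defines. A minimal sketch of the missing accessor
 * (an assumption, based on the otherwise-unused RDPMC_FIXED_COUNTER_SELECTOR
 * above, and guarded like its only caller):
 */
#ifdef FIXED_COUNTER_SHADOW
static uint64_t
IA32_FIXED_CTRx(uint32_t ctr)
{
#ifdef USE_RDPMC
	/* setting bit 30 of the RDPMC selector addresses the fixed counters */
	return rdpmc64(RDPMC_FIXED_COUNTER_SELECTOR | ctr);
#else /* !USE_RDPMC */
	return rdmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr);
#endif /* !USE_RDPMC */
}
#endif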

static uint64_t
IA32_PMCx(uint32_t ctr)
{
#ifdef USE_RDPMC
	return rdpmc64(ctr);
#else /* !USE_RDPMC */
	return rdmsr64(MSR_IA32_PERFCTR0 + ctr);
#endif /* !USE_RDPMC */
}

static void
wrIA32_PMCx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_PERFCTR0 + ctr, value);
}

static uint64_t
IA32_PERFEVTSELx(uint32_t ctr)
{
	return rdmsr64(MSR_IA32_EVNTSEL0 + ctr);
}

static void
wrIA32_PERFEVTSELx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_EVNTSEL0 + ctr, value);
}


/* internal functions */

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}
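
/*
 * Illustrative call (hypothetical values): pmc_mask is a bitmap of
 * configurable PMCs, so kpc_is_running_configurable(0x5) asks whether the
 * configurable class is running with both PMC0 and PMC2 included in the
 * last-enabled mask.
 */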

uint32_t
kpc_fixed_count(void)
{
	i386_cpu_info_t *info = cpuid_info();

	return info->cpuid_arch_perf_leaf.fixed_number;
}

uint32_t
kpc_configurable_count(void)
{
	i386_cpu_info_t *info = cpuid_info();

	return info->cpuid_arch_perf_leaf.number;
}
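
/*
 * Both counts come from the architectural performance-monitoring CPUID
 * leaf (0xA), which reports how many fixed and general-purpose counters
 * the part implements; cpuid_arch_perf_leaf is xnu's cached copy of that
 * leaf.
 */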

uint32_t
kpc_fixed_config_count(void)
{
	return KPC_X86_64_FIXED_CONFIGS;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);
}

uint32_t
kpc_rawpmu_config_count(void)
{
	/* RAW PMU access not implemented. */
	return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}

static uint8_t
kpc_fixed_width(void)
{
	i386_cpu_info_t *info = cpuid_info();

	return info->cpuid_arch_perf_leaf.fixed_width;
}

static uint8_t
kpc_configurable_width(void)
{
	i386_cpu_info_t *info = cpuid_info();

	return info->cpuid_arch_perf_leaf.width;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << kpc_fixed_width()) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << kpc_configurable_width()) - 1;
}

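/*
 * Worked example: on a part whose CPUID leaf 0xA reports 48-bit counters,
 * kpc_configurable_max() == (1ULL << 48) - 1 == 0xFFFFFFFFFFFF, the
 * largest value a counter can hold before wrapping.
 */
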
#ifdef FIXED_COUNTER_SHADOW
static uint64_t
kpc_reload_fixed(int ctr)
{
	uint64_t old = IA32_FIXED_CTRx(ctr);
	wrIA32_FIXED_CTRx(ctr, FIXED_RELOAD(ctr));
	return old;
}
#endif

static uint64_t
kpc_reload_configurable(int ctr)
{
	uint64_t cfg = IA32_PERFEVTSELx(ctr);

	/* counters must be disabled before they can be written to */
	uint64_t old = IA32_PMCx(ctr);
	wrIA32_PERFEVTSELx(ctr, cfg & ~IA32_PERFEVTSEL_EN);
	wrIA32_PMCx(ctr, CONFIGURABLE_RELOAD(ctr));
	wrIA32_PERFEVTSELx(ctr, cfg);
	return old;
}

void kpc_pmi_handler(void);

static void
set_running_fixed(boolean_t on)
{
	uint64_t global = 0, mask = 0, fixed_ctrl = 0;
	int i;
	boolean_t enabled;

	if (on) {
		/* these are per-thread in SMT */
		fixed_ctrl = IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS | IA32_FIXED_CTR_ENABLE_ALL_PMI;
	} else {
		/* don't allow disabling fixed counters */
		return;
	}

	wrmsr64(MSR_IA32_PERF_FIXED_CTR_CTRL, fixed_ctrl);

	enabled = ml_set_interrupts_enabled(FALSE);

	/* rmw the global control */
	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);
	for (i = 0; i < (int) kpc_fixed_count(); i++) {
		mask |= (1ULL << (32 + i));
	}

	if (on) {
		global |= mask;
	} else {
		global &= ~mask;
	}

	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);

	ml_set_interrupts_enabled(enabled);
}
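
/*
 * IA32_PERF_GLOBAL_CTRL layout note (per the Intel SDM): bit i enables
 * general-purpose PMC i and bit (32 + i) enables fixed counter i, which
 * is why the loop above builds its fixed-counter mask from
 * (1ULL << (32 + i)).
 */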

static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t global = 0ULL, cfg = 0ULL, save = 0ULL;
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	/* rmw the global control */
	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);

	/* need to save and restore counter since it resets when reconfigured */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		cfg = IA32_PERFEVTSELx(i);
		save = IA32_PMCx(i);
		wrIA32_PERFEVTSELx(i, cfg | IA32_PERFEVTSEL_PMI | IA32_PERFEVTSEL_EN);
		wrIA32_PMCx(i, save);
	}

	/* update the global control value */
	global &= ~target_mask; /* clear the bits of the targeted PMCs */
	global |= state_mask;   /* set the targeted PMCs' bits to their new states */
	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);

	ml_set_interrupts_enabled(enabled);
}
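
/*
 * Illustrative call (hypothetical values): set_running_configurable(0x3,
 * 0x1) targets PMC0 and PMC1 (target_mask) and enables only PMC0
 * (state_mask), disabling PMC1 while leaving every other enable bit in
 * IA32_PERF_GLOBAL_CTRL untouched.
 */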

static void
kpc_set_running_mp_call(void *vstate)
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote *)vstate;
	assert(mp_config);

	if (kpc_controls_fixed_counters()) {
		set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);
	}

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);
}

int
kpc_get_fixed_config(kpc_config_t *configv)
{
	configv[0] = IA32_FIXED_CTR_CTRL();
	return 0;
}

static int
kpc_set_fixed_config(kpc_config_t *configv)
{
	(void) configv;

	/* NYI */
	return -1;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
#if MONOTONIC
	mt_fixed_counts(counterv);
	return 0;
#else /* MONOTONIC */
#pragma unused(counterv)
	return ENOTSUP;
#endif /* !MONOTONIC */
}

int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count();

	assert(configv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*configv++ = IA32_PERFEVTSELx(i);
		}
	}
	return 0;
}
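
/*
 * Note on packing: configv is filled densely, one word per bit set in
 * pmc_mask. For example (hypothetical values), pmc_mask == 0x6 returns
 * the event selects of PMC1 and PMC2 in configv[0] and configv[1].
 */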

static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t save;

	for (uint32_t i = 0; i < cfg_count; i++) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}

		/* need to save and restore counter since it resets when reconfigured */
		save = IA32_PMCx(i);

		/*
		 * Some bits are not safe to set from user space.
		 * Allow these bits to be set:
		 *
		 *   0-7    Event select
		 *   8-15   UMASK
		 *   16     USR
		 *   17     OS
		 *   18     E
		 *   22     EN
		 *   23     INV
		 *   24-31  CMASK
		 *
		 * Excluding:
		 *
		 *   19     PC
		 *   20     INT
		 *   21     AnyThread
		 *   32     IN_TX
		 *   33     IN_TXCP
		 *   34-63  Reserved
		 */
		wrIA32_PERFEVTSELx(i, *configv & 0xffc7ffffull);
		wrIA32_PMCx(i, save);

		/* next configuration word */
		configv++;
	}

	return 0;
}
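
/*
 * Decomposition of the 0xffc7ffff allow-mask above: bits 19-21 (PC, INT,
 * AnyThread) are the only low-word bits cleared, and because the constant
 * is only 32 bits wide, the AND also clears IN_TX (32), IN_TXCP (33) and
 * the reserved bits 34-63.
 */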

int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t status, *it_counterv = counterv;

	/* snap the counters */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*it_counterv++ = CONFIGURABLE_SHADOW(i) +
			    (IA32_PMCx(i) - CONFIGURABLE_RELOAD(i));
		}
	}

	/* grab the overflow bits */
	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

	/* reset the iterator */
	it_counterv = counterv;

	/*
	 * If the overflow bit is set for a counter, our previous read may or
	 * may not have happened before the counter overflowed. Re-read any
	 * counter with its overflow bit set, so we know for sure that it has
	 * overflowed. This matters because the math is different for a
	 * counter that has overflowed.
	 */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) &&
		    ((1ULL << i) & status)) {
			*it_counterv++ = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i);
		}
	}

	return 0;
}
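
/*
 * Worked example of the two read paths above: with shadow S, reload value
 * R and a raw read C, a counter that has not overflowed contributes
 * S + (C - R). If its overflow bit was set, the counter wrapped at
 * kpc_configurable_max(), so the re-read is credited as S + (max - R) + C:
 * the distance from R to the wrap point plus the events counted since the
 * wrap.
 */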

static void
kpc_get_curcpu_counters_mp_call(void *args)
{
	struct kpc_get_counters_remote *handler = args;
	int offset = 0, r = 0;

	assert(handler);
	assert(handler->buf);

	offset = cpu_number() * handler->buf_stride;
	r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic */
	os_atomic_add(&(handler->nb_counters), r, relaxed);
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0;

	struct kpc_get_counters_remote hdl = {
		.classes = classes, .nb_counters = 0,
		.buf_stride = kpc_get_counter_count(classes), .buf = buf
	};

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	if (curcpu) {
		*curcpu = current_processor()->cpu_id;
	}
	mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_get_curcpu_counters_mp_call, &hdl);

	ml_set_interrupts_enabled(enabled);

	return hdl.nb_counters;
}
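
/*
 * Usage note (inferred from the stride computation above): buf must have
 * room for every CPU's slice, i.e. at least machine_info.logical_cpu_max *
 * kpc_get_counter_count(classes) entries, since each CPU writes its
 * counters at offset cpu_number() * buf_stride.
 */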

static void
kpc_set_config_mp_call(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0, count = 0;
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_set_fixed_config(&new_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(&new_config[count], mp_config->pmc_mask);
		count += kpc_popcount(mp_config->pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);
}

static void
kpc_set_reload_mp_call(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	uint32_t classes = 0, count = 0;
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters; this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer, since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* skip counters outside the mask */
			if (((1ULL << i) & mp_config->pmc_mask) == 0) {
				continue;
			}

			if (*new_period == 0) {
				*new_period = kpc_configurable_max();
			}

			CONFIGURABLE_RELOAD(i) = max - *new_period;

			/* reload the counter */
			kpc_reload_configurable(i);

			/* clear overflow bit just in case */
			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << i);

			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);
}
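
/*
 * Reload arithmetic sketch: the PMCs count up and interrupt on overflow,
 * so CONFIGURABLE_RELOAD(i) = max - period arms PMC i to reach max after
 * `period` more events and raise its PMI on the following increment. For
 * example, max == 0xFFFFFFFFFFFF and a period of 1000000 give a reload
 * value of 0xFFFFFFF0BDBF.
 */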

int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
	mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_set_reload_mp_call, mp_config);

	return 0;
}


/* interface functions */

void
kpc_arch_init(void)
{
	i386_cpu_info_t *info = cpuid_info();
	uint8_t version_id = info->cpuid_arch_perf_leaf.version;

	/*
	 * kpc only supports Intel PMU versions 2 and above.
	 */
	if (version_id < 2) {
		kpc_supported = false;
	}
}

uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	assert(mp_config);

	/* dispatch to all CPUs */
	mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_set_running_mp_call, mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;

	return 0;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_set_config_mp_call, mp_config);

	return 0;
}

/* PMI stuff */
void
kpc_pmi_handler(void)
{
	uint64_t status, extra;
	uint32_t ctr;
	int enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

#ifdef FIXED_COUNTER_SHADOW
	for (ctr = 0; ctr < kpc_fixed_count(); ctr++) {
		if ((1ULL << (ctr + 32)) & status) {
			extra = kpc_reload_fixed(ctr);

			FIXED_SHADOW(ctr)
			        += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

			BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, FIXED_ACTIONID(ctr));

			if (FIXED_ACTIONID(ctr)) {
				kpc_sample_kperf(FIXED_ACTIONID(ctr));
			}
		}
	}
#endif

	for (ctr = 0; ctr < kpc_configurable_count(); ctr++) {
		if ((1ULL << ctr) & status) {
			extra = kpc_reload_configurable(ctr);

			CONFIGURABLE_SHADOW(ctr)
			        += kpc_configurable_max() - CONFIGURABLE_RELOAD(ctr) + extra;

			/*
			 * kperf can grab the PMCs when it samples, so we need to
			 * make sure the overflow bits are in the correct state
			 * before the call to kperf_sample.
			 */
			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << ctr);

			BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, CONFIGURABLE_ACTIONID(ctr));

			if (CONFIGURABLE_ACTIONID(ctr)) {
				kpc_sample_kperf(CONFIGURABLE_ACTIONID(ctr));
			}
		}
	}

	ml_set_interrupts_enabled(enabled);
}
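
/*
 * Shadow accounting sketch for the handler above: an overflowed counter
 * ran from its reload value up past the maximum, so the handler credits
 * max - RELOAD (plus one for the wrap in the fixed-counter path) to the
 * shadow, plus `extra`, the count the counter had accumulated again by
 * the time kpc_reload_* re-armed it.
 */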

int
kpc_set_sw_inc(uint32_t mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	i386_cpu_info_t *info = cpuid_info();

	uint8_t version_id = info->cpuid_arch_perf_leaf.version;

	if (version_id == 3) {
		return KPC_PMU_INTEL_V3;
	} else if (version_id == 2) {
		return KPC_PMU_INTEL_V2;
	}

	return KPC_PMU_ERROR;
}