/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <kern/kalloc.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/mp.h>
#include <sys/errno.h>
#include <kperf/buffer.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

/* Fixed counter mask -- three counters, each with OS and USER */
#define IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS (0x333)
#define IA32_FIXED_CTR_ENABLE_ALL_PMI (0x888)
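/*
 * IA32_FIXED_CTR_CTRL uses a 4-bit field per fixed counter: bits 0-1 enable
 * counting in ring 0 and in rings 1-3 respectively, bit 3 requests a PMI on
 * overflow.  0x333 enables all rings for fixed counters 0-2; 0x888 sets
 * their PMI bits.
 */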

#define IA32_PERFEVTSEL_PMI (1ull << 20)
#define IA32_PERFEVTSEL_EN (1ull << 22)

/* Non-serialising */
#define USE_RDPMC

#define RDPMC_FIXED_COUNTER_SELECTOR (1ULL<<30)
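/*
 * Setting bit 30 of the RDPMC counter index selects the fixed-function
 * counters instead of the general-purpose ones; unlike RDMSR, RDPMC is not
 * a serialising instruction, which keeps the counter read path cheap.
 */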

/* track the last config we enabled */
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;

/* PMC / MSR accesses */

static uint64_t
IA32_FIXED_CTR_CTRL(void)
{
	return rdmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL );
}

static uint64_t
IA32_FIXED_CTRx(uint32_t ctr)
{
#ifdef USE_RDPMC
	return rdpmc64(RDPMC_FIXED_COUNTER_SELECTOR | ctr);
#else /* !USE_RDPMC */
	return rdmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr);
#endif /* !USE_RDPMC */
}

#ifdef FIXED_COUNTER_RELOAD
static void
wrIA32_FIXED_CTRx(uint32_t ctr, uint64_t value)
{
	return wrmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr, value);
}
#endif

static uint64_t
IA32_PMCx(uint32_t ctr)
{
#ifdef USE_RDPMC
	return rdpmc64(ctr);
#else /* !USE_RDPMC */
	return rdmsr64(MSR_IA32_PERFCTR0 + ctr);
#endif /* !USE_RDPMC */
}

static void
wrIA32_PMCx(uint32_t ctr, uint64_t value)
{
	return wrmsr64(MSR_IA32_PERFCTR0 + ctr, value);
}

static uint64_t
IA32_PERFEVTSELx(uint32_t ctr)
{
	return rdmsr64(MSR_IA32_EVNTSEL0 + ctr);
}

static void
wrIA32_PERFEVTSELx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_EVNTSEL0 + ctr, value);
}


/* internal functions */

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}

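/* Counter counts and widths below come from CPUID leaf 0xA (architectural performance monitoring). */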
uint32_t
kpc_fixed_count(void)
{
	i386_cpu_info_t *info = NULL;
	info = cpuid_info();
	return info->cpuid_arch_perf_leaf.fixed_number;
}

uint32_t
kpc_configurable_count(void)
{
	i386_cpu_info_t *info = NULL;
	info = cpuid_info();
	return info->cpuid_arch_perf_leaf.number;
}

uint32_t
kpc_fixed_config_count(void)
{
	return KPC_X86_64_FIXED_CONFIGS;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);
}

uint32_t
kpc_rawpmu_config_count(void)
{
	// RAW PMU access not implemented.
	return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}

static uint8_t
kpc_fixed_width(void)
{
	i386_cpu_info_t *info = NULL;

	info = cpuid_info();

	return info->cpuid_arch_perf_leaf.fixed_width;
}

static uint8_t
kpc_configurable_width(void)
{
	i386_cpu_info_t *info = NULL;

	info = cpuid_info();

	return info->cpuid_arch_perf_leaf.width;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << kpc_fixed_width()) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << kpc_configurable_width()) - 1;
}

#ifdef FIXED_COUNTER_SHADOW
static uint64_t
kpc_reload_fixed(int ctr)
{
	uint64_t old = IA32_FIXED_CTRx(ctr);
	wrIA32_FIXED_CTRx(ctr, FIXED_RELOAD(ctr));
	return old;
}
#endif

static uint64_t
kpc_reload_configurable(int ctr)
{
	uint64_t cfg = IA32_PERFEVTSELx(ctr);

	/* counters must be disabled before they can be written to */
	uint64_t old = IA32_PMCx(ctr);
	wrIA32_PERFEVTSELx(ctr, cfg & ~IA32_PERFEVTSEL_EN);
	wrIA32_PMCx(ctr, CONFIGURABLE_RELOAD(ctr));
	wrIA32_PERFEVTSELx(ctr, cfg);
	return old;
}

void kpc_pmi_handler(x86_saved_state_t *state);

static void
set_running_fixed(boolean_t on)
{
	uint64_t global = 0, mask = 0, fixed_ctrl = 0;
	int i;
	boolean_t enabled;

	if( on )
		/* these are per-thread in SMT */
		fixed_ctrl = IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS | IA32_FIXED_CTR_ENABLE_ALL_PMI;
	else
		/* don't allow disabling fixed counters */
		return;

	wrmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL, fixed_ctrl );

	enabled = ml_set_interrupts_enabled(FALSE);

	/* rmw the global control */
	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);
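	/* fixed-counter enable bits live at bits 32..(32 + n - 1) of IA32_PERF_GLOBAL_CTRL */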
	for( i = 0; i < (int) kpc_fixed_count(); i++ )
		mask |= (1ULL<<(32+i));

	if( on )
		global |= mask;
	else
		global &= ~mask;

	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);

	ml_set_interrupts_enabled(enabled);
}

static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t global = 0ULL, cfg = 0ULL, save = 0ULL;
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	/* rmw the global control */
	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);

	/* need to save and restore counter since it resets when reconfigured */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		cfg = IA32_PERFEVTSELx(i);
		save = IA32_PMCx(i);
		wrIA32_PERFEVTSELx(i, cfg | IA32_PERFEVTSEL_PMI | IA32_PERFEVTSEL_EN);
		wrIA32_PMCx(i, save);
	}

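	/*
	 * Each PERFEVTSELx above is armed with EN and PMI; whether a PMC
	 * actually counts is still gated by its bit in IA32_PERF_GLOBAL_CTRL,
	 * which the mask update below switches on or off.
	 */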
	/* update the global control value */
	global &= ~target_mask;		/* clear the targeted PMCs bits */
	global |= state_mask;		/* update the targeted PMCs bits with their new states */
	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);

	ml_set_interrupts_enabled(enabled);
}

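/*
 * Runs on each CPU via mp_cpus_call() so that every core applies the new
 * running state to its own PMU registers.
 */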
static void
kpc_set_running_mp_call( void *vstate )
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
	assert(mp_config);

	if (kpc_controls_fixed_counters())
		set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);

	set_running_configurable(mp_config->cfg_target_mask,
				 mp_config->cfg_state_mask);
}

int
kpc_get_fixed_config(kpc_config_t *configv)
{
	configv[0] = IA32_FIXED_CTR_CTRL();
	return 0;
}

static int
kpc_set_fixed_config(kpc_config_t *configv)
{
	(void) configv;

	/* NYI */
	return -1;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
	int i, n = kpc_fixed_count();

#ifdef FIXED_COUNTER_SHADOW
	uint64_t status;

	/* snap the counters */
	for( i = 0; i < n; i++ ) {
		counterv[i] = FIXED_SHADOW(i) +
			(IA32_FIXED_CTRx(i) - FIXED_RELOAD(i));
	}

	/* Grab the overflow bits */
	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

	/* If the overflow bit is set for a counter, our previous read may or may not have been
	 * before the counter overflowed. Re-read any counter with its overflow bit set so
	 * we know for sure that it has overflowed. The reason this matters is that the math
	 * is different for a counter that has overflowed. */
	for( i = 0; i < n; i++ ) {
		if ((1ull << (i + 32)) & status)
			counterv[i] = FIXED_SHADOW(i) +
				(kpc_fixed_max() - FIXED_RELOAD(i) + 1 /* Wrap */) + IA32_FIXED_CTRx(i);
	}
#else
	for( i = 0; i < n; i++ )
		counterv[i] = IA32_FIXED_CTRx(i);
#endif

	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count();

	assert(configv);

	for (uint32_t i = 0; i < cfg_count; ++i)
		if ((1ULL << i) & pmc_mask)
			*configv++ = IA32_PERFEVTSELx(i);
	return 0;
}

static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t save;

	for (uint32_t i = 0; i < cfg_count; i++ ) {
		if (((1ULL << i) & pmc_mask) == 0)
			continue;

		/* need to save and restore counter since it resets when reconfigured */
		save = IA32_PMCx(i);

		/*
		 * Some bits are not safe to set from user space.
		 * Allow these bits to be set:
		 *
		 *   0-7    Event select
		 *   8-15   UMASK
		 *   16     USR
		 *   17     OS
		 *   18     E
		 *   22     EN
		 *   23     INV
		 *   24-31  CMASK
		 *
		 * Excluding:
		 *
		 *   19     PC
		 *   20     INT
		 *   21     AnyThread
		 *   32     IN_TX
		 *   33     IN_TXCP
		 *   34-63  Reserved
		 */
		wrIA32_PERFEVTSELx(i, *configv & 0xffc7ffffull);
		wrIA32_PMCx(i, save);

		/* next configuration word */
		configv++;
	}

	return 0;
}

int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t status, *it_counterv = counterv;

	/* snap the counters */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*it_counterv++ = CONFIGURABLE_SHADOW(i) +
				(IA32_PMCx(i) - CONFIGURABLE_RELOAD(i));
		}
	}

	/* Grab the overflow bits */
	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

	/* reset the iterator */
	it_counterv = counterv;

	/*
	 * If the overflow bit is set for a counter, our previous read may or may not have been
	 * before the counter overflowed. Re-read any counter with its overflow bit set so
	 * we know for sure that it has overflowed. The reason this matters is that the math
	 * is different for a counter that has overflowed.
	 */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) &&
		    ((1ULL << i) & status))
		{
			*it_counterv++ = CONFIGURABLE_SHADOW(i) +
				(kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i);
		}
	}

	return 0;
}

static void
kpc_get_curcpu_counters_mp_call(void *args)
{
	struct kpc_get_counters_remote *handler = args;
	int offset=0, r=0;

	assert(handler);
	assert(handler->buf);

	offset = cpu_number() * handler->buf_stride;
	r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic */
	hw_atomic_add(&(handler->nb_counters), r);
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0;

	struct kpc_get_counters_remote hdl = {
		.classes = classes, .nb_counters = 0,
		.buf_stride = kpc_get_counter_count(classes), .buf = buf
	};

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	if (curcpu)
		*curcpu = current_processor()->cpu_id;
	mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_get_curcpu_counters_mp_call, &hdl);

	ml_set_interrupts_enabled(enabled);

	return hdl.nb_counters;
}

static void
kpc_set_config_mp_call(void *vmp_config)
{

	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0, count = 0;
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_FIXED_MASK)
	{
		kpc_set_fixed_config(&new_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(&new_config[count], mp_config->pmc_mask);
		count += kpc_popcount(mp_config->pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);
}

static void
kpc_set_reload_mp_call(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	uint32_t classes = 0, count = 0;
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters; this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* ignore the counter */
			if (((1ULL << i) & mp_config->pmc_mask) == 0)
				continue;

			if (*new_period == 0)
				*new_period = kpc_configurable_max();

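			/* preload the PMC so it overflows after *new_period more events */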
			CONFIGURABLE_RELOAD(i) = max - *new_period;

			/* reload the counter */
			kpc_reload_configurable(i);

			/* clear overflow bit just in case */
			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << i);

			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);
}

int
kpc_set_period_arch( struct kpc_config_remote *mp_config )
{
	mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_reload_mp_call, mp_config );

	return 0;
}


/* interface functions */

void
kpc_arch_init(void)
{
	i386_cpu_info_t *info = cpuid_info();
	uint8_t version_id = info->cpuid_arch_perf_leaf.version;
	/*
	 * kpc only supports Intel PMU versions 2 and above.
	 */
	if (version_id < 2) {
		kpc_supported = false;
	}
}

uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	assert(mp_config);

	/* dispatch to all CPUs */
	mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_set_running_mp_call, mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;

	return 0;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_config_mp_call, mp_config );

	return 0;
}

/* PMI stuff */
void kpc_pmi_handler(__unused x86_saved_state_t *state)
{
	uint64_t status, extra;
	uint32_t ctr;
	int enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

#ifdef FIXED_COUNTER_SHADOW
	for (ctr = 0; ctr < kpc_fixed_count(); ctr++) {
		if ((1ULL << (ctr + 32)) & status) {
			extra = kpc_reload_fixed(ctr);

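			/*
			 * 'extra' is what the counter accumulated after it wrapped;
			 * credit one full period (max - reload + 1) plus that
			 * remainder to the 64-bit shadow value.
			 */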
			FIXED_SHADOW(ctr)
				+= (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

			BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, FIXED_ACTIONID(ctr));

			if (FIXED_ACTIONID(ctr))
				kpc_sample_kperf(FIXED_ACTIONID(ctr));
		}
	}
#endif

	for (ctr = 0; ctr < kpc_configurable_count(); ctr++) {
		if ((1ULL << ctr) & status) {
			extra = kpc_reload_configurable(ctr);

			CONFIGURABLE_SHADOW(ctr)
				+= kpc_configurable_max() - CONFIGURABLE_RELOAD(ctr) + extra;

			/* kperf can grab the PMCs when it samples so we need to make sure the overflow
			 * bits are in the correct state before the call to kperf_sample */
			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << ctr);

			BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, CONFIGURABLE_ACTIONID(ctr));

			if (CONFIGURABLE_ACTIONID(ctr))
				kpc_sample_kperf(CONFIGURABLE_ACTIONID(ctr));
		}
	}

	ml_set_interrupts_enabled(enabled);
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	i386_cpu_info_t *info = cpuid_info();

	uint8_t version_id = info->cpuid_arch_perf_leaf.version;

	if (version_id == 3) {
		return KPC_PMU_INTEL_V3;
	} else if (version_id == 2) {
		return KPC_PMU_INTEL_V2;
	}

	return KPC_PMU_ERROR;
}