/*
 * Copyright (c) 2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <kern/processor.h>
#include <i386/cpuid.h>
#include <i386/proc_reg.h>
#include <i386/mp.h>
#include <sys/errno.h>
#include <kperf/buffer.h>

#include <kern/kpc.h>

#include <kperf/kperf.h>
#include <kperf/sample.h>
#include <kperf/context.h>
#include <kperf/action.h>

#include <kern/monotonic.h>

/* Fixed counter mask -- three counters, each with OS and USER */
#define IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS (0x333)
#define IA32_FIXED_CTR_ENABLE_ALL_PMI (0x888)

#define IA32_PERFEVT_USER_EN (0x10000)
#define IA32_PERFEVT_OS_EN (0x20000)

#define IA32_PERFEVTSEL_PMI (1ull << 20)
#define IA32_PERFEVTSEL_EN (1ull << 22)

/* Non-serialising */
#define USE_RDPMC

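/*
 * Per the Intel SDM, setting bit 30 of the RDPMC counter selector
 * addresses the fixed-function counters rather than the programmable ones.
 */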
#define RDPMC_FIXED_COUNTER_SELECTOR (1ULL<<30)

/* track the last config we enabled */
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;

/* PMC / MSR accesses */

static uint64_t
IA32_FIXED_CTR_CTRL(void)
{
	return rdmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL );
}

#ifdef FIXED_COUNTER_RELOAD
static void
wrIA32_FIXED_CTRx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_PERF_FIXED_CTR0 + ctr, value);
}
#endif

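/*
 * Read counter `ctr`: via RDPMC (non-serialising) when USE_RDPMC is
 * defined, otherwise by reading the counter's MSR directly.
 */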
static uint64_t
IA32_PMCx(uint32_t ctr)
{
#ifdef USE_RDPMC
	return rdpmc64(ctr);
#else /* !USE_RDPMC */
	return rdmsr64(MSR_IA32_PERFCTR0 + ctr);
#endif /* !USE_RDPMC */
}

static void
wrIA32_PMCx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_PERFCTR0 + ctr, value);
}

static uint64_t
IA32_PERFEVTSELx(uint32_t ctr)
{
	return rdmsr64(MSR_IA32_EVNTSEL0 + ctr);
}

static void
wrIA32_PERFEVTSELx(uint32_t ctr, uint64_t value)
{
	wrmsr64(MSR_IA32_EVNTSEL0 + ctr, value);
}


/* internal functions */

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}

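/*
 * Counter counts and widths below come from the architectural performance
 * monitoring leaf of CPUID (leaf 0xA), cached in the cpuid info.
 */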
uint32_t
kpc_fixed_count(void)
{
	i386_cpu_info_t *info = NULL;
	info = cpuid_info();
	return info->cpuid_arch_perf_leaf.fixed_number;
}

uint32_t
kpc_configurable_count(void)
{
	i386_cpu_info_t *info = NULL;
	info = cpuid_info();
	return info->cpuid_arch_perf_leaf.number;
}

uint32_t
kpc_fixed_config_count(void)
{
	return KPC_X86_64_FIXED_CONFIGS;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);
}

uint32_t
kpc_rawpmu_config_count(void)
{
	// RAW PMU access not implemented.
	return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}

static uint8_t
kpc_fixed_width(void)
{
	i386_cpu_info_t *info = NULL;

	info = cpuid_info();

	return info->cpuid_arch_perf_leaf.fixed_width;
}

static uint8_t
kpc_configurable_width(void)
{
	i386_cpu_info_t *info = NULL;

	info = cpuid_info();

	return info->cpuid_arch_perf_leaf.width;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << kpc_fixed_width()) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << kpc_configurable_width()) - 1;
}

#ifdef FIXED_COUNTER_SHADOW
static uint64_t
kpc_reload_fixed(int ctr)
{
	uint64_t old = IA32_FIXED_CTRx(ctr);
	wrIA32_FIXED_CTRx(ctr, FIXED_RELOAD(ctr));
	return old;
}
#endif

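/*
 * Rearm configurable counter `ctr` with its reload value and return the
 * count it held, so callers can account for events that arrived before
 * the reload.
 */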
static uint64_t
kpc_reload_configurable(int ctr)
{
	uint64_t cfg = IA32_PERFEVTSELx(ctr);

	/* counters must be disabled before they can be written to */
	uint64_t old = IA32_PMCx(ctr);
	wrIA32_PERFEVTSELx(ctr, cfg & ~IA32_PERFEVTSEL_EN);
	wrIA32_PMCx(ctr, CONFIGURABLE_RELOAD(ctr));
	wrIA32_PERFEVTSELx(ctr, cfg);
	return old;
}

void kpc_pmi_handler(void);

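/*
 * Fixed counters are enabled in two places: the ring/PMI bits in
 * IA32_FIXED_CTR_CTRL and the per-counter enable bits (32 + n) in
 * IA32_PERF_GLOBAL_CTRL.
 */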
static void
set_running_fixed(boolean_t on)
{
	uint64_t global = 0, mask = 0, fixed_ctrl = 0;
	int i;
	boolean_t enabled;

	if (on) {
		/* these are per-thread in SMT */
		fixed_ctrl = IA32_FIXED_CTR_ENABLE_ALL_CTRS_ALL_RINGS | IA32_FIXED_CTR_ENABLE_ALL_PMI;
	} else {
		/* don't allow disabling fixed counters */
		return;
	}

	wrmsr64( MSR_IA32_PERF_FIXED_CTR_CTRL, fixed_ctrl );

	enabled = ml_set_interrupts_enabled(FALSE);

	/* rmw the global control */
	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);
	for (i = 0; i < (int) kpc_fixed_count(); i++) {
		mask |= (1ULL << (32 + i));
	}

	if (on) {
		global |= mask;
	} else {
		global &= ~mask;
	}

	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);

	ml_set_interrupts_enabled(enabled);
}

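/*
 * target_mask selects which configurable-PMC enable bits of
 * IA32_PERF_GLOBAL_CTRL to rewrite; state_mask holds their new values.
 */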
static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t global = 0ULL, cfg = 0ULL, save = 0ULL;
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	/* rmw the global control */
	global = rdmsr64(MSR_IA32_PERF_GLOBAL_CTRL);

	/* need to save and restore counter since it resets when reconfigured */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		cfg = IA32_PERFEVTSELx(i);
		save = IA32_PMCx(i);
		wrIA32_PERFEVTSELx(i, cfg | IA32_PERFEVTSEL_PMI | IA32_PERFEVTSEL_EN);
		wrIA32_PMCx(i, save);
	}

	/* update the global control value */
	global &= ~target_mask; /* clear the targeted PMCs' bits */
	global |= state_mask;   /* set the targeted PMCs' bits to their new states */
	wrmsr64(MSR_IA32_PERF_GLOBAL_CTRL, global);

	ml_set_interrupts_enabled(enabled);
}

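/*
 * Cross-call body: runs on every CPU (via mp_cpus_call) so that each core
 * programs its own PMU.
 */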
static void
kpc_set_running_mp_call( void *vstate )
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
	assert(mp_config);

	if (kpc_controls_fixed_counters()) {
		set_running_fixed(mp_config->classes & KPC_CLASS_FIXED_MASK);
	}

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);
}

int
kpc_get_fixed_config(kpc_config_t *configv)
{
	configv[0] = IA32_FIXED_CTR_CTRL();
	return 0;
}

static int
kpc_set_fixed_config(kpc_config_t *configv)
{
	(void) configv;

	/* NYI */
	return -1;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
#if MONOTONIC
	mt_fixed_counts(counterv);
	return 0;
#else /* MONOTONIC */
#pragma unused(counterv)
	return ENOTSUP;
#endif /* !MONOTONIC */
}

int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count();

	assert(configv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*configv++ = IA32_PERFEVTSELx(i);
		}
	}
	return 0;
}

static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t save;

	for (uint32_t i = 0; i < cfg_count; i++) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}

		/* need to save and restore counter since it resets when reconfigured */
		save = IA32_PMCx(i);

		/*
		 * Some bits are not safe to set from user space.
		 * Allow these bits to be set:
		 *
		 *   0-7    Event select
		 *   8-15   UMASK
		 *   16     USR
		 *   17     OS
		 *   18     E
		 *   22     EN
		 *   23     INV
		 *   24-31  CMASK
		 *
		 * Excluding:
		 *
		 *   19     PC
		 *   20     INT
		 *   21     AnyThread
		 *   32     IN_TX
		 *   33     IN_TXCP
		 *   34-63  Reserved
		 */
		wrIA32_PERFEVTSELx(i, *configv & 0xffc7ffffull);
		wrIA32_PMCx(i, save);

		/* next configuration word */
		configv++;
	}

	return 0;
}

int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count();
	uint64_t status, *it_counterv = counterv;

	/* snap the counters */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*it_counterv++ = CONFIGURABLE_SHADOW(i) +
			    (IA32_PMCx(i) - CONFIGURABLE_RELOAD(i));
		}
	}

	/* Grab the overflow bits */
	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

	/* reset the iterator */
	it_counterv = counterv;

	/*
	 * If the overflow bit is set for a counter, our previous read may or may not have been
	 * before the counter overflowed. Re-read any counter with its overflow bit set so
	 * we know for sure that it has overflowed. The reason this matters is that the math
	 * is different for a counter that has overflowed.
	 */
	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) &&
		    ((1ULL << i) & status)) {
			*it_counterv++ = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i)) + IA32_PMCx(i);
		}
	}

	return 0;
}

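/*
 * Cross-call body: each CPU snaps its own counters into the shared buffer
 * at an offset of cpu_number() * buf_stride.
 */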
static void
kpc_get_curcpu_counters_mp_call(void *args)
{
	struct kpc_get_counters_remote *handler = args;
	int offset = 0, r = 0;

	assert(handler);
	assert(handler->buf);

	offset = cpu_number() * handler->buf_stride;
	r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic */
	os_atomic_add(&(handler->nb_counters), r, relaxed);
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	int enabled = 0;

	struct kpc_get_counters_remote hdl = {
		.classes = classes, .nb_counters = 0,
		.buf_stride = kpc_get_counter_count(classes), .buf = buf
	};

	assert(buf);

	enabled = ml_set_interrupts_enabled(FALSE);

	if (curcpu) {
		*curcpu = cpu_number();
	}
	mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_get_curcpu_counters_mp_call, &hdl);

	ml_set_interrupts_enabled(enabled);

	return hdl.nb_counters;
}

static void
kpc_set_config_mp_call(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0, count = 0;
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_FIXED_MASK) {
		kpc_set_fixed_config(&new_config[count]);
		count += kpc_get_config_count(KPC_CLASS_FIXED_MASK);
	}

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(&new_config[count], mp_config->pmc_mask);
		count += kpc_popcount(mp_config->pmc_mask);
	}

	ml_set_interrupts_enabled(enabled);
}

static void
kpc_set_reload_mp_call(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	uint32_t classes = 0, count = 0;
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters; this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* skip counters not selected by the mask */
			if (((1ULL << i) & mp_config->pmc_mask) == 0) {
				continue;
			}

			if (*new_period == 0) {
				*new_period = kpc_configurable_max();
			}

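			/*
			 * The counter counts up from (max - period), so it
			 * overflows and raises a PMI after about `period`
			 * more events.
			 */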
			CONFIGURABLE_RELOAD(i) = max - *new_period;

			/* reload the counter */
			kpc_reload_configurable(i);

			/* clear overflow bit just in case */
			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << i);

			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);
}

int
kpc_set_period_arch( struct kpc_config_remote *mp_config )
{
	mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_reload_mp_call, mp_config );

	return 0;
}


/* interface functions */

void
kpc_arch_init(void)
{
	i386_cpu_info_t *info = cpuid_info();
	uint8_t version_id = info->cpuid_arch_perf_leaf.version;
	/*
	 * kpc only supports Intel PMU versions 2 and above.
	 */
	if (version_id < 2) {
		kpc_supported = false;
	}
}

uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	assert(mp_config);

	/* dispatch to all CPUs */
	mp_cpus_call(CPUMASK_ALL, ASYNC, kpc_set_running_mp_call, mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;

	return 0;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	mp_cpus_call( CPUMASK_ALL, ASYNC, kpc_set_config_mp_call, mp_config );

	return 0;
}

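/*
 * Recover the PC at the time of the PMI from the saved interrupt state;
 * the privilege level in CS distinguishes a kernel from a user interrupt.
 */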
static uintptr_t
get_interrupted_pc(bool *kernel_out)
{
	x86_saved_state_t *state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return 0;
	}

	bool state_64 = is_saved_state64(state);
	uint64_t cs;
	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	bool kernel = (cs & SEL_PL) != SEL_PL_U;
	*kernel_out = kernel;

	uintptr_t pc = 0;
	if (state_64) {
		pc = saved_state64(state)->isf.rip;
	} else {
		pc = saved_state32(state)->eip;
	}
	if (kernel) {
		pc = VM_KERNEL_UNSLIDE(pc);
	}
	return pc;
}

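/*
 * Common sample path for fixed and configurable overflows: record where the
 * PMI landed (kernel or user PC) and which rings the event was counting in.
 */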
static void
kpc_sample_kperf_x86(uint32_t ctr, uint32_t actionid, uint64_t count,
    uint64_t config)
{
	bool kernel = false;
	uintptr_t pc = get_interrupted_pc(&kernel);
	kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
	if (config & IA32_PERFEVT_USER_EN) {
		flags |= KPC_USER_COUNTING;
	}
	if (config & IA32_PERFEVT_OS_EN) {
		flags |= KPC_KERNEL_COUNTING;
	}
	kpc_sample_kperf(actionid, ctr,
	    config & 0xffff /* just the event number and umask */, count, pc, flags);
}

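/*
 * PMI entry point: for every counter whose overflow bit is set in
 * IA32_PERF_GLOBAL_STATUS, fold the elapsed count into its shadow value,
 * rearm it, and take a kperf sample if an action is configured.
 */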
void
kpc_pmi_handler(void)
{
	uint64_t status, extra;
	uint32_t ctr;
	int enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	status = rdmsr64(MSR_IA32_PERF_GLOBAL_STATUS);

#ifdef FIXED_COUNTER_SHADOW
	for (ctr = 0; ctr < kpc_fixed_count(); ctr++) {
		if ((1ULL << (ctr + 32)) & status) {
			extra = kpc_reload_fixed(ctr);

			FIXED_SHADOW(ctr) += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

			uint32_t actionid = FIXED_ACTIONID(ctr);
			BUF_INFO(PERF_KPC_FCOUNTER, ctr, FIXED_SHADOW(ctr), extra, actionid);

			if (actionid != 0) {
				kpc_sample_kperf_x86(ctr, actionid, FIXED_SHADOW(ctr) + extra, 0);
			}
		}
	}
#endif // FIXED_COUNTER_SHADOW

	for (ctr = 0; ctr < kpc_configurable_count(); ctr++) {
		if ((1ULL << ctr) & status) {
			extra = kpc_reload_configurable(ctr);

			CONFIGURABLE_SHADOW(ctr) += kpc_configurable_max() -
			    CONFIGURABLE_RELOAD(ctr) + extra;

			/*
			 * kperf can grab the PMCs when it samples, so make sure the
			 * overflow bits are in the correct state before the call to
			 * kperf_sample.
			 */
			wrmsr64(MSR_IA32_PERF_GLOBAL_OVF_CTRL, 1ull << ctr);

			unsigned int actionid = CONFIGURABLE_ACTIONID(ctr);
			BUF_INFO(PERF_KPC_COUNTER, ctr, CONFIGURABLE_SHADOW(ctr), extra, actionid);

			if (actionid != 0) {
				uint64_t config = IA32_PERFEVTSELx(ctr);
				kpc_sample_kperf_x86(ctr + kpc_fixed_count(), actionid,
				    CONFIGURABLE_SHADOW(ctr) + extra, config);
			}
		}
	}

	ml_set_interrupts_enabled(enabled);
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	i386_cpu_info_t *info = cpuid_info();

	uint8_t version_id = info->cpuid_arch_perf_leaf.version;

	if (version_id == 3) {
		return KPC_PMU_INTEL_V3;
	} else if (version_id == 2) {
		return KPC_PMU_INTEL_V2;
	}

	return KPC_PMU_ERROR;
}