1/*
2 * Copyright (c) 2012-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <arm/cpu_data_internal.h>
30#include <arm/cpu_internal.h>
31#include <kern/kalloc.h>
32#include <kern/kpc.h>
33#include <kern/thread.h>
34#include <kern/processor.h>
35#include <mach/mach_types.h>
36#include <machine/machine_routines.h>
37#include <stdint.h>
38#include <sys/errno.h>
39
40#if APPLE_ARM64_ARCH_FAMILY
41
42#if MONOTONIC
43#include <kern/monotonic.h>
44#endif /* MONOTONIC */
45
46void kpc_pmi_handler(unsigned int ctr);
47
48/*
49 * PMCs 8 and 9 were added to Hurricane and to maintain the existing bit
50 * positions of the other PMCs, their configuration bits start at position 32.
51 */
52#define PMCR_PMC_8_9_OFFSET (32)
53#define PMCR_PMC_8_9_SHIFT(PMC) (((PMC) - 8) + PMCR_PMC_8_9_OFFSET)
54#define PMCR_PMC_SHIFT(PMC) (((PMC) <= 7) ? (PMC) : \
55 PMCR_PMC_8_9_SHIFT(PMC))
56
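/*
 * Worked example, derived from the macros above: PMCR_PMC_SHIFT(5) == 5,
 * while PMCR_PMC_SHIFT(8) == 32 and PMCR_PMC_SHIFT(9) == 33, because the
 * control bits for PMCs 8 and 9 sit in the upper word of each register.
 */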
57/*
58 * PMCR0 controls enabling, interrupts, and overflow of performance counters.
59 */
60
61/* PMC is enabled */
62#define PMCR0_PMC_ENABLE_MASK(PMC) (UINT64_C(0x1) << PMCR_PMC_SHIFT(PMC))
63#define PMCR0_PMC_DISABLE_MASK(PMC) (~PMCR0_PMC_ENABLE_MASK(PMC))
64
65/* how interrupts are generated on PMIs */
66#define PMCR0_INTGEN_SHIFT (8)
67#define PMCR0_INTGEN_MASK (UINT64_C(0x7) << PMCR0_INTGEN_SHIFT)
68#define PMCR0_INTGEN_OFF (UINT64_C(0) << PMCR0_INTGEN_SHIFT)
69#define PMCR0_INTGEN_PMI (UINT64_C(1) << PMCR0_INTGEN_SHIFT)
70#define PMCR0_INTGEN_AIC (UINT64_C(2) << PMCR0_INTGEN_SHIFT)
71#define PMCR0_INTGEN_DBG_HLT (UINT64_C(3) << PMCR0_INTGEN_SHIFT)
72#define PMCR0_INTGEN_FIQ (UINT64_C(4) << PMCR0_INTGEN_SHIFT)
73
74/* 10 unused */
75
76/* set by hardware if PMI was generated */
77#define PMCR0_PMAI_SHIFT (11)
78#define PMCR0_PMAI_MASK (UINT64_C(1) << PMCR0_PMAI_SHIFT)
79
80/* overflow on a PMC generates an interrupt */
81#define PMCR0_PMI_OFFSET (12)
82#define PMCR0_PMI_SHIFT(PMC) (PMCR0_PMI_OFFSET + PMCR_PMC_SHIFT(PMC))
83#define PMCR0_PMI_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR0_PMI_SHIFT(PMC))
84#define PMCR0_PMI_DISABLE_MASK(PMC) (~PMCR0_PMI_ENABLE_MASK(PMC))
85
86/* disable counting when a PMI is signaled (except for AIC interrupts) */
87#define PMCR0_DISCNT_SHIFT (20)
88#define PMCR0_DISCNT_ENABLE_MASK (UINT64_C(1) << PMCR0_DISCNT_SHIFT)
89#define PMCR0_DISCNT_DISABLE_MASK (~PMCR0_DISCNT_ENABLE_MASK)
90
91/* 21 unused */
92
93/* block PMIs until ERET retires */
94#define PMCR0_WFRFE_SHIFT (22)
95#define PMCR0_WFRFE_ENABLE_MASK (UINT64_C(1) << PMCR0_WFRFE_SHIFT)
96#define PMCR0_WFRFE_DISABLE_MASK (~PMCR0_WFRFE_ENABLE_MASK)
97
98/* count global L2C events */
99#define PMCR0_L2CGLOBAL_SHIFT (23)
100#define PMCR0_L2CGLOBAL_ENABLE_MASK (UINT64_C(1) << PMCR0_L2CGLOBAL_SHIFT)
101#define PMCR0_L2CGLOBAL_DISABLE_MASK (~PMCR0_L2CGLOBAL_ENABLE_MASK)
102
103/* allow user mode access to configuration registers */
104#define PMCR0_USEREN_SHIFT (30)
105#define PMCR0_USEREN_ENABLE_MASK (UINT64_C(1) << PMCR0_USEREN_SHIFT)
106#define PMCR0_USEREN_DISABLE_MASK (~PMCR0_USEREN_ENABLE_MASK)
107
108/* force the CPMU clocks in case of a clocking bug */
109#define PMCR0_CLKEN_SHIFT (31)
110#define PMCR0_CLKEN_ENABLE_MASK (UINT64_C(1) << PMCR0_CLKEN_SHIFT)
111#define PMCR0_CLKEN_DISABLE_MASK (~PMCR0_CLKEN_ENABLE_MASK)
112
113/* 32 - 44 mirror the low bits for PMCs 8 and 9 */
114
115/* PMCR1 enables counters in different processor modes */
116
117#define PMCR1_EL0_A32_OFFSET (0)
118#define PMCR1_EL0_A64_OFFSET (8)
119#define PMCR1_EL1_A64_OFFSET (16)
120#define PMCR1_EL3_A64_OFFSET (24)
121
122#define PMCR1_EL0_A32_SHIFT(PMC) (PMCR1_EL0_A32_OFFSET + PMCR_PMC_SHIFT(PMC))
123#define PMCR1_EL0_A64_SHIFT(PMC) (PMCR1_EL0_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
124#define PMCR1_EL1_A64_SHIFT(PMC) (PMCR1_EL1_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
125#define PMCR1_EL3_A64_SHIFT(PMC) (PMCR1_EL3_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
126
127#define PMCR1_EL0_A32_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A32_SHIFT(PMC))
128#define PMCR1_EL0_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A64_SHIFT(PMC))
129#define PMCR1_EL1_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL1_A64_SHIFT(PMC))
130/* PMCR1_EL3_A64 is not supported on PMCs 8 and 9 */
131#if NO_MONITOR
132#define PMCR1_EL3_A64_ENABLE_MASK(PMC) UINT64_C(0)
133#else
134#define PMCR1_EL3_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL3_A64_SHIFT(PMC))
135#endif
136
137#define PMCR1_EL_ALL_ENABLE_MASK(PMC) (PMCR1_EL0_A32_ENABLE_MASK(PMC) | \
138 PMCR1_EL0_A64_ENABLE_MASK(PMC) | \
139 PMCR1_EL1_A64_ENABLE_MASK(PMC) | \
140 PMCR1_EL3_A64_ENABLE_MASK(PMC))
141#define PMCR1_EL_ALL_DISABLE_MASK(PMC) (~PMCR1_EL_ALL_ENABLE_MASK(PMC))
142
143/* PMESR0 and PMESR1 are event selection registers */
144
145/* PMESR0 selects which event is counted on PMCs 2, 3, 4, and 5 */
146/* PMESR1 selects which event is counted on PMCs 6, 7, 8, and 9 */
147
148#define PMESR_PMC_WIDTH (8)
149#define PMESR_PMC_MASK (UINT8_MAX)
150#define PMESR_SHIFT(PMC, OFF) (8 * ((PMC) - (OFF)))
151#define PMESR_EVT_MASK(PMC, OFF) (PMESR_PMC_MASK << PMESR_SHIFT(PMC, OFF))
152#define PMESR_EVT_CLEAR(PMC, OFF) (~PMESR_EVT_MASK(PMC, OFF))
153
154#define PMESR_EVT_DECODE(PMESR, PMC, OFF) \
155 (((PMESR) >> PMESR_SHIFT(PMC, OFF)) & PMESR_PMC_MASK)
156#define PMESR_EVT_ENCODE(EVT, PMC, OFF) \
157 (((EVT) & PMESR_PMC_MASK) << PMESR_SHIFT(PMC, OFF))
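/*
 * Worked example of the layout these macros imply: PMC 4 is selected in
 * PMESR0 with OFF == 2, so its event number occupies bits [23:16]
 * (PMESR_SHIFT(4, 2) == 16), and PMESR_EVT_ENCODE(0x02, 4, 2) evaluates to
 * (0x02 << 16).
 */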
158
159/* system registers in the CPMU */
160
161#define SREG_PMCR0 "S3_1_c15_c0_0"
162#define SREG_PMCR1 "S3_1_c15_c1_0"
163#define SREG_PMCR2 "S3_1_c15_c2_0"
164#define SREG_PMCR3 "S3_1_c15_c3_0"
165#define SREG_PMCR4 "S3_1_c15_c4_0"
166#define SREG_PMESR0 "S3_1_c15_c5_0"
167#define SREG_PMESR1 "S3_1_c15_c6_0"
168#define SREG_PMSR "S3_1_c15_c13_0"
169#define SREG_OPMAT0 "S3_1_c15_c7_0"
170#define SREG_OPMAT1 "S3_1_c15_c8_0"
171#define SREG_OPMSK0 "S3_1_c15_c9_0"
172#define SREG_OPMSK1 "S3_1_c15_c10_0"
173
174#define SREG_PMC0 "S3_2_c15_c0_0"
175#define SREG_PMC1 "S3_2_c15_c1_0"
176#define SREG_PMC2 "S3_2_c15_c2_0"
177#define SREG_PMC3 "S3_2_c15_c3_0"
178#define SREG_PMC4 "S3_2_c15_c4_0"
179#define SREG_PMC5 "S3_2_c15_c5_0"
180#define SREG_PMC6 "S3_2_c15_c6_0"
181#define SREG_PMC7 "S3_2_c15_c7_0"
182#define SREG_PMC8 "S3_2_c15_c9_0"
183#define SREG_PMC9 "S3_2_c15_c10_0"
184
185#define SREG_PMMMAP "S3_2_c15_c15_0"
186#define SREG_PMTRHLD2 "S3_2_c15_c14_0"
187#define SREG_PMTRHLD4 "S3_2_c15_c13_0"
188#define SREG_PMTRHLD6 "S3_2_c15_c12_0"
189
190/*
191 * The low 8 bits of a configuration word select the event to program on
192 * PMESR{0,1}. Bits 16-19 are mapped to PMCR1 bits.
193 */
194#define CFGWORD_EL0A32EN_MASK (0x10000)
195#define CFGWORD_EL0A64EN_MASK (0x20000)
196#define CFGWORD_EL1EN_MASK (0x40000)
197#define CFGWORD_EL3EN_MASK (0x80000)
198#define CFGWORD_ALLMODES_MASK (0xf0000)
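/*
 * Illustrative (hypothetical) client configuration word: counting core
 * cycles in 64-bit user and kernel mode would be expressed as
 * (CPMU_CORE_CYCLE | CFGWORD_EL0A64EN_MASK | CFGWORD_EL1EN_MASK), i.e. the
 * event number in the low byte plus the desired mode bits. CPMU_CORE_CYCLE
 * is defined later in this file.
 */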
199
200/* ACC offsets for PIO */
201#define ACC_CPMU_PMC0_OFFSET (0x200)
202#define ACC_CPMU_PMC8_OFFSET (0x280)
203
204/*
205 * Macros for reading and writing system registers.
206 *
207 * SR must be one of the SREG_* defines above.
208 */
209#define SREG_WRITE(SR, V) __asm__ volatile("msr " SR ", %0 ; isb" : : "r"(V))
210#define SREG_READ(SR) ({ uint64_t VAL; \
211 __asm__ volatile("mrs %0, " SR : "=r"(VAL)); \
212 VAL; })
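/*
 * Minimal usage sketch (illustration only, not called anywhere):
 *
 *	uint64_t pmcr0 = SREG_READ(SREG_PMCR0);
 *	SREG_WRITE(SREG_PMCR0, pmcr0 | PMCR0_PMC_ENABLE_MASK(2));
 *
 * The "isb" embedded in SREG_WRITE ensures the new control value takes
 * effect before subsequent instructions execute.
 */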
213
214/*
215 * Configuration registers that can be controlled by RAWPMU:
216 *
217 * All: PMCR2-4, OPMAT0-1, OPMSK0-1.
218 * Typhoon/Twister/Hurricane: PMMMAP, PMTRHLD2/4/6.
219 */
220#if HAS_EARLY_APPLE_CPMU
221#define RAWPMU_CONFIG_COUNT 7
222#else /* HAS_EARLY_APPLE_CPMU */
223#define RAWPMU_CONFIG_COUNT 11
224#endif /* !HAS_EARLY_APPLE_CPMU */
225
226/* TODO: allocate dynamically */
227static uint64_t saved_PMCR[MAX_CPUS][2];
228static uint64_t saved_PMESR[MAX_CPUS][2];
229static uint64_t saved_RAWPMU[MAX_CPUS][RAWPMU_CONFIG_COUNT];
230static uint64_t saved_counter[MAX_CPUS][KPC_MAX_COUNTERS];
231static uint64_t kpc_running_cfg_pmc_mask = 0;
232static uint32_t kpc_running_classes = 0;
233static uint32_t kpc_configured = 0;
234
235/*
236 * The whitelist is disabled by default on development/debug kernels. This can
237 * be changed via the kpc.disable_whitelist sysctl. The whitelist is enabled on
238 * release kernels and cannot be disabled.
239 */
240#if DEVELOPMENT || DEBUG
241static boolean_t whitelist_disabled = TRUE;
242#else
243static boolean_t whitelist_disabled = FALSE;
244#endif
245
246#define CPMU_CORE_CYCLE 0x02
247
248#if HAS_EARLY_APPLE_CPMU
249
250#define CPMU_BIU_UPSTREAM_CYCLE 0x19
251#define CPMU_BIU_DOWNSTREAM_CYCLE 0x1a
252#define CPMU_L2C_AGENT_LD 0x22
253#define CPMU_L2C_AGENT_LD_MISS 0x23
254#define CPMU_L2C_AGENT_ST 0x24
255#define CPMU_L2C_AGENT_ST_MISS 0x25
256#define CPMU_INST_A32 0x78
257#define CPMU_INST_THUMB 0x79
258#define CPMU_INST_A64 0x7a
259#define CPMU_INST_BRANCH 0x7b
260#define CPMU_SYNC_DC_LOAD_MISS 0xb4
261#define CPMU_SYNC_DC_STORE_MISS 0xb5
262#define CPMU_SYNC_DTLB_MISS 0xb6
263#define CPMU_SYNC_ST_HIT_YNGR_LD 0xb9
264#define CPMU_SYNC_BR_ANY_MISP 0xc0
265#define CPMU_FED_IC_MISS_DEM 0xce
266#define CPMU_FED_ITLB_MISS 0xcf
267
268#else /* HAS_EARLY_APPLE_CPMU */
269
270#if HAS_CPMU_BIU_EVENTS
271#define CPMU_BIU_UPSTREAM_CYCLE 0x13
272#define CPMU_BIU_DOWNSTREAM_CYCLE 0x14
273#endif /* HAS_CPMU_BIU_EVENTS */
274
275#if HAS_CPMU_L2C_EVENTS
276#define CPMU_L2C_AGENT_LD 0x1a
277#define CPMU_L2C_AGENT_LD_MISS 0x1b
278#define CPMU_L2C_AGENT_ST 0x1c
279#define CPMU_L2C_AGENT_ST_MISS 0x1d
280#endif /* HAS_CPMU_L2C_EVENTS */
281
282#define CPMU_INST_A32 0x8a
283#define CPMU_INST_THUMB 0x8b
284#define CPMU_INST_A64 0x8c
285#define CPMU_INST_BRANCH 0x8d
286#define CPMU_SYNC_DC_LOAD_MISS 0xbf
287#define CPMU_SYNC_DC_STORE_MISS 0xc0
288#define CPMU_SYNC_DTLB_MISS 0xc1
289#define CPMU_SYNC_ST_HIT_YNGR_LD 0xc4
290#define CPMU_SYNC_BR_ANY_MISP 0xcb
291#define CPMU_FED_IC_MISS_DEM 0xd3
292#define CPMU_FED_ITLB_MISS 0xd4
293
294#endif /* !HAS_EARLY_APPLE_CPMU */
295
296/* List of counter events that are allowed to be used by third parties. */
297static kpc_config_t whitelist[] = {
298 0, /* NO_EVENT */
299
300 CPMU_CORE_CYCLE,
301
302#if HAS_CPMU_BIU_EVENTS
303 CPMU_BIU_UPSTREAM_CYCLE, CPMU_BIU_DOWNSTREAM_CYCLE,
304#endif /* HAS_CPMU_BIU_EVENTS */
305
306#if HAS_CPMU_L2C_EVENTS
307 CPMU_L2C_AGENT_LD, CPMU_L2C_AGENT_LD_MISS, CPMU_L2C_AGENT_ST,
308 CPMU_L2C_AGENT_ST_MISS,
309#endif /* HAS_CPMU_L2C_EVENTS */
310
311 CPMU_INST_A32, CPMU_INST_THUMB, CPMU_INST_A64, CPMU_INST_BRANCH,
312 CPMU_SYNC_DC_LOAD_MISS, CPMU_SYNC_DC_STORE_MISS,
313 CPMU_SYNC_DTLB_MISS, CPMU_SYNC_ST_HIT_YNGR_LD,
314 CPMU_SYNC_BR_ANY_MISP, CPMU_FED_IC_MISS_DEM, CPMU_FED_ITLB_MISS,
315};
316#define WHITELIST_COUNT (sizeof(whitelist) / sizeof(whitelist[0]))
317#define EVENT_MASK 0xff
318
319static bool
320config_in_whitelist(kpc_config_t cfg)
321{
322 for (unsigned int i = 0; i < WHITELIST_COUNT; i++) {
323 /* Strip off any EL configuration bits -- just look at the event. */
324 if ((cfg & EVENT_MASK) == whitelist[i]) {
325 return true;
326 }
327 }
328 return false;
329}
330
331#ifdef KPC_DEBUG
332static void
333dump_regs(void)
334{
335 uint64_t val;
336 kprintf("PMCR0 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR0));
337 kprintf("PMCR1 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR1));
338 kprintf("PMCR2 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR2));
339 kprintf("PMCR3 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR3));
340 kprintf("PMCR4 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR4));
341 kprintf("PMESR0 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMESR0));
342 kprintf("PMESR1 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMESR1));
343
344 kprintf("PMC0 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC0));
345 kprintf("PMC1 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC1));
346 kprintf("PMC2 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC2));
347 kprintf("PMC3 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC3));
348 kprintf("PMC4 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC4));
349 kprintf("PMC5 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC5));
350 kprintf("PMC6 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC6));
351 kprintf("PMC7 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC7));
352
353#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
354 kprintf("PMC8 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC8));
355 kprintf("PMC9 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC9));
356#endif
357}
358#endif
359
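/*
 * enable_counter() and disable_counter() below manipulate only PMCR0.
 * Counters 0 and 1 are the fixed counters owned by the monotonic
 * subsystem, so they are never disabled here and their enable bits (0x3)
 * are kept set whenever PMCR0 is rewritten.
 */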
360static boolean_t
361enable_counter(uint32_t counter)
362{
363 int cpuid = cpu_number();
364 uint64_t pmcr0 = 0, intgen_type;
365 boolean_t counter_running, pmi_enabled, intgen_correct, enabled;
366
367 pmcr0 = SREG_READ(SREG_PMCR0) | 0x3 /* leave the fixed counters enabled for monotonic */;
368
369 counter_running = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;
370 pmi_enabled = (pmcr0 & PMCR0_PMI_ENABLE_MASK(counter)) != 0;
371
372 /* TODO: this should use the PMI path rather than AIC for the interrupt,
373 * as it is faster.
374 */
375 intgen_type = PMCR0_INTGEN_AIC;
376 intgen_correct = (pmcr0 & PMCR0_INTGEN_MASK) == intgen_type;
377
378 enabled = counter_running && pmi_enabled && intgen_correct;
379
380 if (!enabled) {
381 pmcr0 |= PMCR0_PMC_ENABLE_MASK(counter);
382 pmcr0 |= PMCR0_PMI_ENABLE_MASK(counter);
383 pmcr0 &= ~PMCR0_INTGEN_MASK;
384 pmcr0 |= intgen_type;
385
386 SREG_WRITE(SREG_PMCR0, pmcr0);
387 }
388
389 saved_PMCR[cpuid][0] = pmcr0;
390 return enabled;
391}
392
393static boolean_t
394disable_counter(uint32_t counter)
395{
396 uint64_t pmcr0;
397 boolean_t enabled;
398 int cpuid = cpu_number();
399
400 if (counter < 2) {
401 return true;
402 }
403
404 pmcr0 = SREG_READ(SREG_PMCR0) | 0x3;
405 enabled = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;
406
407 if (enabled) {
408 pmcr0 &= PMCR0_PMC_DISABLE_MASK(counter);
409 SREG_WRITE(SREG_PMCR0, pmcr0);
410 }
411
412 saved_PMCR[cpuid][0] = pmcr0;
413 return enabled;
414}
415
416/*
417 * Enable counter in processor modes determined by configuration word.
418 */
419static void
420set_modes(uint32_t counter, kpc_config_t cfgword)
421{
422 uint64_t bits = 0;
423 int cpuid = cpu_number();
424
425 if (cfgword & CFGWORD_EL0A32EN_MASK) {
426 bits |= PMCR1_EL0_A32_ENABLE_MASK(counter);
427 }
428 if (cfgword & CFGWORD_EL0A64EN_MASK) {
429 bits |= PMCR1_EL0_A64_ENABLE_MASK(counter);
430 }
431 if (cfgword & CFGWORD_EL1EN_MASK) {
432 bits |= PMCR1_EL1_A64_ENABLE_MASK(counter);
433 }
434#if !NO_MONITOR
435 if (cfgword & CFGWORD_EL3EN_MASK) {
436 bits |= PMCR1_EL3_A64_ENABLE_MASK(counter);
437 }
438#endif
439
440 /*
441 * Backwards compatibility: Writing a non-zero configuration word with
442 * all zeros in bits 16-19 is interpreted as enabling in all modes.
443 * This matches the behavior when the PMCR1 bits weren't exposed.
444 */
445 if (bits == 0 && cfgword != 0) {
446 bits = PMCR1_EL_ALL_ENABLE_MASK(counter);
447 }
448
449 uint64_t pmcr1 = SREG_READ(SREG_PMCR1);
450 pmcr1 &= PMCR1_EL_ALL_DISABLE_MASK(counter);
451 pmcr1 |= bits;
452 pmcr1 |= 0x30303; /* monotonic compatibility */
453 SREG_WRITE(SREG_PMCR1, pmcr1);
454 saved_PMCR[cpuid][1] = pmcr1;
455}
456
457static uint64_t
458read_counter(uint32_t counter)
459{
460 switch (counter) {
461 // case 0: return SREG_READ(SREG_PMC0);
462 // case 1: return SREG_READ(SREG_PMC1);
463 case 2: return SREG_READ(SREG_PMC2);
464 case 3: return SREG_READ(SREG_PMC3);
465 case 4: return SREG_READ(SREG_PMC4);
466 case 5: return SREG_READ(SREG_PMC5);
467 case 6: return SREG_READ(SREG_PMC6);
468 case 7: return SREG_READ(SREG_PMC7);
469#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
470 case 8: return SREG_READ(SREG_PMC8);
471 case 9: return SREG_READ(SREG_PMC9);
472#endif
473 default: return 0;
474 }
475}
476
477static void
478write_counter(uint32_t counter, uint64_t value)
479{
480 switch (counter) {
481 // case 0: SREG_WRITE(SREG_PMC0, value); break;
482 // case 1: SREG_WRITE(SREG_PMC1, value); break;
483 case 2: SREG_WRITE(SREG_PMC2, value); break;
484 case 3: SREG_WRITE(SREG_PMC3, value); break;
485 case 4: SREG_WRITE(SREG_PMC4, value); break;
486 case 5: SREG_WRITE(SREG_PMC5, value); break;
487 case 6: SREG_WRITE(SREG_PMC6, value); break;
488 case 7: SREG_WRITE(SREG_PMC7, value); break;
489#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
490 case 8: SREG_WRITE(SREG_PMC8, value); break;
491 case 9: SREG_WRITE(SREG_PMC9, value); break;
492#endif
493 default: break;
494 }
495}
496
497uint32_t
498kpc_rawpmu_config_count(void)
499{
500 return RAWPMU_CONFIG_COUNT;
501}
502
503int
504kpc_get_rawpmu_config(kpc_config_t *configv)
505{
506 configv[0] = SREG_READ(SREG_PMCR2);
507 configv[1] = SREG_READ(SREG_PMCR3);
508 configv[2] = SREG_READ(SREG_PMCR4);
509 configv[3] = SREG_READ(SREG_OPMAT0);
510 configv[4] = SREG_READ(SREG_OPMAT1);
511 configv[5] = SREG_READ(SREG_OPMSK0);
512 configv[6] = SREG_READ(SREG_OPMSK1);
513#if RAWPMU_CONFIG_COUNT > 7
514 configv[7] = SREG_READ(SREG_PMMMAP);
515 configv[8] = SREG_READ(SREG_PMTRHLD2);
516 configv[9] = SREG_READ(SREG_PMTRHLD4);
517 configv[10] = SREG_READ(SREG_PMTRHLD6);
518#endif
519 return 0;
520}
521
522static int
523kpc_set_rawpmu_config(kpc_config_t *configv)
524{
525 SREG_WRITE(SREG_PMCR2, configv[0]);
526 SREG_WRITE(SREG_PMCR3, configv[1]);
527 SREG_WRITE(SREG_PMCR4, configv[2]);
528 SREG_WRITE(SREG_OPMAT0, configv[3]);
529 SREG_WRITE(SREG_OPMAT1, configv[4]);
530 SREG_WRITE(SREG_OPMSK0, configv[5]);
531 SREG_WRITE(SREG_OPMSK1, configv[6]);
532#if RAWPMU_CONFIG_COUNT > 7
533 SREG_WRITE(SREG_PMMMAP, configv[7]);
534 SREG_WRITE(SREG_PMTRHLD2, configv[8]);
535 SREG_WRITE(SREG_PMTRHLD4, configv[9]);
536 SREG_WRITE(SREG_PMTRHLD6, configv[10]);
537#endif
538 return 0;
539}
540
541static void
542save_regs(void)
543{
544 int cpuid = cpu_number();
545
546 __asm__ volatile ("dmb ish");
547
548 assert(ml_get_interrupts_enabled() == FALSE);
549
550 /* Save current PMCR0/1 values. PMCR2-4 are in the RAWPMU set. */
551 saved_PMCR[cpuid][0] = SREG_READ(SREG_PMCR0) | 0x3;
552
553 /* Save event selections. */
554 saved_PMESR[cpuid][0] = SREG_READ(SREG_PMESR0);
555 saved_PMESR[cpuid][1] = SREG_READ(SREG_PMESR1);
556
557 kpc_get_rawpmu_config(saved_RAWPMU[cpuid]);
558
559 /* Disable the counters. */
560 // SREG_WRITE(SREG_PMCR0, clear);
561
562 /* Finally, save state for each counter. */
563 for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
564 saved_counter[cpuid][i] = read_counter(i);
565 }
566}
567
568static void
569restore_regs(void)
570{
571 int cpuid = cpu_number();
572
573 /* Restore PMESR values. */
574 SREG_WRITE(SREG_PMESR0, saved_PMESR[cpuid][0]);
575 SREG_WRITE(SREG_PMESR1, saved_PMESR[cpuid][1]);
576
577 kpc_set_rawpmu_config(saved_RAWPMU[cpuid]);
578
579 /* Restore counter values */
580 for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
581 write_counter(i, saved_counter[cpuid][i]);
582 }
583
584 /* Restore PMCR0/1 values (with PMCR0 last to enable). */
585 SREG_WRITE(SREG_PMCR1, saved_PMCR[cpuid][1] | 0x30303);
586 SREG_WRITE(SREG_PMCR0, saved_PMCR[cpuid][0] | 0x3);
587}
588
589static uint64_t
590get_counter_config(uint32_t counter)
591{
592 uint64_t pmesr;
593
594 switch (counter) {
595 case 2: /* FALLTHROUGH */
596 case 3: /* FALLTHROUGH */
597 case 4: /* FALLTHROUGH */
598 case 5:
599 pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR0), counter, 2);
600 break;
601 case 6: /* FALLTHROUGH */
602 case 7:
603#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
604 /* FALLTHROUGH */
605 case 8: /* FALLTHROUGH */
606 case 9:
607#endif
608 pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR1), counter, 6);
609 break;
610 default:
611 pmesr = 0;
612 break;
613 }
614
615 kpc_config_t config = pmesr;
616
617 uint64_t pmcr1 = SREG_READ(SREG_PMCR1);
618
619 if (pmcr1 & PMCR1_EL0_A32_ENABLE_MASK(counter)) {
620 config |= CFGWORD_EL0A32EN_MASK;
621 }
622 if (pmcr1 & PMCR1_EL0_A64_ENABLE_MASK(counter)) {
623 config |= CFGWORD_EL0A64EN_MASK;
624 }
625 if (pmcr1 & PMCR1_EL1_A64_ENABLE_MASK(counter)) {
626 config |= CFGWORD_EL1EN_MASK;
627#if NO_MONITOR
628 config |= CFGWORD_EL3EN_MASK;
629#endif
630 }
631#if !NO_MONITOR
632 if (pmcr1 & PMCR1_EL3_A64_ENABLE_MASK(counter)) {
633 config |= CFGWORD_EL3EN_MASK;
634 }
635#endif
636
637 return config;
638}
639
640static void
641set_counter_config(uint32_t counter, uint64_t config)
642{
643 int cpuid = cpu_number();
644 uint64_t pmesr = 0;
645
646 switch (counter) {
647 case 2: /* FALLTHROUGH */
648 case 3: /* FALLTHROUGH */
649 case 4: /* FALLTHROUGH */
650 case 5:
651 pmesr = SREG_READ(SREG_PMESR0);
652 pmesr &= PMESR_EVT_CLEAR(counter, 2);
653 pmesr |= PMESR_EVT_ENCODE(config, counter, 2);
654 SREG_WRITE(SREG_PMESR0, pmesr);
655 saved_PMESR[cpuid][0] = pmesr;
656 break;
657
658 case 6: /* FALLTHROUGH */
659 case 7:
660#if KPC_ARM64_CONFIGURABLE_COUNT > 6
661 /* FALLTHROUGH */
662 case 8: /* FALLTHROUGH */
663 case 9:
664#endif
665 pmesr = SREG_READ(SREG_PMESR1);
666 pmesr &= PMESR_EVT_CLEAR(counter, 6);
667 pmesr |= PMESR_EVT_ENCODE(config, counter, 6);
668 SREG_WRITE(SREG_PMESR1, pmesr);
669 saved_PMESR[cpuid][1] = pmesr;
670 break;
671 default:
672 break;
673 }
674
675 set_modes(counter, config);
676}
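/*
 * Putting the pieces together, a sketch of how one configurable counter
 * would be programmed (assuming interrupts are already disabled, as the
 * callers in this file guarantee):
 *
 *	set_counter_config(2, CPMU_CORE_CYCLE | CFGWORD_EL0A64EN_MASK);
 *	enable_counter(2);
 *
 * set_counter_config() routes the low event byte into PMESR0/1 and the
 * mode bits into PMCR1 via set_modes(); enable_counter() then sets the
 * matching enable and PMI bits in PMCR0.
 */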
677
678/* internal functions */
679
680void
681kpc_arch_init(void)
682{
683}
684
685boolean_t
686kpc_is_running_fixed(void)
687{
688 return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
689}
690
691boolean_t
692kpc_is_running_configurable(uint64_t pmc_mask)
693{
694 assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
695 return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
696 ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
697}
698
699uint32_t
700kpc_fixed_count(void)
701{
702 return KPC_ARM64_FIXED_COUNT;
703}
704
705uint32_t
706kpc_configurable_count(void)
707{
708 return KPC_ARM64_CONFIGURABLE_COUNT;
709}
710
711uint32_t
712kpc_fixed_config_count(void)
713{
714 return 0;
715}
716
717uint32_t
718kpc_configurable_config_count(uint64_t pmc_mask)
719{
720 assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
721 return kpc_popcount(pmc_mask);
722}
723
724int
725kpc_get_fixed_config(kpc_config_t *configv __unused)
726{
727 return 0;
728}
729
730uint64_t
731kpc_fixed_max(void)
732{
733 return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
734}
735
736uint64_t
737kpc_configurable_max(void)
738{
739 return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
740}
741
742static void
743set_running_configurable(uint64_t target_mask, uint64_t state_mask)
744{
745 uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
746 boolean_t enabled;
747
748 enabled = ml_set_interrupts_enabled(FALSE);
749
750 for (uint32_t i = 0; i < cfg_count; ++i) {
751 if (((1ULL << i) & target_mask) == 0) {
752 continue;
753 }
754 assert(kpc_controls_counter(offset + i));
755
756 if ((1ULL << i) & state_mask) {
757 enable_counter(offset + i);
758 } else {
759 disable_counter(offset + i);
760 }
761 }
762
763 ml_set_interrupts_enabled(enabled);
764}
765
766static uint32_t kpc_xcall_sync;
767static void
768kpc_set_running_xcall( void *vstate )
769{
770 struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
771 assert(mp_config);
772
773 set_running_configurable(mp_config->cfg_target_mask,
774 mp_config->cfg_state_mask);
775
776 if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
777 thread_wakeup((event_t) &kpc_xcall_sync);
778 }
779}
780
781static uint32_t kpc_xread_sync;
782static void
783kpc_get_curcpu_counters_xcall(void *args)
784{
785 struct kpc_get_counters_remote *handler = args;
786
787 assert(handler != NULL);
788 assert(handler->buf != NULL);
789
790 int offset = cpu_number() * handler->buf_stride;
791 int r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);
792
793 /* number of counters added by this CPU, needs to be atomic */
794 os_atomic_add(&(handler->nb_counters), r, relaxed);
795
796 if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
797 thread_wakeup((event_t) &kpc_xread_sync);
798 }
799}
800
801int
802kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
803{
804 assert(buf != NULL);
805
806 int enabled = ml_set_interrupts_enabled(FALSE);
807
808 /* grab counters and CPU number as close as possible */
809 if (curcpu) {
810 *curcpu = current_processor()->cpu_id;
811 }
812
813 struct kpc_get_counters_remote hdl = {
814 .classes = classes,
815 .nb_counters = 0,
816 .buf = buf,
817 .buf_stride = kpc_get_counter_count(classes)
818 };
819
820 cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);
821 int offset = hdl.nb_counters;
822
823 (void)ml_set_interrupts_enabled(enabled);
824
825 return offset;
826}
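/*
 * Each CPU writes its counters at cpu_number() * buf_stride within buf, so
 * the caller is presumably expected to size buf for every CPU; the return
 * value is the total number of counters written across all CPUs.
 */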
827
828int
829kpc_get_fixed_counters(uint64_t *counterv)
830{
831#if MONOTONIC
832 mt_fixed_counts(counterv);
833 return 0;
834#else /* MONOTONIC */
835#pragma unused(counterv)
836 return ENOTSUP;
837#endif /* !MONOTONIC */
838}
839
840int
841kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
842{
843 uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
844 uint64_t ctr = 0ULL;
845
846 assert(counterv);
847
848 for (uint32_t i = 0; i < cfg_count; ++i) {
849 if (((1ULL << i) & pmc_mask) == 0) {
850 continue;
851 }
852 ctr = read_counter(i + offset);
853
854 if (ctr & KPC_ARM64_COUNTER_OVF_MASK) {
855 ctr = CONFIGURABLE_SHADOW(i) +
856 (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
857 (ctr & KPC_ARM64_COUNTER_MASK);
858 } else {
859 ctr = CONFIGURABLE_SHADOW(i) +
860 (ctr - CONFIGURABLE_RELOAD(i));
861 }
862
863 *counterv++ = ctr;
864 }
865
866 return 0;
867}
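/*
 * The arithmetic above reconstructs a free-running 64-bit value: the
 * shadow holds everything accumulated before the last reload, and
 * (counter - reload) is the progress since. When the overflow bit is set,
 * one full wrap from the reload value to the counter maximum is added back
 * in along with the post-wrap remainder.
 */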
868
869int
870kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
871{
872 uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
873
874 assert(configv);
875
876 for (uint32_t i = 0; i < cfg_count; ++i) {
877 if ((1ULL << i) & pmc_mask) {
878 *configv++ = get_counter_config(i + offset);
879 }
880 }
881 return 0;
882}
883
884static int
885kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
886{
887 uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
888 boolean_t enabled;
889
890 assert(configv);
891
892 enabled = ml_set_interrupts_enabled(FALSE);
893
894 for (uint32_t i = 0; i < cfg_count; ++i) {
895 if (((1ULL << i) & pmc_mask) == 0) {
896 continue;
897 }
898 assert(kpc_controls_counter(i + offset));
899
900 set_counter_config(i + offset, *configv++);
901 }
902
903 ml_set_interrupts_enabled(enabled);
904
905 return 0;
906}
907
908static uint32_t kpc_config_sync;
909static void
910kpc_set_config_xcall(void *vmp_config)
911{
912 struct kpc_config_remote *mp_config = vmp_config;
913 kpc_config_t *new_config = NULL;
914 uint32_t classes = 0ULL;
915
916 assert(mp_config);
917 assert(mp_config->configv);
918 classes = mp_config->classes;
919 new_config = mp_config->configv;
920
921 if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
922 kpc_set_configurable_config(new_config, mp_config->pmc_mask);
923 new_config += kpc_popcount(mp_config->pmc_mask);
924 }
925
926 if (classes & KPC_CLASS_RAWPMU_MASK) {
927 kpc_set_rawpmu_config(new_config);
928 new_config += RAWPMU_CONFIG_COUNT;
929 }
930
931 if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
932 thread_wakeup((event_t) &kpc_config_sync);
933 }
934}
935
936static uint64_t
937kpc_reload_counter(uint32_t ctr)
938{
939 assert(ctr < (kpc_configurable_count() + kpc_fixed_count()));
940
941 uint64_t old = read_counter(ctr);
942
943 if (kpc_controls_counter(ctr)) {
944 write_counter(ctr, FIXED_RELOAD(ctr));
945 return old & KPC_ARM64_COUNTER_MASK;
946 } else {
947 /*
948 * Unset the overflow bit to clear the condition that drives
949 * PMIs. The power manager is not interested in handling PMIs.
950 */
951 write_counter(ctr, old & KPC_ARM64_COUNTER_MASK);
952 return 0;
953 }
954}
955
956static uint32_t kpc_reload_sync;
957static void
958kpc_set_reload_xcall(void *vmp_config)
959{
960 struct kpc_config_remote *mp_config = vmp_config;
961 uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
962 uint64_t *new_period = NULL, max = kpc_configurable_max();
963 boolean_t enabled;
964
965 assert(mp_config);
966 assert(mp_config->configv);
967 classes = mp_config->classes;
968 new_period = mp_config->configv;
969
970 enabled = ml_set_interrupts_enabled(FALSE);
971
972 if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
973 /*
974 * Update _all_ shadow counters, this cannot be done for only
975 * selected PMCs. Otherwise, we would corrupt the configurable
976 * shadow buffer since the PMCs are muxed according to the pmc
977 * mask.
978 */
979 uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
980 kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);
981
982 /* set the new period */
983 count = kpc_configurable_count();
984 for (uint32_t i = 0; i < count; ++i) {
985 /* ignore the counter */
986 if (((1ULL << i) & mp_config->pmc_mask) == 0) {
987 continue;
988 }
989 if (*new_period == 0) {
990 *new_period = kpc_configurable_max();
991 }
992 CONFIGURABLE_RELOAD(i) = max - *new_period;
993 /* reload the counter */
994 kpc_reload_counter(offset + i);
995 /* next period value */
996 new_period++;
997 }
998 }
999
1000 ml_set_interrupts_enabled(enabled);
1001
1002 if (os_atomic_dec(&kpc_reload_sync, relaxed) == 0) {
1003 thread_wakeup((event_t) &kpc_reload_sync);
1004 }
1005}
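/*
 * Reload bookkeeping, in brief: a requested period P is stored as the
 * reload value (max - P), the counter is reset to that value and counts up,
 * and after roughly P increments it overflows, at which point
 * kpc_pmi_handler() credits the elapsed period to the shadow count and
 * reloads the counter.
 */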
1006
1007void
1008kpc_pmi_handler(unsigned int ctr)
1009{
1010 uint64_t extra = kpc_reload_counter(ctr);
1011
1012 FIXED_SHADOW(ctr) += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;
1013
1014 if (FIXED_ACTIONID(ctr)) {
1015 kpc_sample_kperf(FIXED_ACTIONID(ctr));
1016 }
1017}
1018
1019uint32_t
1020kpc_get_classes(void)
1021{
1022 return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_RAWPMU_MASK;
1023}
1024
1025int
1026kpc_set_running_arch(struct kpc_running_remote *mp_config)
1027{
1028 assert(mp_config != NULL);
1029
1030 /* dispatch to all CPUs */
1031 cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, mp_config);
1032
1033 kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
1034 kpc_running_classes = mp_config->classes;
1035 kpc_configured = 1;
1036
1037 return 0;
1038}
1039
1040int
1041kpc_set_period_arch(struct kpc_config_remote *mp_config)
1042{
1043 assert(mp_config);
1044
1045 /* dispatch to all CPUs */
1046 cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);
1047
1048 kpc_configured = 1;
1049
1050 return 0;
1051}
1052
1053int
1054kpc_set_config_arch(struct kpc_config_remote *mp_config)
1055{
1056 uint32_t count = kpc_popcount(mp_config->pmc_mask);
1057
1058 assert(mp_config);
1059 assert(mp_config->configv);
1060
1061 /* check config against whitelist for external devs */
1062 for (uint32_t i = 0; i < count; ++i) {
1063 if (!whitelist_disabled && !config_in_whitelist(mp_config->configv[i])) {
1064 return EPERM;
1065 }
1066 }
1067
1068 /* dispatch to all CPUs */
1069 cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);
1070
1071 kpc_configured = 1;
1072
1073 return 0;
1074}
1075
0a7de745 1076void
5ba3f43e
A
1077kpc_idle(void)
1078{
1079 if (kpc_configured) {
1080 save_regs();
1081 }
1082}
1083
1084void
1085kpc_idle_exit(void)
1086{
1087 if (kpc_configured) {
1088 restore_regs();
1089 }
1090}
1091
1092int
1093kpc_set_sw_inc( uint32_t mask __unused )
1094{
1095 return ENOTSUP;
1096}
1097
1098int
1099kpc_disable_whitelist( int val )
1100{
1101 whitelist_disabled = val;
1102 return 0;
1103}
1104
1105int
1106kpc_get_whitelist_disabled( void )
1107{
1108 return whitelist_disabled;
1109}
1110
1111int
1112kpc_get_pmu_version(void)
1113{
1114 return KPC_PMU_ARM_APPLE;
1115}
1116
1117#else /* APPLE_ARM64_ARCH_FAMILY */
1118
1119/* We don't currently support non-Apple arm64 PMU configurations like PMUv3 */
1120
1121void
1122kpc_arch_init(void)
1123{
1124 /* No-op */
1125}
1126
1127uint32_t
1128kpc_get_classes(void)
1129{
1130 return 0;
1131}
1132
1133uint32_t
1134kpc_fixed_count(void)
1135{
1136 return 0;
1137}
1138
1139uint32_t
1140kpc_configurable_count(void)
1141{
1142 return 0;
1143}
1144
1145uint32_t
1146kpc_fixed_config_count(void)
1147{
1148 return 0;
1149}
1150
1151uint32_t
1152kpc_configurable_config_count(uint64_t pmc_mask __unused)
1153{
1154 return 0;
1155}
1156
1157int
1158kpc_get_fixed_config(kpc_config_t *configv __unused)
1159{
1160 return 0;
1161}
1162
1163uint64_t
1164kpc_fixed_max(void)
1165{
1166 return 0;
1167}
1168
1169uint64_t
1170kpc_configurable_max(void)
1171{
1172 return 0;
1173}
1174
1175int
1176kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
1177{
1178 return ENOTSUP;
1179}
1180
1181int
1182kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
1183{
1184 return ENOTSUP;
1185}
1186
1187int
1188kpc_get_fixed_counters(uint64_t *counterv __unused)
1189{
1190 return 0;
1191}
1192
1193boolean_t
1194kpc_is_running_fixed(void)
1195{
1196 return FALSE;
1197}
1198
1199boolean_t
1200kpc_is_running_configurable(uint64_t pmc_mask __unused)
1201{
1202 return FALSE;
1203}
1204
1205int
1206kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
1207{
1208 return ENOTSUP;
1209}
1210
1211int
1212kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
1213{
1214 return ENOTSUP;
1215}
1216
1217int
1218kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
1219{
1220 return ENOTSUP;
1221}
1222
1223void
1224kpc_idle(void)
1225{
1226 // do nothing
1227}
1228
1229void
1230kpc_idle_exit(void)
1231{
1232 // do nothing
1233}
1234
1235int
1236kpc_get_all_cpus_counters(uint32_t classes __unused, int *curcpu __unused, uint64_t *buf __unused)
1237{
1238 return 0;
1239}
1240
1241int
1242kpc_set_sw_inc( uint32_t mask __unused )
1243{
1244 return ENOTSUP;
1245}
1246
1247int
1248kpc_get_pmu_version(void)
1249{
1250 return KPC_PMU_ERROR;
1251}
1252
1253uint32_t
1254kpc_rawpmu_config_count(void)
1255{
1256 return 0;
1257}
1258
1259int
1260kpc_get_rawpmu_config(__unused kpc_config_t *configv)
1261{
1262 return 0;
1263}
1264
1265int
1266kpc_disable_whitelist( int val __unused )
1267{
1268 return 0;
1269}
1270
1271int
1272kpc_get_whitelist_disabled( void )
1273{
1274 return 0;
1275}
1276
1277#endif /* !APPLE_ARM64_ARCH_FAMILY */