/*
 * Copyright (c) 2012-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <kern/kalloc.h>
#include <kern/kpc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <stdint.h>
#include <sys/errno.h>

#if APPLE_ARM64_ARCH_FAMILY

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
void kpc_pmi_handler(unsigned int ctr);
/*
 * PMCs 8 and 9 were added to Hurricane and to maintain the existing bit
 * positions of the other PMCs, their configuration bits start at position 32.
 */
#define PMCR_PMC_8_9_OFFSET     (32)
#define PMCR_PMC_8_9_SHIFT(PMC) (((PMC) - 8) + PMCR_PMC_8_9_OFFSET)
#define PMCR_PMC_SHIFT(PMC)     (((PMC) <= 7) ? (PMC) : \
	PMCR_PMC_8_9_SHIFT(PMC))
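/*
 * Illustrative compile-time checks of the layout described above: PMCs 0-7 use
 * their own index as the shift, while PMCs 8 and 9 start at bit 32.
 */
_Static_assert(PMCR_PMC_SHIFT(5) == 5, "PMC 5 configuration bits start at bit 5");
_Static_assert(PMCR_PMC_SHIFT(9) == 33, "PMC 9 configuration bits start at bit 33");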
/*
 * PMCR0 controls enabling, interrupts, and overflow of performance counters.
 */
#define PMCR0_PMC_ENABLE_MASK(PMC)  (UINT64_C(0x1) << PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMC_DISABLE_MASK(PMC) (~PMCR0_PMC_ENABLE_MASK(PMC))

/* how interrupts are generated on PMIs */
#define PMCR0_INTGEN_SHIFT   (8)
#define PMCR0_INTGEN_MASK    (UINT64_C(0x7) << PMCR0_INTGEN_SHIFT)
#define PMCR0_INTGEN_OFF     (UINT64_C(0) << PMCR0_INTGEN_SHIFT)
#define PMCR0_INTGEN_PMI     (UINT64_C(1) << PMCR0_INTGEN_SHIFT)
#define PMCR0_INTGEN_AIC     (UINT64_C(2) << PMCR0_INTGEN_SHIFT)
#define PMCR0_INTGEN_DBG_HLT (UINT64_C(3) << PMCR0_INTGEN_SHIFT)
#define PMCR0_INTGEN_FIQ     (UINT64_C(4) << PMCR0_INTGEN_SHIFT)

/* set by hardware if PMI was generated */
#define PMCR0_PMAI_SHIFT (11)
#define PMCR0_PMAI_MASK  (UINT64_C(1) << PMCR0_PMAI_SHIFT)

/* overflow on a PMC generates an interrupt */
#define PMCR0_PMI_OFFSET            (12)
#define PMCR0_PMI_SHIFT(PMC)        (PMCR0_PMI_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMI_ENABLE_MASK(PMC)  (UINT64_C(1) << PMCR0_PMI_SHIFT(PMC))
#define PMCR0_PMI_DISABLE_MASK(PMC) (~PMCR0_PMI_ENABLE_MASK(PMC))
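/*
 * Illustrative checks: the PMI enable bit for a PMC sits 12 bits above that
 * PMC's enable bit, so PMC 2 uses bit 14 and PMC 8 uses bit 44.
 */
_Static_assert(PMCR0_PMI_SHIFT(2) == 14, "PMI enable for PMC 2 is PMCR0 bit 14");
_Static_assert(PMCR0_PMI_SHIFT(8) == 44, "PMI enable for PMC 8 is PMCR0 bit 44");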
/* disable counting when a PMI is signaled (except for AIC interrupts) */
#define PMCR0_DISCNT_SHIFT        (20)
#define PMCR0_DISCNT_ENABLE_MASK  (UINT64_C(1) << PMCR0_DISCNT_SHIFT)
#define PMCR0_DISCNT_DISABLE_MASK (~PMCR0_DISCNT_ENABLE_MASK)

/* block PMIs until ERET retires */
#define PMCR0_WFRFE_SHIFT        (22)
#define PMCR0_WFRFE_ENABLE_MASK  (UINT64_C(1) << PMCR0_WFRFE_SHIFT)
#define PMCR0_WFRFE_DISABLE_MASK (~PMCR0_WFRFE_ENABLE_MASK)
/* count global L2C events */
#define PMCR0_L2CGLOBAL_SHIFT        (23)
#define PMCR0_L2CGLOBAL_ENABLE_MASK  (UINT64_C(1) << PMCR0_L2CGLOBAL_SHIFT)
#define PMCR0_L2CGLOBAL_DISABLE_MASK (~PMCR0_L2CGLOBAL_ENABLE_MASK)

/* allow user mode access to configuration registers */
#define PMCR0_USEREN_SHIFT        (30)
#define PMCR0_USEREN_ENABLE_MASK  (UINT64_C(1) << PMCR0_USEREN_SHIFT)
#define PMCR0_USEREN_DISABLE_MASK (~PMCR0_USEREN_ENABLE_MASK)

/* force the CPMU clocks in case of a clocking bug */
#define PMCR0_CLKEN_SHIFT        (31)
#define PMCR0_CLKEN_ENABLE_MASK  (UINT64_C(1) << PMCR0_CLKEN_SHIFT)
#define PMCR0_CLKEN_DISABLE_MASK (~PMCR0_CLKEN_ENABLE_MASK)
/* 32 - 44 mirror the low bits for PMCs 8 and 9 */

/* PMCR1 enables counters in different processor modes */

#define PMCR1_EL0_A32_OFFSET (0)
#define PMCR1_EL0_A64_OFFSET (8)
#define PMCR1_EL1_A64_OFFSET (16)
#define PMCR1_EL3_A64_OFFSET (24)

#define PMCR1_EL0_A32_SHIFT(PMC) (PMCR1_EL0_A32_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL0_A64_SHIFT(PMC) (PMCR1_EL0_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL1_A64_SHIFT(PMC) (PMCR1_EL1_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL3_A64_SHIFT(PMC) (PMCR1_EL3_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL0_A32_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A32_SHIFT(PMC))
#define PMCR1_EL0_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A64_SHIFT(PMC))
#define PMCR1_EL1_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL1_A64_SHIFT(PMC))
/* PMCR1_EL3_A64 is not supported on PMCs 8 and 9 */
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) UINT64_C(0)
#else
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL3_A64_SHIFT(PMC))
#endif

#define PMCR1_EL_ALL_ENABLE_MASK(PMC)  (PMCR1_EL0_A32_ENABLE_MASK(PMC) | \
	PMCR1_EL0_A64_ENABLE_MASK(PMC) | \
	PMCR1_EL1_A64_ENABLE_MASK(PMC) | \
	PMCR1_EL3_A64_ENABLE_MASK(PMC))
#define PMCR1_EL_ALL_DISABLE_MASK(PMC) (~PMCR1_EL_ALL_ENABLE_MASK(PMC))
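/*
 * For a given PMC, the four per-mode enable bits sit 8 bits apart in PMCR1;
 * for PMC 2, for example, they are bits 2, 10, 18, and 26 (the EL3 bit only on
 * configurations that expose it), and PMCR1_EL_ALL_ENABLE_MASK(2) covers all
 * of them at once.
 */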
/* PMESR0 and PMESR1 are event selection registers */

/* PMESR0 selects which event is counted on PMCs 2, 3, 4, and 5 */
/* PMESR1 selects which event is counted on PMCs 6, 7, 8, and 9 */

#define PMESR_PMC_WIDTH           (8)
#define PMESR_PMC_MASK            (UINT8_MAX)
#define PMESR_SHIFT(PMC, OFF)     (8 * ((PMC) - (OFF)))
#define PMESR_EVT_MASK(PMC, OFF)  (PMESR_PMC_MASK << PMESR_SHIFT(PMC, OFF))
#define PMESR_EVT_CLEAR(PMC, OFF) (~PMESR_EVT_MASK(PMC, OFF))

#define PMESR_EVT_DECODE(PMESR, PMC, OFF) \
	(((PMESR) >> PMESR_SHIFT(PMC, OFF)) & PMESR_PMC_MASK)
#define PMESR_EVT_ENCODE(EVT, PMC, OFF) \
	(((EVT) & PMESR_PMC_MASK) << PMESR_SHIFT(PMC, OFF))
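/*
 * Illustrative checks: each PMESR register holds one 8-bit event selector per
 * PMC, starting with the PMC named by OFF, so PMC 4's selector occupies bits
 * 16-23 of PMESR0.
 */
_Static_assert(PMESR_SHIFT(4, 2) == 16, "PMC 4 event selector starts at bit 16 of PMESR0");
_Static_assert(PMESR_EVT_MASK(4, 2) == 0xff0000, "PMC 4 event selector occupies bits 16-23");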
/* system registers in the CPMU */

#define SREG_PMCR0  "S3_1_c15_c0_0"
#define SREG_PMCR1  "S3_1_c15_c1_0"
#define SREG_PMCR2  "S3_1_c15_c2_0"
#define SREG_PMCR3  "S3_1_c15_c3_0"
#define SREG_PMCR4  "S3_1_c15_c4_0"
#define SREG_PMESR0 "S3_1_c15_c5_0"
#define SREG_PMESR1 "S3_1_c15_c6_0"
#define SREG_PMSR   "S3_1_c15_c13_0"
#define SREG_OPMAT0 "S3_1_c15_c7_0"
#define SREG_OPMAT1 "S3_1_c15_c8_0"
#define SREG_OPMSK0 "S3_1_c15_c9_0"
#define SREG_OPMSK1 "S3_1_c15_c10_0"

#define SREG_PMC0 "S3_2_c15_c0_0"
#define SREG_PMC1 "S3_2_c15_c1_0"
#define SREG_PMC2 "S3_2_c15_c2_0"
#define SREG_PMC3 "S3_2_c15_c3_0"
#define SREG_PMC4 "S3_2_c15_c4_0"
#define SREG_PMC5 "S3_2_c15_c5_0"
#define SREG_PMC6 "S3_2_c15_c6_0"
#define SREG_PMC7 "S3_2_c15_c7_0"
#define SREG_PMC8 "S3_2_c15_c9_0"
#define SREG_PMC9 "S3_2_c15_c10_0"

#define SREG_PMMMAP   "S3_2_c15_c15_0"
#define SREG_PMTRHLD2 "S3_2_c15_c14_0"
#define SREG_PMTRHLD4 "S3_2_c15_c13_0"
#define SREG_PMTRHLD6 "S3_2_c15_c12_0"
/*
 * The low 8 bits of a configuration word select the event to program on
 * PMESR{0,1}. Bits 16-19 are mapped to PMCR1 bits.
 */
#define CFGWORD_EL0A32EN_MASK (0x10000)
#define CFGWORD_EL0A64EN_MASK (0x20000)
#define CFGWORD_EL1EN_MASK    (0x40000)
#define CFGWORD_EL3EN_MASK    (0x80000)
#define CFGWORD_ALLMODES_MASK (0xf0000)
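/*
 * For example, a configuration word of 0x40002 requests event 0x02
 * (CPMU_CORE_CYCLE) counted only at EL1, while a bare event number such as
 * 0x02 leaves bits 16-19 clear and is treated as counting in all modes (see
 * set_modes() below).
 */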
/* ACC offsets for PIO */
#define ACC_CPMU_PMC0_OFFSET (0x200)
#define ACC_CPMU_PMC8_OFFSET (0x280)
/*
 * Macros for reading and writing system registers.
 *
 * SR must be one of the SREG_* defines above.
 */
#define SREG_WRITE(SR, V) __asm__ volatile("msr " SR ", %0 ; isb" : : "r"(V))
#define SREG_READ(SR)     ({ uint64_t VAL; \
	__asm__ volatile("mrs %0, " SR : "=r"(VAL)); \
	VAL; })
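/*
 * Typical use is a read-modify-write of a control register, for example:
 *
 *	uint64_t pmcr0 = SREG_READ(SREG_PMCR0);
 *	SREG_WRITE(SREG_PMCR0, pmcr0 | PMCR0_PMC_ENABLE_MASK(2));
 *
 * The isb in SREG_WRITE ensures later instructions observe the updated
 * configuration.
 */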
/*
 * Configuration registers that can be controlled by RAWPMU:
 *
 * All: PMCR2-4, OPMAT0-1, OPMSK0-1.
 * Typhoon/Twister/Hurricane: PMMMAP, PMTRHLD2/4/6.
 */
#if HAS_EARLY_APPLE_CPMU
#define RAWPMU_CONFIG_COUNT 11
#else /* HAS_EARLY_APPLE_CPMU */
#define RAWPMU_CONFIG_COUNT 7
#endif /* !HAS_EARLY_APPLE_CPMU */
/* TODO: allocate dynamically */
static uint64_t saved_PMCR[MAX_CPUS][2];
static uint64_t saved_PMESR[MAX_CPUS][2];
static uint64_t saved_RAWPMU[MAX_CPUS][RAWPMU_CONFIG_COUNT];
static uint64_t saved_counter[MAX_CPUS][KPC_MAX_COUNTERS];
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_configured = 0;
/*
 * The whitelist is disabled by default on development/debug kernel. This can
 * be changed via the kpc.disable_whitelist sysctl. The whitelist is enabled on
 * release kernel and cannot be disabled.
 */
#if DEVELOPMENT || DEBUG
static boolean_t whitelist_disabled = TRUE;
#else
static boolean_t whitelist_disabled = FALSE;
#endif
#define CPMU_CORE_CYCLE 0x02

#if HAS_EARLY_APPLE_CPMU

#define CPMU_BIU_UPSTREAM_CYCLE   0x19
#define CPMU_BIU_DOWNSTREAM_CYCLE 0x1a
#define CPMU_L2C_AGENT_LD         0x22
#define CPMU_L2C_AGENT_LD_MISS    0x23
#define CPMU_L2C_AGENT_ST         0x24
#define CPMU_L2C_AGENT_ST_MISS    0x25
#define CPMU_INST_A32             0x78
#define CPMU_INST_THUMB           0x79
#define CPMU_INST_A64             0x7a
#define CPMU_INST_BRANCH          0x7b
#define CPMU_SYNC_DC_LOAD_MISS    0xb4
#define CPMU_SYNC_DC_STORE_MISS   0xb5
#define CPMU_SYNC_DTLB_MISS       0xb6
#define CPMU_SYNC_ST_HIT_YNGR_LD  0xb9
#define CPMU_SYNC_BR_ANY_MISP     0xc0
#define CPMU_FED_IC_MISS_DEM      0xce
#define CPMU_FED_ITLB_MISS        0xcf

#else /* HAS_EARLY_APPLE_CPMU */

#if HAS_CPMU_BIU_EVENTS
#define CPMU_BIU_UPSTREAM_CYCLE   0x13
#define CPMU_BIU_DOWNSTREAM_CYCLE 0x14
#endif /* HAS_CPMU_BIU_EVENTS */

#if HAS_CPMU_L2C_EVENTS
#define CPMU_L2C_AGENT_LD      0x1a
#define CPMU_L2C_AGENT_LD_MISS 0x1b
#define CPMU_L2C_AGENT_ST      0x1c
#define CPMU_L2C_AGENT_ST_MISS 0x1d
#endif /* HAS_CPMU_L2C_EVENTS */

#define CPMU_INST_A32            0x8a
#define CPMU_INST_THUMB          0x8b
#define CPMU_INST_A64            0x8c
#define CPMU_INST_BRANCH         0x8d
#define CPMU_SYNC_DC_LOAD_MISS   0xbf
#define CPMU_SYNC_DC_STORE_MISS  0xc0
#define CPMU_SYNC_DTLB_MISS      0xc1
#define CPMU_SYNC_ST_HIT_YNGR_LD 0xc4
#define CPMU_SYNC_BR_ANY_MISP    0xcb
#define CPMU_FED_IC_MISS_DEM     0xd3
#define CPMU_FED_ITLB_MISS       0xd4

#endif /* !HAS_EARLY_APPLE_CPMU */
/* List of counter events that are allowed to be used by 3rd-parties. */
static kpc_config_t whitelist[] = {
	0,    /* NO_EVENT */

	CPMU_CORE_CYCLE,

#if HAS_CPMU_BIU_EVENTS
	CPMU_BIU_UPSTREAM_CYCLE, CPMU_BIU_DOWNSTREAM_CYCLE,
#endif /* HAS_CPMU_BIU_EVENTS */

#if HAS_CPMU_L2C_EVENTS
	CPMU_L2C_AGENT_LD, CPMU_L2C_AGENT_LD_MISS, CPMU_L2C_AGENT_ST,
	CPMU_L2C_AGENT_ST_MISS,
#endif /* HAS_CPMU_L2C_EVENTS */

	CPMU_INST_A32, CPMU_INST_THUMB, CPMU_INST_A64, CPMU_INST_BRANCH,
	CPMU_SYNC_DC_LOAD_MISS, CPMU_SYNC_DC_STORE_MISS,
	CPMU_SYNC_DTLB_MISS, CPMU_SYNC_ST_HIT_YNGR_LD,
	CPMU_SYNC_BR_ANY_MISP, CPMU_FED_IC_MISS_DEM, CPMU_FED_ITLB_MISS,
};

#define WHITELIST_COUNT (sizeof(whitelist) / sizeof(whitelist[0]))
#define EVENT_MASK 0xff
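/*
 * For example, a configuration of (CFGWORD_EL0A64EN_MASK | CPMU_INST_A64) is
 * considered whitelisted because config_in_whitelist() below compares only the
 * low 8 event bits against the table; the EL enable bits are ignored.
 */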
static boolean_t
config_in_whitelist(kpc_config_t cfg)
{
	for (unsigned int i = 0; i < WHITELIST_COUNT; i++) {
		/* Strip off any EL configuration bits -- just look at the event. */
		if ((cfg & EVENT_MASK) == whitelist[i]) {
			return TRUE;
		}
	}

	return FALSE;
}
336 kprintf("PMCR0 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMCR0
));
337 kprintf("PMCR1 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMCR1
));
338 kprintf("PMCR2 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMCR2
));
339 kprintf("PMCR3 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMCR3
));
340 kprintf("PMCR4 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMCR4
));
341 kprintf("PMESR0 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMESR0
));
342 kprintf("PMESR1 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMESR1
));
344 kprintf("PMC0 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMC0
));
345 kprintf("PMC1 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMC1
));
346 kprintf("PMC2 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMC2
));
347 kprintf("PMC3 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMC3
));
348 kprintf("PMC4 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMC4
));
349 kprintf("PMC5 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMC5
));
350 kprintf("PMC6 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMC6
));
351 kprintf("PMC7 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMC7
));
353 #if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
354 kprintf("PMC8 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMC8
));
355 kprintf("PMC9 = 0x%" PRIx64
"\n", SREG_READ(SREG_PMC9
));
static boolean_t
enable_counter(uint32_t counter)
{
	int cpuid = cpu_number();
	uint64_t pmcr0 = 0, intgen_type;
	boolean_t counter_running, pmi_enabled, intgen_correct, enabled;

	pmcr0 = SREG_READ(SREG_PMCR0) | 0x3 /* leave the fixed counters enabled for monotonic */;

	counter_running = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;
	pmi_enabled = (pmcr0 & PMCR0_PMI_ENABLE_MASK(counter)) != 0;

	/* TODO: this should use the PMI path rather than AIC for the interrupt. */
	intgen_type = PMCR0_INTGEN_AIC;
	intgen_correct = (pmcr0 & PMCR0_INTGEN_MASK) == intgen_type;

	enabled = counter_running && pmi_enabled && intgen_correct;

	if (!enabled) {
		pmcr0 |= PMCR0_PMC_ENABLE_MASK(counter);
		pmcr0 |= PMCR0_PMI_ENABLE_MASK(counter);
		pmcr0 &= ~PMCR0_INTGEN_MASK;
		pmcr0 |= intgen_type;

		SREG_WRITE(SREG_PMCR0, pmcr0);
	}

	saved_PMCR[cpuid][0] = pmcr0;
	return enabled;
}
static boolean_t
disable_counter(uint32_t counter)
{
	uint64_t pmcr0 = 0;
	boolean_t enabled;
	int cpuid = cpu_number();

	pmcr0 = SREG_READ(SREG_PMCR0) | 0x3;
	enabled = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;

	pmcr0 &= PMCR0_PMC_DISABLE_MASK(counter);
	SREG_WRITE(SREG_PMCR0, pmcr0);

	saved_PMCR[cpuid][0] = pmcr0;
	return enabled;
}
/*
 * Enable counter in processor modes determined by configuration word.
 */
static void
set_modes(uint32_t counter, kpc_config_t cfgword)
{
	uint64_t bits = 0;
	int cpuid = cpu_number();

	if (cfgword & CFGWORD_EL0A32EN_MASK) {
		bits |= PMCR1_EL0_A32_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL0A64EN_MASK) {
		bits |= PMCR1_EL0_A64_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL1EN_MASK) {
		bits |= PMCR1_EL1_A64_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL3EN_MASK) {
		bits |= PMCR1_EL3_A64_ENABLE_MASK(counter);
	}

	/*
	 * Backwards compatibility: Writing a non-zero configuration word with
	 * all zeros in bits 16-19 is interpreted as enabling in all modes.
	 * This matches the behavior when the PMCR1 bits weren't exposed.
	 */
	if (bits == 0 && cfgword != 0) {
		bits = PMCR1_EL_ALL_ENABLE_MASK(counter);
	}

	uint64_t pmcr1 = SREG_READ(SREG_PMCR1);
	pmcr1 &= PMCR1_EL_ALL_DISABLE_MASK(counter);
	pmcr1 |= bits;
	pmcr1 |= 0x30303; /* monotonic compatibility */
	SREG_WRITE(SREG_PMCR1, pmcr1);
	saved_PMCR[cpuid][1] = pmcr1;
}
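/*
 * For example, a configuration word of 0x20002 for PMC 2 sets only PMCR1 bit
 * 10 (EL0 AArch64), while a bare event number such as 0x02 has no mode bits
 * set and falls back to PMCR1_EL_ALL_ENABLE_MASK(2) via the path above.
 */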
static uint64_t
read_counter(uint32_t counter)
{
	switch (counter) {
	// case 0: return SREG_READ(SREG_PMC0);
	// case 1: return SREG_READ(SREG_PMC1);
	case 2: return SREG_READ(SREG_PMC2);
	case 3: return SREG_READ(SREG_PMC3);
	case 4: return SREG_READ(SREG_PMC4);
	case 5: return SREG_READ(SREG_PMC5);
	case 6: return SREG_READ(SREG_PMC6);
	case 7: return SREG_READ(SREG_PMC7);
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	case 8: return SREG_READ(SREG_PMC8);
	case 9: return SREG_READ(SREG_PMC9);
#endif
	default: return 0;
	}
}
static void
write_counter(uint32_t counter, uint64_t value)
{
	switch (counter) {
	// case 0: SREG_WRITE(SREG_PMC0, value); break;
	// case 1: SREG_WRITE(SREG_PMC1, value); break;
	case 2: SREG_WRITE(SREG_PMC2, value); break;
	case 3: SREG_WRITE(SREG_PMC3, value); break;
	case 4: SREG_WRITE(SREG_PMC4, value); break;
	case 5: SREG_WRITE(SREG_PMC5, value); break;
	case 6: SREG_WRITE(SREG_PMC6, value); break;
	case 7: SREG_WRITE(SREG_PMC7, value); break;
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	case 8: SREG_WRITE(SREG_PMC8, value); break;
	case 9: SREG_WRITE(SREG_PMC9, value); break;
#endif
	default: break;
	}
}
uint32_t
kpc_rawpmu_config_count(void)
{
	return RAWPMU_CONFIG_COUNT;
}
int
kpc_get_rawpmu_config(kpc_config_t *configv)
{
	configv[0] = SREG_READ(SREG_PMCR2);
	configv[1] = SREG_READ(SREG_PMCR3);
	configv[2] = SREG_READ(SREG_PMCR4);
	configv[3] = SREG_READ(SREG_OPMAT0);
	configv[4] = SREG_READ(SREG_OPMAT1);
	configv[5] = SREG_READ(SREG_OPMSK0);
	configv[6] = SREG_READ(SREG_OPMSK1);
#if RAWPMU_CONFIG_COUNT > 7
	configv[7] = SREG_READ(SREG_PMMMAP);
	configv[8] = SREG_READ(SREG_PMTRHLD2);
	configv[9] = SREG_READ(SREG_PMTRHLD4);
	configv[10] = SREG_READ(SREG_PMTRHLD6);
#endif
	return 0;
}
static int
kpc_set_rawpmu_config(kpc_config_t *configv)
{
	SREG_WRITE(SREG_PMCR2, configv[0]);
	SREG_WRITE(SREG_PMCR3, configv[1]);
	SREG_WRITE(SREG_PMCR4, configv[2]);
	SREG_WRITE(SREG_OPMAT0, configv[3]);
	SREG_WRITE(SREG_OPMAT1, configv[4]);
	SREG_WRITE(SREG_OPMSK0, configv[5]);
	SREG_WRITE(SREG_OPMSK1, configv[6]);
#if RAWPMU_CONFIG_COUNT > 7
	SREG_WRITE(SREG_PMMMAP, configv[7]);
	SREG_WRITE(SREG_PMTRHLD2, configv[8]);
	SREG_WRITE(SREG_PMTRHLD4, configv[9]);
	SREG_WRITE(SREG_PMTRHLD6, configv[10]);
#endif
	return 0;
}
static void
save_regs(void)
{
	int cpuid = cpu_number();

	__asm__ volatile ("dmb ish");

	assert(ml_get_interrupts_enabled() == FALSE);

	/* Save current PMCR0/1 values. PMCR2-4 are in the RAWPMU set. */
	saved_PMCR[cpuid][0] = SREG_READ(SREG_PMCR0) | 0x3;

	/* Save event selections. */
	saved_PMESR[cpuid][0] = SREG_READ(SREG_PMESR0);
	saved_PMESR[cpuid][1] = SREG_READ(SREG_PMESR1);

	kpc_get_rawpmu_config(saved_RAWPMU[cpuid]);

	/* Disable the counters. */
	// SREG_WRITE(SREG_PMCR0, clear);

	/* Finally, save state for each counter. */
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		saved_counter[cpuid][i] = read_counter(i);
	}
}
static void
restore_regs(void)
{
	int cpuid = cpu_number();

	/* Restore PMESR values. */
	SREG_WRITE(SREG_PMESR0, saved_PMESR[cpuid][0]);
	SREG_WRITE(SREG_PMESR1, saved_PMESR[cpuid][1]);

	kpc_set_rawpmu_config(saved_RAWPMU[cpuid]);

	/* Restore counter values. */
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		write_counter(i, saved_counter[cpuid][i]);
	}

	/* Restore PMCR0/1 values (with PMCR0 last to enable). */
	SREG_WRITE(SREG_PMCR1, saved_PMCR[cpuid][1] | 0x30303);
	SREG_WRITE(SREG_PMCR0, saved_PMCR[cpuid][0] | 0x3);
}
static uint64_t
get_counter_config(uint32_t counter)
{
	uint64_t pmesr = 0;

	switch (counter) {
	case 2: /* FALLTHROUGH */
	case 3: /* FALLTHROUGH */
	case 4: /* FALLTHROUGH */
	case 5:
		pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR0), counter, 2);
		break;
	case 6: /* FALLTHROUGH */
	case 7:
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
		/* FALLTHROUGH */
	case 8: /* FALLTHROUGH */
	case 9:
#endif
		pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR1), counter, 6);
		break;
	default:
		break;
	}

	kpc_config_t config = pmesr;

	uint64_t pmcr1 = SREG_READ(SREG_PMCR1);

	if (pmcr1 & PMCR1_EL0_A32_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A32EN_MASK;
	}
	if (pmcr1 & PMCR1_EL0_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A64EN_MASK;
	}
	if (pmcr1 & PMCR1_EL1_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL1EN_MASK;
#if NO_MONITOR
		config |= CFGWORD_EL3EN_MASK;
#endif
	}
#if !NO_MONITOR
	if (pmcr1 & PMCR1_EL3_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL3EN_MASK;
	}
#endif

	return config;
}
static void
set_counter_config(uint32_t counter, uint64_t config)
{
	int cpuid = cpu_number();
	uint64_t pmesr = 0;

	switch (counter) {
	case 2: /* FALLTHROUGH */
	case 3: /* FALLTHROUGH */
	case 4: /* FALLTHROUGH */
	case 5:
		pmesr = SREG_READ(SREG_PMESR0);
		pmesr &= PMESR_EVT_CLEAR(counter, 2);
		pmesr |= PMESR_EVT_ENCODE(config, counter, 2);
		SREG_WRITE(SREG_PMESR0, pmesr);
		saved_PMESR[cpuid][0] = pmesr;
		break;
	case 6: /* FALLTHROUGH */
	case 7:
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
		/* FALLTHROUGH */
	case 8: /* FALLTHROUGH */
	case 9:
#endif
		pmesr = SREG_READ(SREG_PMESR1);
		pmesr &= PMESR_EVT_CLEAR(counter, 6);
		pmesr |= PMESR_EVT_ENCODE(config, counter, 6);
		SREG_WRITE(SREG_PMESR1, pmesr);
		saved_PMESR[cpuid][1] = pmesr;
		break;
	default:
		break;
	}

	set_modes(counter, config);
}
/* internal functions */

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}

uint32_t
kpc_fixed_count(void)
{
	return KPC_ARM64_FIXED_COUNT;
}

uint32_t
kpc_configurable_count(void)
{
	return KPC_ARM64_CONFIGURABLE_COUNT;
}
uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);
}
int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}
static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & target_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(offset + i));

		if ((1ULL << i) & state_mask) {
			enable_counter(offset + i);
		} else {
			disable_counter(offset + i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}
static uint32_t kpc_xcall_sync;

static void
kpc_set_running_xcall( void *vstate )
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote *) vstate;
	assert(mp_config);

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);

	if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xcall_sync);
	}
}
static uint32_t kpc_xread_sync;

static void
kpc_get_curcpu_counters_xcall(void *args)
{
	struct kpc_get_counters_remote *handler = args;

	assert(handler != NULL);
	assert(handler->buf != NULL);

	int offset = cpu_number() * handler->buf_stride;
	int r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic */
	os_atomic_add(&(handler->nb_counters), r, relaxed);

	if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xread_sync);
	}
}
int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	assert(buf != NULL);

	int enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = current_processor()->cpu_id;
	}

	struct kpc_get_counters_remote hdl = {
		.classes = classes,
		.nb_counters = 0,
		.buf_stride = kpc_get_counter_count(classes),
		.buf = buf,
	};

	cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);
	int offset = hdl.nb_counters;

	(void)ml_set_interrupts_enabled(enabled);

	return offset;
}
int
kpc_get_fixed_counters(uint64_t *counterv)
{
#if MONOTONIC
	mt_fixed_counts(counterv);
	return 0;
#else /* MONOTONIC */
#pragma unused(counterv)
	return ENOTSUP;
#endif /* !MONOTONIC */
}
int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	uint64_t ctr = 0ULL;

	assert(counterv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		ctr = read_counter(i + offset);

		if (ctr & KPC_ARM64_COUNTER_OVF_MASK) {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
			    (ctr & KPC_ARM64_COUNTER_MASK);
		} else {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (ctr - CONFIGURABLE_RELOAD(i));
		}

		*counterv++ = ctr;
	}

	return 0;
}
int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	assert(configv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*configv++ = get_counter_config(i + offset);
		}
	}

	return 0;
}
static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	assert(configv);

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(i + offset));

		set_counter_config(i + offset, *configv++);
	}

	ml_set_interrupts_enabled(enabled);

	return 0;
}
static uint32_t kpc_config_sync;

static void
kpc_set_config_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(new_config, mp_config->pmc_mask);
		new_config += kpc_popcount(mp_config->pmc_mask);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		kpc_set_rawpmu_config(new_config);
		new_config += RAWPMU_CONFIG_COUNT;
	}

	if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_config_sync);
	}
}
static uint64_t
kpc_reload_counter(uint32_t ctr)
{
	assert(ctr < (kpc_configurable_count() + kpc_fixed_count()));

	uint64_t old = read_counter(ctr);

	if (kpc_controls_counter(ctr)) {
		write_counter(ctr, FIXED_RELOAD(ctr));
		return old & KPC_ARM64_COUNTER_MASK;
	} else {
		/*
		 * Unset the overflow bit to clear the condition that drives
		 * PMIs. The power manager is not interested in handling PMIs.
		 */
		write_counter(ctr, old & KPC_ARM64_COUNTER_MASK);
		return 0;
	}
}
;
958 kpc_set_reload_xcall(void *vmp_config
)
960 struct kpc_config_remote
*mp_config
= vmp_config
;
961 uint32_t classes
= 0, count
= 0, offset
= kpc_fixed_count();
962 uint64_t *new_period
= NULL
, max
= kpc_configurable_max();
966 assert(mp_config
->configv
);
967 classes
= mp_config
->classes
;
968 new_period
= mp_config
->configv
;
970 enabled
= ml_set_interrupts_enabled(FALSE
);
972 if (classes
& KPC_CLASS_CONFIGURABLE_MASK
) {
974 * Update _all_ shadow counters, this cannot be done for only
975 * selected PMCs. Otherwise, we would corrupt the configurable
976 * shadow buffer since the PMCs are muxed according to the pmc
979 uint64_t all_cfg_mask
= (1ULL << kpc_configurable_count()) - 1;
980 kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask
);
982 /* set the new period */
983 count
= kpc_configurable_count();
984 for (uint32_t i
= 0; i
< count
; ++i
) {
985 /* ignore the counter */
986 if (((1ULL << i
) & mp_config
->pmc_mask
) == 0) {
989 if (*new_period
== 0) {
990 *new_period
= kpc_configurable_max();
992 CONFIGURABLE_RELOAD(i
) = max
- *new_period
;
993 /* reload the counter */
994 kpc_reload_counter(offset
+ i
);
995 /* next period value */
1000 ml_set_interrupts_enabled(enabled
);
1002 if (os_atomic_dec(&kpc_reload_sync
, relaxed
) == 0) {
1003 thread_wakeup((event_t
) &kpc_reload_sync
);
void
kpc_pmi_handler(unsigned int ctr)
{
	uint64_t extra = kpc_reload_counter(ctr);

	FIXED_SHADOW(ctr) += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

	if (FIXED_ACTIONID(ctr)) {
		kpc_sample_kperf(FIXED_ACTIONID(ctr));
	}
}
uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_RAWPMU_MASK;
}
int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	assert(mp_config != NULL);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;
	kpc_configured = 1;

	return 0;
}
int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
	assert(mp_config);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}
int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	uint32_t count = kpc_popcount(mp_config->pmc_mask);

	assert(mp_config);
	assert(mp_config->configv);

	/* check config against whitelist for external devs */
	for (uint32_t i = 0; i < count; ++i) {
		if (!whitelist_disabled && !config_in_whitelist(mp_config->configv[i])) {
			return EPERM;
		}
	}

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}
void
kpc_idle(void)
{
	if (kpc_configured) {
		save_regs();
	}
}

void
kpc_idle_exit(void)
{
	if (kpc_configured) {
		restore_regs();
	}
}
int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}

int
kpc_disable_whitelist( int val )
{
	whitelist_disabled = val;
	return 0;
}

int
kpc_get_whitelist_disabled( void )
{
	return whitelist_disabled;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ARM_APPLE;
}
#else /* APPLE_ARM64_ARCH_FAMILY */

/* We don't currently support non-Apple arm64 PMU configurations like PMUv3 */

uint32_t
kpc_get_classes(void)
{
	return 0;
}

uint32_t
kpc_fixed_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_count(void)
{
	return 0;
}

uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask __unused)
{
	return 0;
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_configurable_max(void)
{
	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_fixed_counters(uint64_t *counterv __unused)
{
	return 0;
}

boolean_t
kpc_is_running_fixed(void)
{
	return FALSE;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask __unused)
{
	return FALSE;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_get_all_cpus_counters(uint32_t classes __unused, int *curcpu __unused, uint64_t *buf __unused)
{
	return 0;
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ERROR;
}

uint32_t
kpc_rawpmu_config_count(void)
{
	return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}

int
kpc_disable_whitelist( int val __unused )
{
	return 0;
}

int
kpc_get_whitelist_disabled( void )
{
	return 0;
}

#endif /* !APPLE_ARM64_ARCH_FAMILY */