/*
 * Copyright (c) 2012-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <kern/cpu_number.h>
#include <kern/kpc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/mach_types.h>
#include <machine/machine_routines.h>

#include <sys/errno.h>
#if APPLE_ARM64_ARCH_FAMILY

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */
void kpc_pmi_handler(unsigned int ctr);

/*
 * PMCs 8 and 9 were added to Hurricane and, to maintain the existing bit
 * positions of the other PMCs, their configuration bits start at position 32.
 */
#define PMCR_PMC_8_9_OFFSET     (32)
#define PMCR_PMC_8_9_SHIFT(PMC) (((PMC) - 8) + PMCR_PMC_8_9_OFFSET)
#define PMCR_PMC_SHIFT(PMC)     (((PMC) <= 7) ? (PMC) : \
	                         PMCR_PMC_8_9_SHIFT(PMC))
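
/*
 * Worked examples (illustrative, not part of the original source):
 *   PMCR_PMC_SHIFT(5) == 5
 *   PMCR_PMC_SHIFT(8) == 32, PMCR_PMC_SHIFT(9) == 33
 * so the per-PMC masks below land in the low bits for PMCs 0-7 and in bits
 * 32 and up for PMCs 8 and 9.
 */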
/*
 * PMCR0 controls enabling, interrupts, and overflow of performance counters.
 */

#define PMCR0_PMC_ENABLE_MASK(PMC)  (UINT64_C(0x1) << PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMC_DISABLE_MASK(PMC) (~PMCR0_PMC_ENABLE_MASK(PMC))

/* overflow on a PMC generates an interrupt */
#define PMCR0_PMI_OFFSET            (12)
#define PMCR0_PMI_SHIFT(PMC)        (PMCR0_PMI_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMI_ENABLE_MASK(PMC)  (UINT64_C(1) << PMCR0_PMI_SHIFT(PMC))
#define PMCR0_PMI_DISABLE_MASK(PMC) (~PMCR0_PMI_ENABLE_MASK(PMC))

/* disable counting when a PMI is signaled (except for AIC interrupts) */
#define PMCR0_DISCNT_SHIFT        (20)
#define PMCR0_DISCNT_ENABLE_MASK  (UINT64_C(1) << PMCR0_DISCNT_SHIFT)
#define PMCR0_DISCNT_DISABLE_MASK (~PMCR0_DISCNT_ENABLE_MASK)

/* block PMIs until ERET retires */
#define PMCR0_WFRFE_SHIFT        (22)
#define PMCR0_WFRFE_ENABLE_MASK  (UINT64_C(1) << PMCR0_WFRFE_SHIFT)
#define PMCR0_WFRFE_DISABLE_MASK (~PMCR0_WFRFE_ENABLE_MASK)

/* count global L2C events */
#define PMCR0_L2CGLOBAL_SHIFT        (23)
#define PMCR0_L2CGLOBAL_ENABLE_MASK  (UINT64_C(1) << PMCR0_L2CGLOBAL_SHIFT)
#define PMCR0_L2CGLOBAL_DISABLE_MASK (~PMCR0_L2CGLOBAL_ENABLE_MASK)

/* allow user mode access to configuration registers */
#define PMCR0_USEREN_SHIFT        (30)
#define PMCR0_USEREN_ENABLE_MASK  (UINT64_C(1) << PMCR0_USEREN_SHIFT)
#define PMCR0_USEREN_DISABLE_MASK (~PMCR0_USEREN_ENABLE_MASK)

/* force the CPMU clocks in case of a clocking bug */
#define PMCR0_CLKEN_SHIFT        (31)
#define PMCR0_CLKEN_ENABLE_MASK  (UINT64_C(1) << PMCR0_CLKEN_SHIFT)
#define PMCR0_CLKEN_DISABLE_MASK (~PMCR0_CLKEN_ENABLE_MASK)

/* 32 - 44 mirror the low bits for PMCs 8 and 9 */

/* PMCR1 enables counters in different processor modes */

#define PMCR1_EL0_A32_OFFSET (0)
#define PMCR1_EL0_A64_OFFSET (8)
#define PMCR1_EL1_A64_OFFSET (16)
#define PMCR1_EL3_A64_OFFSET (24)

#define PMCR1_EL0_A32_SHIFT(PMC) (PMCR1_EL0_A32_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL0_A64_SHIFT(PMC) (PMCR1_EL0_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL1_A64_SHIFT(PMC) (PMCR1_EL1_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL3_A64_SHIFT(PMC) (PMCR1_EL3_A64_OFFSET + PMCR_PMC_SHIFT(PMC))

#define PMCR1_EL0_A32_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A32_SHIFT(PMC))
#define PMCR1_EL0_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A64_SHIFT(PMC))
#define PMCR1_EL1_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL1_A64_SHIFT(PMC))
/* PMCR1_EL3_A64 is not supported on PMCs 8 and 9 */
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) UINT64_C(0)
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL3_A64_SHIFT(PMC))

#define PMCR1_EL_ALL_ENABLE_MASK(PMC)  (PMCR1_EL0_A32_ENABLE_MASK(PMC) | \
	                                PMCR1_EL0_A64_ENABLE_MASK(PMC) | \
	                                PMCR1_EL1_A64_ENABLE_MASK(PMC) | \
	                                PMCR1_EL3_A64_ENABLE_MASK(PMC))
#define PMCR1_EL_ALL_DISABLE_MASK(PMC) (~PMCR1_EL_ALL_ENABLE_MASK(PMC))
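
/*
 * Worked examples (illustrative, not part of the original source):
 *   PMCR1_EL0_A64_SHIFT(2) == 8 + 2 == 10, so
 *   PMCR1_EL0_A64_ENABLE_MASK(2) == (1 << 10)
 *   PMCR1_EL1_A64_SHIFT(9) == 16 + 33 == 49, since PMCs 8 and 9 use the
 *   mirrored high bits (see PMCR_PMC_SHIFT above).
 */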
/* PMESR0 and PMESR1 are event selection registers */

/* PMESR0 selects which event is counted on PMCs 2, 3, 4, and 5 */
/* PMESR1 selects which event is counted on PMCs 6, 7, 8, and 9 */

#define PMESR_PMC_WIDTH           (8)
#define PMESR_PMC_MASK            (UINT8_MAX)
#define PMESR_SHIFT(PMC, OFF)     (8 * ((PMC) - (OFF)))
#define PMESR_EVT_MASK(PMC, OFF)  (PMESR_PMC_MASK << PMESR_SHIFT(PMC, OFF))
#define PMESR_EVT_CLEAR(PMC, OFF) (~PMESR_EVT_MASK(PMC, OFF))

#define PMESR_EVT_DECODE(PMESR, PMC, OFF) \
	(((PMESR) >> PMESR_SHIFT(PMC, OFF)) & PMESR_PMC_MASK)
#define PMESR_EVT_ENCODE(EVT, PMC, OFF) \
	(((EVT) & PMESR_PMC_MASK) << PMESR_SHIFT(PMC, OFF))
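
/*
 * Worked example (illustrative, not part of the original source): selecting
 * event 0xAB on PMC 3, which is controlled by PMESR0 (offset 2):
 *   PMESR_SHIFT(3, 2)              == 8
 *   PMESR_EVT_ENCODE(0xAB, 3, 2)   == 0xAB00
 *   PMESR_EVT_DECODE(0xAB00, 3, 2) == 0xAB
 */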
/*
 * The low 8 bits of a configuration word select the event to program on
 * PMESR{0,1}. Bits 16-19 are mapped to PMCR1 bits.
 */
#define CFGWORD_EL0A32EN_MASK (0x10000)
#define CFGWORD_EL0A64EN_MASK (0x20000)
#define CFGWORD_EL1EN_MASK    (0x40000)
#define CFGWORD_EL3EN_MASK    (0x80000)
#define CFGWORD_ALLMODES_MASK (0xf0000)
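
/*
 * Example (illustrative, not part of the original source): a configuration
 * word of 0x60002 selects event 0x02 and enables counting in EL0 and EL1
 * (AArch64 only):
 *   0x60002 == 0x02 | CFGWORD_EL0A64EN_MASK | CFGWORD_EL1EN_MASK
 */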
/* ACC offsets for PIO */
#define ACC_CPMU_PMC0_OFFSET (0x200)
#define ACC_CPMU_PMC8_OFFSET (0x280)

/*
 * Macros for reading and writing system registers.
 *
 * SR must be one of the SREG_* defines above.
 */
#define SREG_WRITE(SR, V) __asm__ volatile("msr " SR ", %0 ; isb" : : "r"(V))
#define SREG_READ(SR)     ({ uint64_t VAL; \
	                __asm__ volatile("mrs %0, " SR : "=r"(VAL)); \
	                VAL; })
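
/*
 * Usage sketch (illustrative, not part of the original source): a
 * read-modify-write of PMCR0, mirroring what enable_counter() does below.
 *
 *   uint64_t pmcr0 = SREG_READ("S3_1_C15_C0_0");
 *   pmcr0 |= PMCR0_PMC_ENABLE_MASK(2) | PMCR0_PMI_ENABLE_MASK(2);
 *   SREG_WRITE("S3_1_C15_C0_0", pmcr0);
 */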
/*
 * Configuration registers that can be controlled by RAWPMU:
 *
 * All: PMCR2-4, OPMAT0-1, OPMSK0-1.
 * Typhoon/Twister/Hurricane: PMMMAP, PMTRHLD2/4/6.
 */
#if HAS_EARLY_APPLE_CPMU
#define RAWPMU_CONFIG_COUNT 11
#else /* HAS_EARLY_APPLE_CPMU */
#define RAWPMU_CONFIG_COUNT 7
#endif /* !HAS_EARLY_APPLE_CPMU */

/* TODO: allocate dynamically */
static uint64_t saved_PMCR[MAX_CPUS][2];
static uint64_t saved_PMESR[MAX_CPUS][2];
static uint64_t saved_RAWPMU[MAX_CPUS][RAWPMU_CONFIG_COUNT];
static uint64_t saved_counter[MAX_CPUS][KPC_MAX_COUNTERS];
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_configured = 0;

/*
 * The whitelist is disabled by default on development and debug kernels. This
 * can be changed via the kpc.disable_whitelist sysctl. The whitelist is
 * enabled on release kernels and cannot be disabled.
 */
#if DEVELOPMENT || DEBUG
static boolean_t whitelist_disabled = TRUE;
#else
static boolean_t whitelist_disabled = FALSE;
#endif
#define CPMU_CORE_CYCLE 0x02

#if HAS_EARLY_APPLE_CPMU

#define CPMU_BIU_UPSTREAM_CYCLE   0x19
#define CPMU_BIU_DOWNSTREAM_CYCLE 0x1a
#define CPMU_L2C_AGENT_LD         0x22
#define CPMU_L2C_AGENT_LD_MISS    0x23
#define CPMU_L2C_AGENT_ST         0x24
#define CPMU_L2C_AGENT_ST_MISS    0x25
#define CPMU_INST_A32             0x78
#define CPMU_INST_THUMB           0x79
#define CPMU_INST_A64             0x7a
#define CPMU_INST_BRANCH          0x7b
#define CPMU_SYNC_DC_LOAD_MISS    0xb4
#define CPMU_SYNC_DC_STORE_MISS   0xb5
#define CPMU_SYNC_DTLB_MISS       0xb6
#define CPMU_SYNC_ST_HIT_YNGR_LD  0xb9
#define CPMU_SYNC_BR_ANY_MISP     0xc0
#define CPMU_FED_IC_MISS_DEM      0xce
#define CPMU_FED_ITLB_MISS        0xcf

#else /* HAS_EARLY_APPLE_CPMU */

#if HAS_CPMU_BIU_EVENTS
#define CPMU_BIU_UPSTREAM_CYCLE   0x13
#define CPMU_BIU_DOWNSTREAM_CYCLE 0x14
#endif /* HAS_CPMU_BIU_EVENTS */

#if HAS_CPMU_L2C_EVENTS
#define CPMU_L2C_AGENT_LD      0x1a
#define CPMU_L2C_AGENT_LD_MISS 0x1b
#define CPMU_L2C_AGENT_ST      0x1c
#define CPMU_L2C_AGENT_ST_MISS 0x1d
#endif /* HAS_CPMU_L2C_EVENTS */

#define CPMU_INST_A32            0x8a
#define CPMU_INST_THUMB          0x8b
#define CPMU_INST_A64            0x8c
#define CPMU_INST_BRANCH         0x8d
#define CPMU_SYNC_DC_LOAD_MISS   0xbf
#define CPMU_SYNC_DC_STORE_MISS  0xc0
#define CPMU_SYNC_DTLB_MISS      0xc1
#define CPMU_SYNC_ST_HIT_YNGR_LD 0xc4
#define CPMU_SYNC_BR_ANY_MISP    0xcb
#define CPMU_FED_IC_MISS_DEM     0xd3
#define CPMU_FED_ITLB_MISS       0xd4

#endif /* !HAS_EARLY_APPLE_CPMU */
/* List of counter events that are allowed to be used by 3rd-parties. */
static kpc_config_t whitelist[] = {
#if HAS_CPMU_BIU_EVENTS
	CPMU_BIU_UPSTREAM_CYCLE, CPMU_BIU_DOWNSTREAM_CYCLE,
#endif /* HAS_CPMU_BIU_EVENTS */

#if HAS_CPMU_L2C_EVENTS
	CPMU_L2C_AGENT_LD, CPMU_L2C_AGENT_LD_MISS, CPMU_L2C_AGENT_ST,
	CPMU_L2C_AGENT_ST_MISS,
#endif /* HAS_CPMU_L2C_EVENTS */

	CPMU_INST_A32, CPMU_INST_THUMB, CPMU_INST_A64, CPMU_INST_BRANCH,
	CPMU_SYNC_DC_LOAD_MISS, CPMU_SYNC_DC_STORE_MISS,
	CPMU_SYNC_DTLB_MISS, CPMU_SYNC_ST_HIT_YNGR_LD,
	CPMU_SYNC_BR_ANY_MISP, CPMU_FED_IC_MISS_DEM, CPMU_FED_ITLB_MISS,
};

#define WHITELIST_COUNT (sizeof(whitelist) / sizeof(whitelist[0]))
#define EVENT_MASK      0xff
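
/*
 * Example (illustrative, not part of the original source): a third-party
 * configuration of (CPMU_INST_A64 | CFGWORD_EL0A64EN_MASK) passes the
 * whitelist check, because only the low EVENT_MASK bits are compared against
 * the table above; the EL enable bits are stripped first.
 */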
config_in_whitelist(kpc_config_t cfg)
	for (unsigned int i = 0; i < WHITELIST_COUNT; i++) {
		/* Strip off any EL configuration bits -- just look at the event. */
		if ((cfg & EVENT_MASK) == whitelist[i]) {
290 kprintf("PMCR0 = 0x%" PRIx64
"\n", SREG_READ("S3_1_C15_C0_0"));
291 kprintf("PMCR1 = 0x%" PRIx64
"\n", SREG_READ("S3_1_C15_C1_0"));
292 kprintf("PMCR2 = 0x%" PRIx64
"\n", SREG_READ("S3_1_C15_C2_0"));
293 kprintf("PMCR3 = 0x%" PRIx64
"\n", SREG_READ("S3_1_C15_C3_0"));
294 kprintf("PMCR4 = 0x%" PRIx64
"\n", SREG_READ("S3_1_C15_C4_0"));
295 kprintf("PMESR0 = 0x%" PRIx64
"\n", SREG_READ("S3_1_C15_C5_0"));
296 kprintf("PMESR1 = 0x%" PRIx64
"\n", SREG_READ("S3_1_C15_C6_0"));
298 kprintf("PMC0 = 0x%" PRIx64
"\n", SREG_READ("PMC0"));
299 kprintf("PMC1 = 0x%" PRIx64
"\n", SREG_READ("PMC1"));
300 kprintf("S3_2_C15_C2_0 = 0x%" PRIx64
"\n", SREG_READ("S3_2_C15_C2_0"));
301 kprintf("S3_2_C15_C3_0 = 0x%" PRIx64
"\n", SREG_READ("S3_2_C15_C3_0"));
302 kprintf("S3_2_C15_C4_0 = 0x%" PRIx64
"\n", SREG_READ("S3_2_C15_C4_0"));
303 kprintf("S3_2_C15_C5_0 = 0x%" PRIx64
"\n", SREG_READ("S3_2_C15_C5_0"));
304 kprintf("S3_2_C15_C6_0 = 0x%" PRIx64
"\n", SREG_READ("S3_2_C15_C6_0"));
305 kprintf("S3_2_C15_C7_0 = 0x%" PRIx64
"\n", SREG_READ("S3_2_C15_C7_0"));
307 #if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
308 kprintf("S3_2_C15_C9_0 = 0x%" PRIx64
"\n", SREG_READ("S3_2_C15_C9_0"));
309 kprintf("S3_2_C15_C10_0 = 0x%" PRIx64
"\n", SREG_READ("S3_2_C15_C10_0"));
enable_counter(uint32_t counter)
	boolean_t counter_running, pmi_enabled, enabled;

	pmcr0 = SREG_READ("S3_1_C15_C0_0") | 0x3 /* leave the fixed counters enabled for monotonic */;

	counter_running = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;
	pmi_enabled = (pmcr0 & PMCR0_PMI_ENABLE_MASK(counter)) != 0;

	enabled = counter_running && pmi_enabled;

	pmcr0 |= PMCR0_PMC_ENABLE_MASK(counter);
	pmcr0 |= PMCR0_PMI_ENABLE_MASK(counter);
	SREG_WRITE("S3_1_C15_C0_0", pmcr0);

disable_counter(uint32_t counter)
	pmcr0 = SREG_READ("S3_1_C15_C0_0") | 0x3;
	enabled = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;

	pmcr0 &= PMCR0_PMC_DISABLE_MASK(counter);
	SREG_WRITE("S3_1_C15_C0_0", pmcr0);
/*
 * Enable counter in processor modes determined by configuration word.
 */
set_modes(uint32_t counter, kpc_config_t cfgword)
	int cpuid = cpu_number();

	if (cfgword & CFGWORD_EL0A32EN_MASK) {
		bits |= PMCR1_EL0_A32_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL0A64EN_MASK) {
		bits |= PMCR1_EL0_A64_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL1EN_MASK) {
		bits |= PMCR1_EL1_A64_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL3EN_MASK) {
		bits |= PMCR1_EL3_A64_ENABLE_MASK(counter);
	}

	/*
	 * Backwards compatibility: Writing a non-zero configuration word with
	 * all zeros in bits 16-19 is interpreted as enabling in all modes.
	 * This matches the behavior when the PMCR1 bits weren't exposed.
	 */
	if (bits == 0 && cfgword != 0) {
		bits = PMCR1_EL_ALL_ENABLE_MASK(counter);
	}

	uint64_t pmcr1 = SREG_READ("S3_1_C15_C1_0");
	pmcr1 &= PMCR1_EL_ALL_DISABLE_MASK(counter);
	pmcr1 |= bits;
	pmcr1 |= 0x30303; /* monotonic compatibility */
	SREG_WRITE("S3_1_C15_C1_0", pmcr1);
	saved_PMCR[cpuid][1] = pmcr1;
read_counter(uint32_t counter)
	// case 0: return SREG_READ("PMC0");
	// case 1: return SREG_READ("PMC1");
	case 2: return SREG_READ("S3_2_C15_C2_0");
	case 3: return SREG_READ("S3_2_C15_C3_0");
	case 4: return SREG_READ("S3_2_C15_C4_0");
	case 5: return SREG_READ("S3_2_C15_C5_0");
	case 6: return SREG_READ("S3_2_C15_C6_0");
	case 7: return SREG_READ("S3_2_C15_C7_0");
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	case 8: return SREG_READ("S3_2_C15_C9_0");
	case 9: return SREG_READ("S3_2_C15_C10_0");
#endif

write_counter(uint32_t counter, uint64_t value)
	// case 0: SREG_WRITE("PMC0", value); break;
	// case 1: SREG_WRITE("PMC1", value); break;
	case 2: SREG_WRITE("S3_2_C15_C2_0", value); break;
	case 3: SREG_WRITE("S3_2_C15_C3_0", value); break;
	case 4: SREG_WRITE("S3_2_C15_C4_0", value); break;
	case 5: SREG_WRITE("S3_2_C15_C5_0", value); break;
	case 6: SREG_WRITE("S3_2_C15_C6_0", value); break;
	case 7: SREG_WRITE("S3_2_C15_C7_0", value); break;
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	case 8: SREG_WRITE("S3_2_C15_C9_0", value); break;
	case 9: SREG_WRITE("S3_2_C15_C10_0", value); break;
#endif
kpc_rawpmu_config_count(void)
	return RAWPMU_CONFIG_COUNT;

kpc_get_rawpmu_config(kpc_config_t *configv)
	configv[0] = SREG_READ("S3_1_C15_C2_0");
	configv[1] = SREG_READ("S3_1_C15_C3_0");
	configv[2] = SREG_READ("S3_1_C15_C4_0");
	configv[3] = SREG_READ("S3_1_C15_C7_0");
	configv[4] = SREG_READ("S3_1_C15_C8_0");
	configv[5] = SREG_READ("S3_1_C15_C9_0");
	configv[6] = SREG_READ("S3_1_C15_C10_0");
#if RAWPMU_CONFIG_COUNT > 7
	configv[7] = SREG_READ("S3_2_C15_C15_0");
	configv[8] = SREG_READ("S3_2_C15_C14_0");
	configv[9] = SREG_READ("S3_2_C15_C13_0");
	configv[10] = SREG_READ("S3_2_C15_C12_0");
#endif

kpc_set_rawpmu_config(kpc_config_t *configv)
	SREG_WRITE("S3_1_C15_C2_0", configv[0]);
	SREG_WRITE("S3_1_C15_C3_0", configv[1]);
	SREG_WRITE("S3_1_C15_C4_0", configv[2]);
	SREG_WRITE("S3_1_C15_C7_0", configv[3]);
	SREG_WRITE("S3_1_C15_C8_0", configv[4]);
	SREG_WRITE("S3_1_C15_C9_0", configv[5]);
	SREG_WRITE("S3_1_C15_C10_0", configv[6]);
#if RAWPMU_CONFIG_COUNT > 7
	SREG_WRITE("S3_2_C15_C15_0", configv[7]);
	SREG_WRITE("S3_2_C15_C14_0", configv[8]);
	SREG_WRITE("S3_2_C15_C13_0", configv[9]);
	SREG_WRITE("S3_2_C15_C12_0", configv[10]);
#endif
	int cpuid = cpu_number();

	__asm__ volatile ("dmb ish");

	assert(ml_get_interrupts_enabled() == FALSE);

	/* Save event selections. */
	saved_PMESR[cpuid][0] = SREG_READ("S3_1_C15_C5_0");
	saved_PMESR[cpuid][1] = SREG_READ("S3_1_C15_C6_0");

	kpc_get_rawpmu_config(saved_RAWPMU[cpuid]);

	/* Disable the counters. */
	// SREG_WRITE("S3_1_C15_C0_0", clear);

	/* Finally, save state for each counter. */
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		saved_counter[cpuid][i] = read_counter(i);
	}

	int cpuid = cpu_number();

	/* Restore PMESR values. */
	SREG_WRITE("S3_1_C15_C5_0", saved_PMESR[cpuid][0]);
	SREG_WRITE("S3_1_C15_C6_0", saved_PMESR[cpuid][1]);

	kpc_set_rawpmu_config(saved_RAWPMU[cpuid]);

	/* Restore counter values */
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		write_counter(i, saved_counter[cpuid][i]);
	}

	/* Restore PMCR0/1 values (with PMCR0 last to enable). */
	SREG_WRITE("S3_1_C15_C1_0", saved_PMCR[cpuid][1] | 0x30303);
get_counter_config(uint32_t counter)
	case 2: /* FALLTHROUGH */
	case 3: /* FALLTHROUGH */
	case 4: /* FALLTHROUGH */
		pmesr = PMESR_EVT_DECODE(SREG_READ("S3_1_C15_C5_0"), counter, 2);
	case 6: /* FALLTHROUGH */
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	case 8: /* FALLTHROUGH */
		pmesr = PMESR_EVT_DECODE(SREG_READ("S3_1_C15_C6_0"), counter, 6);

	kpc_config_t config = pmesr;

	uint64_t pmcr1 = SREG_READ("S3_1_C15_C1_0");

	if (pmcr1 & PMCR1_EL0_A32_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A32EN_MASK;
	}
	if (pmcr1 & PMCR1_EL0_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A64EN_MASK;
	}
	if (pmcr1 & PMCR1_EL1_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL1EN_MASK;
		config |= CFGWORD_EL3EN_MASK;
	}
	if (pmcr1 & PMCR1_EL3_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL3EN_MASK;
	}

set_counter_config(uint32_t counter, uint64_t config)
	int cpuid = cpu_number();

	case 2: /* FALLTHROUGH */
	case 3: /* FALLTHROUGH */
	case 4: /* FALLTHROUGH */
		pmesr = SREG_READ("S3_1_C15_C5_0");
		pmesr &= PMESR_EVT_CLEAR(counter, 2);
		pmesr |= PMESR_EVT_ENCODE(config, counter, 2);
		SREG_WRITE("S3_1_C15_C5_0", pmesr);
		saved_PMESR[cpuid][0] = pmesr;
	case 6: /* FALLTHROUGH */
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
	case 8: /* FALLTHROUGH */
		pmesr = SREG_READ("S3_1_C15_C6_0");
		pmesr &= PMESR_EVT_CLEAR(counter, 6);
		pmesr |= PMESR_EVT_ENCODE(config, counter, 6);
		SREG_WRITE("S3_1_C15_C6_0", pmesr);
		saved_PMESR[cpuid][1] = pmesr;

	set_modes(counter, config);
/* internal functions */

kpc_is_running_fixed(void)
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;

kpc_is_running_configurable(uint64_t pmc_mask)
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);

kpc_fixed_count(void)
	return KPC_ARM64_FIXED_COUNT;

kpc_configurable_count(void)
	return KPC_ARM64_CONFIGURABLE_COUNT;

kpc_fixed_config_count(void)

kpc_configurable_config_count(uint64_t pmc_mask)
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);

kpc_get_fixed_config(kpc_config_t *configv __unused)

	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;

kpc_configurable_max(void)
	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & target_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(offset + i));

		if ((1ULL << i) & state_mask) {
			enable_counter(offset + i);
		} else {
			disable_counter(offset + i);
		}
	}

	ml_set_interrupts_enabled(enabled);

static uint32_t kpc_xcall_sync;

kpc_set_running_xcall( void *vstate )
	struct kpc_running_remote *mp_config = (struct kpc_running_remote *) vstate;

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);

	if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xcall_sync);
	}
static uint32_t kpc_xread_sync;

kpc_get_curcpu_counters_xcall(void *args)
	struct kpc_get_counters_remote *handler = args;

	assert(handler != NULL);
	assert(handler->buf != NULL);

	int offset = cpu_number() * handler->buf_stride;
	int r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic */
	os_atomic_add(&(handler->nb_counters), r, relaxed);

	if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xread_sync);
	}

kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
	int enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	*curcpu = cpu_number();

	struct kpc_get_counters_remote hdl = {
		.buf_stride = kpc_get_counter_count(classes)

	cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);
	int offset = hdl.nb_counters;

	(void)ml_set_interrupts_enabled(enabled);
kpc_get_fixed_counters(uint64_t *counterv)
#if MONOTONIC
	mt_fixed_counts(counterv);
#else /* MONOTONIC */
#pragma unused(counterv)
#endif /* !MONOTONIC */

kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		ctr = read_counter(i + offset);

		if (ctr & KPC_ARM64_COUNTER_OVF_MASK) {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
			    (ctr & KPC_ARM64_COUNTER_MASK);
		} else {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (ctr - CONFIGURABLE_RELOAD(i));
		}
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*configv++ = get_counter_config(i + offset);
		}
	}

kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(i + offset));

		set_counter_config(i + offset, *configv++);
	}

	ml_set_interrupts_enabled(enabled);
static uint32_t kpc_config_sync;

kpc_set_config_xcall(void *vmp_config)
	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0ULL;

	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(new_config, mp_config->pmc_mask);
		new_config += kpc_popcount(mp_config->pmc_mask);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		kpc_set_rawpmu_config(new_config);
		new_config += RAWPMU_CONFIG_COUNT;
	}

	if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_config_sync);
	}

kpc_reload_counter(uint32_t ctr)
	assert(ctr < (kpc_configurable_count() + kpc_fixed_count()));

	uint64_t old = read_counter(ctr);

	if (kpc_controls_counter(ctr)) {
		write_counter(ctr, FIXED_RELOAD(ctr));
		return old & KPC_ARM64_COUNTER_MASK;
	} else {
		/*
		 * Unset the overflow bit to clear the condition that drives
		 * PMIs. The power manager is not interested in handling PMIs.
		 */
		write_counter(ctr, old & KPC_ARM64_COUNTER_MASK);
static uint32_t kpc_reload_sync;

kpc_set_reload_xcall(void *vmp_config)
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
	uint64_t *new_period = NULL, max = kpc_configurable_max();

	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters, this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* ignore the counter */
			if (((1ULL << i) & mp_config->pmc_mask) == 0) {
				continue;
			}
			if (*new_period == 0) {
				*new_period = kpc_configurable_max();
			}
			CONFIGURABLE_RELOAD(i) = max - *new_period;
			/* reload the counter */
			kpc_reload_counter(offset + i);
			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);

	if (os_atomic_dec(&kpc_reload_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_reload_sync);
	}
kpc_pmi_handler(unsigned int ctr)
	uint64_t extra = kpc_reload_counter(ctr);

	FIXED_SHADOW(ctr) += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

	if (FIXED_ACTIONID(ctr)) {
		struct arm_saved_state *state;
		state = getCpuDatap()->cpu_int_state;

		kernel = !PSR64_IS_USER(get_saved_state_cpsr(state));
		pc = get_saved_state_pc(state);
		pc = VM_KERNEL_UNSLIDE(pc);

		uint64_t config = get_counter_config(ctr);
		kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
		bool custom_mode = false;
		if ((config & CFGWORD_EL0A32EN_MASK) || (config & CFGWORD_EL0A64EN_MASK)) {
			flags |= KPC_USER_COUNTING;
		if ((config & CFGWORD_EL1EN_MASK)) {
			flags |= KPC_KERNEL_COUNTING;

		/*
		 * For backwards-compatibility.
		 */
		flags |= KPC_USER_COUNTING | KPC_KERNEL_COUNTING;

		kpc_sample_kperf(FIXED_ACTIONID(ctr), ctr, config & 0xff, FIXED_SHADOW(ctr),
kpc_get_classes(void)
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_RAWPMU_MASK;

kpc_set_running_arch(struct kpc_running_remote *mp_config)
	assert(mp_config != NULL);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;

kpc_set_period_arch(struct kpc_config_remote *mp_config)
	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

kpc_set_config_arch(struct kpc_config_remote *mp_config)
	uint32_t count = kpc_popcount(mp_config->pmc_mask);

	assert(mp_config->configv);

	/* check config against whitelist for external devs */
	for (uint32_t i = 0; i < count; ++i) {
		if (!whitelist_disabled && !config_in_whitelist(mp_config->configv[i])) {
			return EPERM;
		}
	}

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);

	if (kpc_configured) {

	if (kpc_configured) {

kpc_set_sw_inc( uint32_t mask __unused )

kpc_disable_whitelist( int val )
	whitelist_disabled = val;

kpc_get_whitelist_disabled( void )
	return whitelist_disabled;

kpc_get_pmu_version(void)
	return KPC_PMU_ARM_APPLE;
#else /* APPLE_ARM64_ARCH_FAMILY */

/* We don't currently support non-Apple arm64 PMU configurations like PMUv3 */

kpc_get_classes(void)

kpc_fixed_count(void)

kpc_configurable_count(void)

kpc_fixed_config_count(void)

kpc_configurable_config_count(uint64_t pmc_mask __unused)

kpc_get_fixed_config(kpc_config_t *configv __unused)

kpc_configurable_max(void)

kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)

kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)

kpc_get_fixed_counters(uint64_t *counterv __unused)

kpc_is_running_fixed(void)

kpc_is_running_configurable(uint64_t pmc_mask __unused)

kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)

kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)

kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)

kpc_get_all_cpus_counters(uint32_t classes __unused, int *curcpu __unused, uint64_t *buf __unused)

kpc_set_sw_inc( uint32_t mask __unused )

kpc_get_pmu_version(void)
	return KPC_PMU_ERROR;

kpc_rawpmu_config_count(void)

kpc_get_rawpmu_config(__unused kpc_config_t *configv)

kpc_disable_whitelist( int val __unused )

kpc_get_whitelist_disabled( void )

#endif /* !APPLE_ARM64_ARCH_FAMILY */