/*
 * Copyright (c) 2012-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <kern/cpu_number.h>
#include <kern/kpc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <stdint.h>
#include <sys/errno.h>

#if APPLE_ARM64_ARCH_FAMILY

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

void kpc_pmi_handler(unsigned int ctr);

/*
 * PMCs 8 and 9 were added in Hurricane; to maintain the existing bit
 * positions of the other PMCs, their configuration bits start at position 32.
 */
#define PMCR_PMC_8_9_OFFSET (32)
#define PMCR_PMC_8_9_SHIFT(PMC) (((PMC) - 8) + PMCR_PMC_8_9_OFFSET)
#define PMCR_PMC_SHIFT(PMC) (((PMC) <= 7) ? (PMC) : \
	PMCR_PMC_8_9_SHIFT(PMC))
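
/*
 * Worked example of the mapping above: PMCR_PMC_SHIFT(7) == 7, while
 * PMCR_PMC_SHIFT(8) == (8 - 8) + 32 == 32 and PMCR_PMC_SHIFT(9) == 33, so the
 * control bits for PMCs 8 and 9 occupy bits 32 and 33. The compile-time
 * checks below are illustrative additions, not part of the original
 * interface.
 */
_Static_assert(PMCR_PMC_SHIFT(7) == 7, "PMC 7 keeps its low bit position");
_Static_assert(PMCR_PMC_SHIFT(8) == 32, "PMC 8 configuration bits start at bit 32");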

/*
 * PMCR0 controls enabling, interrupts, and overflow of performance counters.
 */

/* PMC is enabled */
#define PMCR0_PMC_ENABLE_MASK(PMC) (UINT64_C(0x1) << PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMC_DISABLE_MASK(PMC) (~PMCR0_PMC_ENABLE_MASK(PMC))

/* overflow on a PMC generates an interrupt */
#define PMCR0_PMI_OFFSET (12)
#define PMCR0_PMI_SHIFT(PMC) (PMCR0_PMI_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMI_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR0_PMI_SHIFT(PMC))
#define PMCR0_PMI_DISABLE_MASK(PMC) (~PMCR0_PMI_ENABLE_MASK(PMC))

/* disable counting when a PMI is signaled (except for AIC interrupts) */
#define PMCR0_DISCNT_SHIFT (20)
#define PMCR0_DISCNT_ENABLE_MASK (UINT64_C(1) << PMCR0_DISCNT_SHIFT)
#define PMCR0_DISCNT_DISABLE_MASK (~PMCR0_DISCNT_ENABLE_MASK)

/* 21 unused */

/* block PMIs until ERET retires */
#define PMCR0_WFRFE_SHIFT (22)
#define PMCR0_WFRFE_ENABLE_MASK (UINT64_C(1) << PMCR0_WFRFE_SHIFT)
#define PMCR0_WFRFE_DISABLE_MASK (~PMCR0_WFRFE_ENABLE_MASK)

/* count global L2C events */
#define PMCR0_L2CGLOBAL_SHIFT (23)
#define PMCR0_L2CGLOBAL_ENABLE_MASK (UINT64_C(1) << PMCR0_L2CGLOBAL_SHIFT)
#define PMCR0_L2CGLOBAL_DISABLE_MASK (~PMCR0_L2CGLOBAL_ENABLE_MASK)

/* allow user mode access to configuration registers */
#define PMCR0_USEREN_SHIFT (30)
#define PMCR0_USEREN_ENABLE_MASK (UINT64_C(1) << PMCR0_USEREN_SHIFT)
#define PMCR0_USEREN_DISABLE_MASK (~PMCR0_USEREN_ENABLE_MASK)

/* force the CPMU clocks in case of a clocking bug */
#define PMCR0_CLKEN_SHIFT (31)
#define PMCR0_CLKEN_ENABLE_MASK (UINT64_C(1) << PMCR0_CLKEN_SHIFT)
#define PMCR0_CLKEN_DISABLE_MASK (~PMCR0_CLKEN_ENABLE_MASK)

/* 32 - 44 mirror the low bits for PMCs 8 and 9 */

/* PMCR1 enables counters in different processor modes */

#define PMCR1_EL0_A32_OFFSET (0)
#define PMCR1_EL0_A64_OFFSET (8)
#define PMCR1_EL1_A64_OFFSET (16)
#define PMCR1_EL3_A64_OFFSET (24)

#define PMCR1_EL0_A32_SHIFT(PMC) (PMCR1_EL0_A32_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL0_A64_SHIFT(PMC) (PMCR1_EL0_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL1_A64_SHIFT(PMC) (PMCR1_EL1_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL3_A64_SHIFT(PMC) (PMCR1_EL3_A64_OFFSET + PMCR_PMC_SHIFT(PMC))

#define PMCR1_EL0_A32_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A32_SHIFT(PMC))
#define PMCR1_EL0_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A64_SHIFT(PMC))
#define PMCR1_EL1_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL1_A64_SHIFT(PMC))
/* PMCR1_EL3_A64 is not supported on PMCs 8 and 9 */
#if NO_MONITOR
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) UINT64_C(0)
#else
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL3_A64_SHIFT(PMC))
#endif

#define PMCR1_EL_ALL_ENABLE_MASK(PMC) (PMCR1_EL0_A32_ENABLE_MASK(PMC) | \
	PMCR1_EL0_A64_ENABLE_MASK(PMC) | \
	PMCR1_EL1_A64_ENABLE_MASK(PMC) | \
	PMCR1_EL3_A64_ENABLE_MASK(PMC))
#define PMCR1_EL_ALL_DISABLE_MASK(PMC) (~PMCR1_EL_ALL_ENABLE_MASK(PMC))

/* PMESR0 and PMESR1 are event selection registers */

/* PMESR0 selects which event is counted on PMCs 2, 3, 4, and 5 */
/* PMESR1 selects which event is counted on PMCs 6, 7, 8, and 9 */

#define PMESR_PMC_WIDTH (8)
#define PMESR_PMC_MASK (UINT8_MAX)
#define PMESR_SHIFT(PMC, OFF) (8 * ((PMC) - (OFF)))
#define PMESR_EVT_MASK(PMC, OFF) (PMESR_PMC_MASK << PMESR_SHIFT(PMC, OFF))
#define PMESR_EVT_CLEAR(PMC, OFF) (~PMESR_EVT_MASK(PMC, OFF))

#define PMESR_EVT_DECODE(PMESR, PMC, OFF) \
	(((PMESR) >> PMESR_SHIFT(PMC, OFF)) & PMESR_PMC_MASK)
#define PMESR_EVT_ENCODE(EVT, PMC, OFF) \
	(((EVT) & PMESR_PMC_MASK) << PMESR_SHIFT(PMC, OFF))

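/*
 * Worked example of the PMESR macros above: PMC 7 is the second slot of
 * PMESR1 (OFF == 6), so its selector occupies bits 8-15; PMESR_SHIFT(7, 6)
 * == 8 and PMESR_EVT_ENCODE(evt, 7, 6) places the low 8 bits of evt at bit 8.
 * PMESR_EVT_DECODE() recovers the event number by shifting back down and
 * masking with PMESR_PMC_MASK.
 */
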
/*
 * The low 8 bits of a configuration word select the event to program on
 * PMESR{0,1}. Bits 16-19 are mapped to PMCR1 bits.
 */
#define CFGWORD_EL0A32EN_MASK (0x10000)
#define CFGWORD_EL0A64EN_MASK (0x20000)
#define CFGWORD_EL1EN_MASK (0x40000)
#define CFGWORD_EL3EN_MASK (0x80000)
#define CFGWORD_ALLMODES_MASK (0xf0000)

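/*
 * Illustrative example of a configuration word (the event number is one of
 * the CPMU_* values defined below): count core cycles only while executing
 * 64-bit user-space code.
 *
 *	kpc_config_t cfg = CPMU_CORE_CYCLE | CFGWORD_EL0A64EN_MASK;
 *
 * Leaving bits 16-19 clear in a non-zero configuration word falls back to
 * counting in all modes (see set_modes() below).
 */
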
/* ACC offsets for PIO */
#define ACC_CPMU_PMC0_OFFSET (0x200)
#define ACC_CPMU_PMC8_OFFSET (0x280)

/*
 * Macros for reading and writing system registers.
 *
 * SR must be the name of a system register, passed as a string literal
 * (e.g. "S3_1_C15_C0_0" for PMCR0).
 */
#define SREG_WRITE(SR, V) __asm__ volatile("msr " SR ", %0 ; isb" : : "r"(V))
#define SREG_READ(SR) ({ uint64_t VAL; \
	__asm__ volatile("mrs %0, " SR : "=r"(VAL)); \
	VAL; })

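/*
 * Example usage, matching how the rest of this file accesses the CPMU
 * registers (PMCR0 is S3_1_C15_C0_0):
 *
 *	uint64_t pmcr0 = SREG_READ("S3_1_C15_C0_0");
 *	SREG_WRITE("S3_1_C15_C0_0", pmcr0);
 *
 * SREG_WRITE issues an isb so the new control value takes effect before
 * subsequent instructions.
 */
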
/*
 * Configuration registers that can be controlled by RAWPMU:
 *
 * All: PMCR2-4, OPMAT0-1, OPMSK0-1.
 * Typhoon/Twister/Hurricane: PMMMAP, PMTRHLD2/4/6.
 */
#if HAS_EARLY_APPLE_CPMU
#define RAWPMU_CONFIG_COUNT 7
#else /* HAS_EARLY_APPLE_CPMU */
#define RAWPMU_CONFIG_COUNT 11
#endif /* !HAS_EARLY_APPLE_CPMU */

/* TODO: allocate dynamically */
static uint64_t saved_PMCR[MAX_CPUS][2];
static uint64_t saved_PMESR[MAX_CPUS][2];
static uint64_t saved_RAWPMU[MAX_CPUS][RAWPMU_CONFIG_COUNT];
static uint64_t saved_counter[MAX_CPUS][KPC_MAX_COUNTERS];
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_configured = 0;

/*
 * The whitelist is disabled by default on development and debug kernels.
 * This can be changed via the kpc.disable_whitelist sysctl. The whitelist
 * is enabled on release kernels and cannot be disabled.
 */
#if DEVELOPMENT || DEBUG
static boolean_t whitelist_disabled = TRUE;
#else
static boolean_t whitelist_disabled = FALSE;
#endif

#define CPMU_CORE_CYCLE 0x02

#if HAS_EARLY_APPLE_CPMU

#define CPMU_BIU_UPSTREAM_CYCLE 0x19
#define CPMU_BIU_DOWNSTREAM_CYCLE 0x1a
#define CPMU_L2C_AGENT_LD 0x22
#define CPMU_L2C_AGENT_LD_MISS 0x23
#define CPMU_L2C_AGENT_ST 0x24
#define CPMU_L2C_AGENT_ST_MISS 0x25
#define CPMU_INST_A32 0x78
#define CPMU_INST_THUMB 0x79
#define CPMU_INST_A64 0x7a
#define CPMU_INST_BRANCH 0x7b
#define CPMU_SYNC_DC_LOAD_MISS 0xb4
#define CPMU_SYNC_DC_STORE_MISS 0xb5
#define CPMU_SYNC_DTLB_MISS 0xb6
#define CPMU_SYNC_ST_HIT_YNGR_LD 0xb9
#define CPMU_SYNC_BR_ANY_MISP 0xc0
#define CPMU_FED_IC_MISS_DEM 0xce
#define CPMU_FED_ITLB_MISS 0xcf

#else /* HAS_EARLY_APPLE_CPMU */

#if HAS_CPMU_BIU_EVENTS
#define CPMU_BIU_UPSTREAM_CYCLE 0x13
#define CPMU_BIU_DOWNSTREAM_CYCLE 0x14
#endif /* HAS_CPMU_BIU_EVENTS */

#if HAS_CPMU_L2C_EVENTS
#define CPMU_L2C_AGENT_LD 0x1a
#define CPMU_L2C_AGENT_LD_MISS 0x1b
#define CPMU_L2C_AGENT_ST 0x1c
#define CPMU_L2C_AGENT_ST_MISS 0x1d
#endif /* HAS_CPMU_L2C_EVENTS */

#define CPMU_INST_A32 0x8a
#define CPMU_INST_THUMB 0x8b
#define CPMU_INST_A64 0x8c
#define CPMU_INST_BRANCH 0x8d
#define CPMU_SYNC_DC_LOAD_MISS 0xbf
#define CPMU_SYNC_DC_STORE_MISS 0xc0
#define CPMU_SYNC_DTLB_MISS 0xc1
#define CPMU_SYNC_ST_HIT_YNGR_LD 0xc4
#define CPMU_SYNC_BR_ANY_MISP 0xcb
#define CPMU_FED_IC_MISS_DEM 0xd3
#define CPMU_FED_ITLB_MISS 0xd4

#endif /* !HAS_EARLY_APPLE_CPMU */

/* List of counter events that are allowed to be used by 3rd-parties. */
static kpc_config_t whitelist[] = {
	0,    /* NO_EVENT */

	CPMU_CORE_CYCLE,

#if HAS_CPMU_BIU_EVENTS
	CPMU_BIU_UPSTREAM_CYCLE, CPMU_BIU_DOWNSTREAM_CYCLE,
#endif /* HAS_CPMU_BIU_EVENTS */

#if HAS_CPMU_L2C_EVENTS
	CPMU_L2C_AGENT_LD, CPMU_L2C_AGENT_LD_MISS, CPMU_L2C_AGENT_ST,
	CPMU_L2C_AGENT_ST_MISS,
#endif /* HAS_CPMU_L2C_EVENTS */

	CPMU_INST_A32, CPMU_INST_THUMB, CPMU_INST_A64, CPMU_INST_BRANCH,
	CPMU_SYNC_DC_LOAD_MISS, CPMU_SYNC_DC_STORE_MISS,
	CPMU_SYNC_DTLB_MISS, CPMU_SYNC_ST_HIT_YNGR_LD,
	CPMU_SYNC_BR_ANY_MISP, CPMU_FED_IC_MISS_DEM, CPMU_FED_ITLB_MISS,
};
#define WHITELIST_COUNT (sizeof(whitelist) / sizeof(whitelist[0]))
#define EVENT_MASK 0xff

static bool
config_in_whitelist(kpc_config_t cfg)
{
	for (unsigned int i = 0; i < WHITELIST_COUNT; i++) {
		/* Strip off any EL configuration bits -- just look at the event. */
		if ((cfg & EVENT_MASK) == whitelist[i]) {
			return true;
		}
	}
	return false;
}

#ifdef KPC_DEBUG
static void
dump_regs(void)
{
	kprintf("PMCR0 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C0_0"));
	kprintf("PMCR1 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C1_0"));
	kprintf("PMCR2 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C2_0"));
	kprintf("PMCR3 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C3_0"));
	kprintf("PMCR4 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C4_0"));
	kprintf("PMESR0 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C5_0"));
	kprintf("PMESR1 = 0x%" PRIx64 "\n", SREG_READ("S3_1_C15_C6_0"));

	kprintf("PMC0 = 0x%" PRIx64 "\n", SREG_READ("PMC0"));
	kprintf("PMC1 = 0x%" PRIx64 "\n", SREG_READ("PMC1"));
	kprintf("S3_2_C15_C2_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C2_0"));
	kprintf("S3_2_C15_C3_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C3_0"));
	kprintf("S3_2_C15_C4_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C4_0"));
	kprintf("S3_2_C15_C5_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C5_0"));
	kprintf("S3_2_C15_C6_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C6_0"));
	kprintf("S3_2_C15_C7_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C7_0"));

#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	kprintf("S3_2_C15_C9_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C9_0"));
	kprintf("S3_2_C15_C10_0 = 0x%" PRIx64 "\n", SREG_READ("S3_2_C15_C10_0"));
#endif
}
#endif

static boolean_t
enable_counter(uint32_t counter)
{
	uint64_t pmcr0 = 0;
	boolean_t counter_running, pmi_enabled, enabled;

	pmcr0 = SREG_READ("S3_1_C15_C0_0") | 0x3 /* leave the fixed counters enabled for monotonic */;

	counter_running = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;
	pmi_enabled = (pmcr0 & PMCR0_PMI_ENABLE_MASK(counter)) != 0;

	enabled = counter_running && pmi_enabled;

	if (!enabled) {
		pmcr0 |= PMCR0_PMC_ENABLE_MASK(counter);
		pmcr0 |= PMCR0_PMI_ENABLE_MASK(counter);
		SREG_WRITE("S3_1_C15_C0_0", pmcr0);
	}

	return enabled;
}

static boolean_t
disable_counter(uint32_t counter)
{
	uint64_t pmcr0;
	boolean_t enabled;

	/* Counters 0 and 1 are the fixed counters and are left alone here. */
	if (counter < 2) {
		return true;
	}

	pmcr0 = SREG_READ("S3_1_C15_C0_0") | 0x3;
	enabled = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;

	if (enabled) {
		pmcr0 &= PMCR0_PMC_DISABLE_MASK(counter);
		SREG_WRITE("S3_1_C15_C0_0", pmcr0);
	}

	return enabled;
}

/*
 * Enable counter in processor modes determined by configuration word.
 */
static void
set_modes(uint32_t counter, kpc_config_t cfgword)
{
	uint64_t bits = 0;
	int cpuid = cpu_number();

	if (cfgword & CFGWORD_EL0A32EN_MASK) {
		bits |= PMCR1_EL0_A32_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL0A64EN_MASK) {
		bits |= PMCR1_EL0_A64_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL1EN_MASK) {
		bits |= PMCR1_EL1_A64_ENABLE_MASK(counter);
	}
#if !NO_MONITOR
	if (cfgword & CFGWORD_EL3EN_MASK) {
		bits |= PMCR1_EL3_A64_ENABLE_MASK(counter);
	}
#endif

	/*
	 * Backwards compatibility: Writing a non-zero configuration word with
	 * all zeros in bits 16-19 is interpreted as enabling in all modes.
	 * This matches the behavior when the PMCR1 bits weren't exposed.
	 */
	if (bits == 0 && cfgword != 0) {
		bits = PMCR1_EL_ALL_ENABLE_MASK(counter);
	}

	uint64_t pmcr1 = SREG_READ("S3_1_C15_C1_0");
	pmcr1 &= PMCR1_EL_ALL_DISABLE_MASK(counter);
	pmcr1 |= bits;
	pmcr1 |= 0x30303; /* monotonic compatibility */
	SREG_WRITE("S3_1_C15_C1_0", pmcr1);
	saved_PMCR[cpuid][1] = pmcr1;
}

static uint64_t
read_counter(uint32_t counter)
{
	switch (counter) {
	// case 0: return SREG_READ("PMC0");
	// case 1: return SREG_READ("PMC1");
	case 2: return SREG_READ("S3_2_C15_C2_0");
	case 3: return SREG_READ("S3_2_C15_C3_0");
	case 4: return SREG_READ("S3_2_C15_C4_0");
	case 5: return SREG_READ("S3_2_C15_C5_0");
	case 6: return SREG_READ("S3_2_C15_C6_0");
	case 7: return SREG_READ("S3_2_C15_C7_0");
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	case 8: return SREG_READ("S3_2_C15_C9_0");
	case 9: return SREG_READ("S3_2_C15_C10_0");
#endif
	default: return 0;
	}
}

static void
write_counter(uint32_t counter, uint64_t value)
{
	switch (counter) {
	// case 0: SREG_WRITE("PMC0", value); break;
	// case 1: SREG_WRITE("PMC1", value); break;
	case 2: SREG_WRITE("S3_2_C15_C2_0", value); break;
	case 3: SREG_WRITE("S3_2_C15_C3_0", value); break;
	case 4: SREG_WRITE("S3_2_C15_C4_0", value); break;
	case 5: SREG_WRITE("S3_2_C15_C5_0", value); break;
	case 6: SREG_WRITE("S3_2_C15_C6_0", value); break;
	case 7: SREG_WRITE("S3_2_C15_C7_0", value); break;
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	case 8: SREG_WRITE("S3_2_C15_C9_0", value); break;
	case 9: SREG_WRITE("S3_2_C15_C10_0", value); break;
#endif
	default: break;
	}
}

uint32_t
kpc_rawpmu_config_count(void)
{
	return RAWPMU_CONFIG_COUNT;
}

int
kpc_get_rawpmu_config(kpc_config_t *configv)
{
	configv[0] = SREG_READ("S3_1_C15_C2_0");
	configv[1] = SREG_READ("S3_1_C15_C3_0");
	configv[2] = SREG_READ("S3_1_C15_C4_0");
	configv[3] = SREG_READ("S3_1_C15_C7_0");
	configv[4] = SREG_READ("S3_1_C15_C8_0");
	configv[5] = SREG_READ("S3_1_C15_C9_0");
	configv[6] = SREG_READ("S3_1_C15_C10_0");
#if RAWPMU_CONFIG_COUNT > 7
	configv[7] = SREG_READ("S3_2_C15_C15_0");
	configv[8] = SREG_READ("S3_2_C15_C14_0");
	configv[9] = SREG_READ("S3_2_C15_C13_0");
	configv[10] = SREG_READ("S3_2_C15_C12_0");
#endif
	return 0;
}

static int
kpc_set_rawpmu_config(kpc_config_t *configv)
{
	SREG_WRITE("S3_1_C15_C2_0", configv[0]);
	SREG_WRITE("S3_1_C15_C3_0", configv[1]);
	SREG_WRITE("S3_1_C15_C4_0", configv[2]);
	SREG_WRITE("S3_1_C15_C7_0", configv[3]);
	SREG_WRITE("S3_1_C15_C8_0", configv[4]);
	SREG_WRITE("S3_1_C15_C9_0", configv[5]);
	SREG_WRITE("S3_1_C15_C10_0", configv[6]);
#if RAWPMU_CONFIG_COUNT > 7
	SREG_WRITE("S3_2_C15_C15_0", configv[7]);
	SREG_WRITE("S3_2_C15_C14_0", configv[8]);
	SREG_WRITE("S3_2_C15_C13_0", configv[9]);
	SREG_WRITE("S3_2_C15_C12_0", configv[10]);
#endif
	return 0;
}

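/*
 * save_regs()/restore_regs() stash and re-establish the per-CPU PMU state
 * (event selections, RAWPMU configuration, and counter values). They are
 * called from kpc_idle() and kpc_idle_exit() below when kpc is configured,
 * since the counters may otherwise lose their state across idle.
 */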
static void
save_regs(void)
{
	int cpuid = cpu_number();

	__asm__ volatile ("dmb ish");

	assert(ml_get_interrupts_enabled() == FALSE);

	/* Save event selections. */
	saved_PMESR[cpuid][0] = SREG_READ("S3_1_C15_C5_0");
	saved_PMESR[cpuid][1] = SREG_READ("S3_1_C15_C6_0");

	kpc_get_rawpmu_config(saved_RAWPMU[cpuid]);

	/* Disable the counters. */
	// SREG_WRITE("S3_1_C15_C0_0", clear);

	/* Finally, save state for each counter. */
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		saved_counter[cpuid][i] = read_counter(i);
	}
}

static void
restore_regs(void)
{
	int cpuid = cpu_number();

	/* Restore PMESR values. */
	SREG_WRITE("S3_1_C15_C5_0", saved_PMESR[cpuid][0]);
	SREG_WRITE("S3_1_C15_C6_0", saved_PMESR[cpuid][1]);

	kpc_set_rawpmu_config(saved_RAWPMU[cpuid]);

	/* Restore counter values */
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		write_counter(i, saved_counter[cpuid][i]);
	}

	/* Restore PMCR0/1 values (with PMCR0 last to enable). */
	SREG_WRITE("S3_1_C15_C1_0", saved_PMCR[cpuid][1] | 0x30303);
}

static uint64_t
get_counter_config(uint32_t counter)
{
	uint64_t pmesr;

	switch (counter) {
	case 2: /* FALLTHROUGH */
	case 3: /* FALLTHROUGH */
	case 4: /* FALLTHROUGH */
	case 5:
		pmesr = PMESR_EVT_DECODE(SREG_READ("S3_1_C15_C5_0"), counter, 2);
		break;
	case 6: /* FALLTHROUGH */
	case 7:
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	/* FALLTHROUGH */
	case 8: /* FALLTHROUGH */
	case 9:
#endif
		pmesr = PMESR_EVT_DECODE(SREG_READ("S3_1_C15_C6_0"), counter, 6);
		break;
	default:
		pmesr = 0;
		break;
	}

	kpc_config_t config = pmesr;

	uint64_t pmcr1 = SREG_READ("S3_1_C15_C1_0");

	if (pmcr1 & PMCR1_EL0_A32_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A32EN_MASK;
	}
	if (pmcr1 & PMCR1_EL0_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A64EN_MASK;
	}
	if (pmcr1 & PMCR1_EL1_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL1EN_MASK;
#if NO_MONITOR
		config |= CFGWORD_EL3EN_MASK;
#endif
	}
#if !NO_MONITOR
	if (pmcr1 & PMCR1_EL3_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL3EN_MASK;
	}
#endif

	return config;
}

static void
set_counter_config(uint32_t counter, uint64_t config)
{
	int cpuid = cpu_number();
	uint64_t pmesr = 0;

	switch (counter) {
	case 2: /* FALLTHROUGH */
	case 3: /* FALLTHROUGH */
	case 4: /* FALLTHROUGH */
	case 5:
		pmesr = SREG_READ("S3_1_C15_C5_0");
		pmesr &= PMESR_EVT_CLEAR(counter, 2);
		pmesr |= PMESR_EVT_ENCODE(config, counter, 2);
		SREG_WRITE("S3_1_C15_C5_0", pmesr);
		saved_PMESR[cpuid][0] = pmesr;
		break;

	case 6: /* FALLTHROUGH */
	case 7:
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
	/* FALLTHROUGH */
	case 8: /* FALLTHROUGH */
	case 9:
#endif
		pmesr = SREG_READ("S3_1_C15_C6_0");
		pmesr &= PMESR_EVT_CLEAR(counter, 6);
		pmesr |= PMESR_EVT_ENCODE(config, counter, 6);
		SREG_WRITE("S3_1_C15_C6_0", pmesr);
		saved_PMESR[cpuid][1] = pmesr;
		break;
	default:
		break;
	}

	set_modes(counter, config);
}

/* internal functions */

void
kpc_arch_init(void)
{
}

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}

uint32_t
kpc_fixed_count(void)
{
	return KPC_ARM64_FIXED_COUNT;
}

uint32_t
kpc_configurable_count(void)
{
	return KPC_ARM64_CONFIGURABLE_COUNT;
}

uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}

static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & target_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(offset + i));

		if ((1ULL << i) & state_mask) {
			enable_counter(offset + i);
		} else {
			disable_counter(offset + i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}

static uint32_t kpc_xcall_sync;
static void
kpc_set_running_xcall( void *vstate )
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
	assert(mp_config);

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);

	if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xcall_sync);
	}
}

static uint32_t kpc_xread_sync;
static void
kpc_get_curcpu_counters_xcall(void *args)
{
	struct kpc_get_counters_remote *handler = args;

	assert(handler != NULL);
	assert(handler->buf != NULL);

	int offset = cpu_number() * handler->buf_stride;
	int r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic */
	os_atomic_add(&(handler->nb_counters), r, relaxed);

	if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xread_sync);
	}
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	assert(buf != NULL);

	int enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	struct kpc_get_counters_remote hdl = {
		.classes = classes,
		.nb_counters = 0,
		.buf = buf,
		.buf_stride = kpc_get_counter_count(classes)
	};

	cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);
	int offset = hdl.nb_counters;

	(void)ml_set_interrupts_enabled(enabled);

	return offset;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
#if MONOTONIC
	mt_fixed_counts(counterv);
	return 0;
#else /* MONOTONIC */
#pragma unused(counterv)
	return ENOTSUP;
#endif /* !MONOTONIC */
}

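/*
 * Each configurable counter is reported as a running total: the shadow value
 * accumulated at the last reload, plus the progress since the counter was
 * reloaded. Counters are programmed to RELOAD = max - period and count up
 * (see kpc_set_reload_xcall() below); if the overflow bit is set, one full
 * wrap (max - RELOAD + 1) is folded in before the current hardware value.
 */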
int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	uint64_t ctr = 0ULL;

	assert(counterv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		ctr = read_counter(i + offset);

		if (ctr & KPC_ARM64_COUNTER_OVF_MASK) {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
			    (ctr & KPC_ARM64_COUNTER_MASK);
		} else {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (ctr - CONFIGURABLE_RELOAD(i));
		}

		*counterv++ = ctr;
	}

	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	assert(configv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*configv++ = get_counter_config(i + offset);
		}
	}
	return 0;
}

static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	assert(configv);

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(i + offset));

		set_counter_config(i + offset, *configv++);
	}

	ml_set_interrupts_enabled(enabled);

	return 0;
}

static uint32_t kpc_config_sync;
static void
kpc_set_config_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0ULL;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(new_config, mp_config->pmc_mask);
		new_config += kpc_popcount(mp_config->pmc_mask);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		kpc_set_rawpmu_config(new_config);
		new_config += RAWPMU_CONFIG_COUNT;
	}

	if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_config_sync);
	}
}

static uint64_t
kpc_reload_counter(uint32_t ctr)
{
	assert(ctr < (kpc_configurable_count() + kpc_fixed_count()));

	uint64_t old = read_counter(ctr);

	if (kpc_controls_counter(ctr)) {
		write_counter(ctr, FIXED_RELOAD(ctr));
		return old & KPC_ARM64_COUNTER_MASK;
	} else {
		/*
		 * Unset the overflow bit to clear the condition that drives
		 * PMIs. The power manager is not interested in handling PMIs.
		 */
		write_counter(ctr, old & KPC_ARM64_COUNTER_MASK);
		return 0;
	}
}

static uint32_t kpc_reload_sync;
static void
kpc_set_reload_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters, this cannot be done for only
		 * selected PMCs. Otherwise, we would corrupt the configurable
		 * shadow buffer since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* ignore the counter */
			if (((1ULL << i) & mp_config->pmc_mask) == 0) {
				continue;
			}
			if (*new_period == 0) {
				*new_period = kpc_configurable_max();
			}
			CONFIGURABLE_RELOAD(i) = max - *new_period;
			/* reload the counter */
			kpc_reload_counter(offset + i);
			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);

	if (os_atomic_dec(&kpc_reload_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_reload_sync);
	}
}

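/*
 * kpc_pmi_handler() runs when a counter overflows: it reloads the counter,
 * folds the elapsed period (plus any extra counts past the overflow) into
 * the shadow value, and, if an action is attached to the counter, samples
 * kperf with the interrupted PC and counting-mode flags derived from the
 * counter's configuration word.
 */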
void
kpc_pmi_handler(unsigned int ctr)
{
	uint64_t extra = kpc_reload_counter(ctr);

	FIXED_SHADOW(ctr) += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

	if (FIXED_ACTIONID(ctr)) {
		uintptr_t pc = 0;
		bool kernel = true;
		struct arm_saved_state *state;
		state = getCpuDatap()->cpu_int_state;
		if (state) {
			kernel = !PSR64_IS_USER(get_saved_state_cpsr(state));
			pc = get_saved_state_pc(state);
			if (kernel) {
				pc = VM_KERNEL_UNSLIDE(pc);
			}
		}

		uint64_t config = get_counter_config(ctr);
		kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
		bool custom_mode = false;
		if ((config & CFGWORD_EL0A32EN_MASK) || (config & CFGWORD_EL0A64EN_MASK)) {
			flags |= KPC_USER_COUNTING;
			custom_mode = true;
		}
		if ((config & CFGWORD_EL1EN_MASK)) {
			flags |= KPC_KERNEL_COUNTING;
			custom_mode = true;
		}
		/*
		 * For backwards-compatibility.
		 */
		if (!custom_mode) {
			flags |= KPC_USER_COUNTING | KPC_KERNEL_COUNTING;
		}
		kpc_sample_kperf(FIXED_ACTIONID(ctr), ctr, config & 0xff, FIXED_SHADOW(ctr),
		    pc, flags);
	}
}

uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_RAWPMU_MASK;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	assert(mp_config != NULL);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;
	kpc_configured = 1;

	return 0;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
	assert(mp_config);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	uint32_t count = kpc_popcount(mp_config->pmc_mask);

	assert(mp_config);
	assert(mp_config->configv);

	/* check config against whitelist for external devs */
	for (uint32_t i = 0; i < count; ++i) {
		if (!whitelist_disabled && !config_in_whitelist(mp_config->configv[i])) {
			return EPERM;
		}
	}

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}

void
kpc_idle(void)
{
	if (kpc_configured) {
		save_regs();
	}
}

void
kpc_idle_exit(void)
{
	if (kpc_configured) {
		restore_regs();
	}
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}

int
kpc_disable_whitelist( int val )
{
	whitelist_disabled = val;
	return 0;
}

int
kpc_get_whitelist_disabled( void )
{
	return whitelist_disabled;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ARM_APPLE;
}

#else /* APPLE_ARM64_ARCH_FAMILY */

/* We don't currently support non-Apple arm64 PMU configurations like PMUv3 */

void
kpc_arch_init(void)
{
	/* No-op */
}

uint32_t
kpc_get_classes(void)
{
	return 0;
}

uint32_t
kpc_fixed_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_count(void)
{
	return 0;
}

uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask __unused)
{
	return 0;
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return 0;
}

uint64_t
kpc_configurable_max(void)
{
	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_fixed_counters(uint64_t *counterv __unused)
{
	return 0;
}

boolean_t
kpc_is_running_fixed(void)
{
	return FALSE;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask __unused)
{
	return FALSE;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

void
kpc_idle(void)
{
	// do nothing
}

void
kpc_idle_exit(void)
{
	// do nothing
}

int
kpc_get_all_cpus_counters(uint32_t classes __unused, int *curcpu __unused, uint64_t *buf __unused)
{
	return 0;
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ERROR;
}

uint32_t
kpc_rawpmu_config_count(void)
{
	return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}

int
kpc_disable_whitelist( int val __unused )
{
	return 0;
}

int
kpc_get_whitelist_disabled( void )
{
	return 0;
}

#endif /* !APPLE_ARM64_ARCH_FAMILY */