/*
 * Copyright (c) 2012-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <kern/kalloc.h>
#include <kern/kpc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <stdint.h>
#include <sys/errno.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

void kpc_pmi_handler(unsigned int ctr);

/*
 * PMCs 8 and 9 were added in Hurricane; to maintain the existing bit
 * positions of the other PMCs, their configuration bits start at position 32.
 */
#define PMCR_PMC_8_9_OFFSET (32)
#define PMCR_PMC_8_9_SHIFT(PMC) (((PMC) - 8) + PMCR_PMC_8_9_OFFSET)
#define PMCR_PMC_SHIFT(PMC) (((PMC) <= 7) ? (PMC) : \
    PMCR_PMC_8_9_SHIFT(PMC))
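
/*
 * To make the layout concrete, a few compile-time checks (a sanity sketch
 * derived from the macros above, not required for correctness): PMCs 0-7
 * keep their natural bit positions, while the configuration bits for
 * PMCs 8 and 9 land at bits 32 and 33.
 */
_Static_assert(PMCR_PMC_SHIFT(7) == 7, "PMC 7 keeps its natural bit position");
_Static_assert(PMCR_PMC_SHIFT(8) == 32, "PMC 8 config bits start at bit 32");
_Static_assert(PMCR_PMC_SHIFT(9) == 33, "PMC 9 config bits follow at bit 33");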

/*
 * PMCR0 controls enabling, interrupts, and overflow of performance counters.
 */

/* PMC is enabled */
#define PMCR0_PMC_ENABLE_MASK(PMC) (UINT64_C(0x1) << PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMC_DISABLE_MASK(PMC) (~PMCR0_PMC_ENABLE_MASK(PMC))

/* how interrupts are generated on PMIs */
#define PMCR0_INTGEN_SHIFT (8)
#define PMCR0_INTGEN_MASK (UINT64_C(0x7) << PMCR0_INTGEN_SHIFT)
#define PMCR0_INTGEN_OFF (UINT64_C(0) << PMCR0_INTGEN_SHIFT)
#define PMCR0_INTGEN_PMI (UINT64_C(1) << PMCR0_INTGEN_SHIFT)
#define PMCR0_INTGEN_AIC (UINT64_C(2) << PMCR0_INTGEN_SHIFT)
#define PMCR0_INTGEN_DBG_HLT (UINT64_C(3) << PMCR0_INTGEN_SHIFT)
#define PMCR0_INTGEN_FIQ (UINT64_C(4) << PMCR0_INTGEN_SHIFT)

/* 10 unused */

/* set by hardware if PMI was generated */
#define PMCR0_PMAI_SHIFT (11)
#define PMCR0_PMAI_MASK (UINT64_C(1) << PMCR0_PMAI_SHIFT)

/* overflow on a PMC generates an interrupt */
#define PMCR0_PMI_OFFSET (12)
#define PMCR0_PMI_SHIFT(PMC) (PMCR0_PMI_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMI_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR0_PMI_SHIFT(PMC))
#define PMCR0_PMI_DISABLE_MASK(PMC) (~PMCR0_PMI_ENABLE_MASK(PMC))

/* disable counting when a PMI is signaled (except for AIC interrupts) */
#define PMCR0_DISCNT_SHIFT (20)
#define PMCR0_DISCNT_ENABLE_MASK (UINT64_C(1) << PMCR0_DISCNT_SHIFT)
#define PMCR0_DISCNT_DISABLE_MASK (~PMCR0_DISCNT_ENABLE_MASK)

/* 21 unused */

/* block PMIs until ERET retires */
#define PMCR0_WFRFE_SHIFT (22)
#define PMCR0_WFRFE_ENABLE_MASK (UINT64_C(1) << PMCR0_WFRFE_SHIFT)
#define PMCR0_WFRFE_DISABLE_MASK (~PMCR0_WFRFE_ENABLE_MASK)

/* count global L2C events */
#define PMCR0_L2CGLOBAL_SHIFT (23)
#define PMCR0_L2CGLOBAL_ENABLE_MASK (UINT64_C(1) << PMCR0_L2CGLOBAL_SHIFT)
#define PMCR0_L2CGLOBAL_DISABLE_MASK (~PMCR0_L2CGLOBAL_ENABLE_MASK)

/* allow user mode access to configuration registers */
#define PMCR0_USEREN_SHIFT (30)
#define PMCR0_USEREN_ENABLE_MASK (UINT64_C(1) << PMCR0_USEREN_SHIFT)
#define PMCR0_USEREN_DISABLE_MASK (~PMCR0_USEREN_ENABLE_MASK)

/* force the CPMU clocks in case of a clocking bug */
#define PMCR0_CLKEN_SHIFT (31)
#define PMCR0_CLKEN_ENABLE_MASK (UINT64_C(1) << PMCR0_CLKEN_SHIFT)
#define PMCR0_CLKEN_DISABLE_MASK (~PMCR0_CLKEN_ENABLE_MASK)

/* 32 - 44 mirror the low bits for PMCs 8 and 9 */

/* PMCR1 enables counters in different processor modes */

#define PMCR1_EL0_A32_OFFSET (0)
#define PMCR1_EL0_A64_OFFSET (8)
#define PMCR1_EL1_A64_OFFSET (16)
#define PMCR1_EL3_A64_OFFSET (24)

#define PMCR1_EL0_A32_SHIFT(PMC) (PMCR1_EL0_A32_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL0_A64_SHIFT(PMC) (PMCR1_EL0_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL1_A64_SHIFT(PMC) (PMCR1_EL1_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL3_A64_SHIFT(PMC) (PMCR1_EL3_A64_OFFSET + PMCR_PMC_SHIFT(PMC))

#define PMCR1_EL0_A32_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A32_SHIFT(PMC))
#define PMCR1_EL0_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A64_SHIFT(PMC))
#define PMCR1_EL1_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL1_A64_SHIFT(PMC))
/* PMCR1_EL3_A64 is not supported on PMCs 8 and 9 */
#if NO_MONITOR
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) UINT64_C(0)
#else
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL3_A64_SHIFT(PMC))
#endif

#define PMCR1_EL_ALL_ENABLE_MASK(PMC) (PMCR1_EL0_A32_ENABLE_MASK(PMC) | \
    PMCR1_EL0_A64_ENABLE_MASK(PMC) | \
    PMCR1_EL1_A64_ENABLE_MASK(PMC) | \
    PMCR1_EL3_A64_ENABLE_MASK(PMC))
#define PMCR1_EL_ALL_DISABLE_MASK(PMC) (~PMCR1_EL_ALL_ENABLE_MASK(PMC))

/* PMESR0 and PMESR1 are event selection registers */

/* PMESR0 selects which event is counted on PMCs 2, 3, 4, and 5 */
/* PMESR1 selects which event is counted on PMCs 6, 7, 8, and 9 */

#define PMESR_PMC_WIDTH (8)
#define PMESR_PMC_MASK (UINT8_MAX)
#define PMESR_SHIFT(PMC, OFF) (8 * ((PMC) - (OFF)))
#define PMESR_EVT_MASK(PMC, OFF) (PMESR_PMC_MASK << PMESR_SHIFT(PMC, OFF))
#define PMESR_EVT_CLEAR(PMC, OFF) (~PMESR_EVT_MASK(PMC, OFF))

#define PMESR_EVT_DECODE(PMESR, PMC, OFF) \
    (((PMESR) >> PMESR_SHIFT(PMC, OFF)) & PMESR_PMC_MASK)
#define PMESR_EVT_ENCODE(EVT, PMC, OFF) \
    (((EVT) & PMESR_PMC_MASK) << PMESR_SHIFT(PMC, OFF))

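/*
 * A worked example, checked at compile time: each PMC owns one byte of its
 * PMESR, indexed relative to the register's first PMC (the OFF argument).
 * Event 0x8c programmed on PMC 7 lands in byte 1 of PMESR1, and decoding
 * recovers the same event number.
 */
_Static_assert(PMESR_SHIFT(7, 6) == 8, "PMC 7 uses byte 1 of PMESR1");
_Static_assert(PMESR_EVT_ENCODE(0x8c, 7, 6) == 0x8c00, "event 0x8c shifts into byte 1");
_Static_assert(PMESR_EVT_DECODE(0x8c00, 7, 6) == 0x8c, "decode inverts encode");
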
/* system registers in the CPMU */

#define SREG_PMCR0 "S3_1_c15_c0_0"
#define SREG_PMCR1 "S3_1_c15_c1_0"
#define SREG_PMCR2 "S3_1_c15_c2_0"
#define SREG_PMCR3 "S3_1_c15_c3_0"
#define SREG_PMCR4 "S3_1_c15_c4_0"
#define SREG_PMESR0 "S3_1_c15_c5_0"
#define SREG_PMESR1 "S3_1_c15_c6_0"
#define SREG_PMSR "S3_1_c15_c13_0"
#define SREG_OPMAT0 "S3_1_c15_c7_0"
#define SREG_OPMAT1 "S3_1_c15_c8_0"
#define SREG_OPMSK0 "S3_1_c15_c9_0"
#define SREG_OPMSK1 "S3_1_c15_c10_0"

#define SREG_PMC0 "S3_2_c15_c0_0"
#define SREG_PMC1 "S3_2_c15_c1_0"
#define SREG_PMC2 "S3_2_c15_c2_0"
#define SREG_PMC3 "S3_2_c15_c3_0"
#define SREG_PMC4 "S3_2_c15_c4_0"
#define SREG_PMC5 "S3_2_c15_c5_0"
#define SREG_PMC6 "S3_2_c15_c6_0"
#define SREG_PMC7 "S3_2_c15_c7_0"
#define SREG_PMC8 "S3_2_c15_c9_0"
#define SREG_PMC9 "S3_2_c15_c10_0"

#if !defined(APPLECYCLONE)
#define SREG_PMMMAP "S3_2_c15_c15_0"
#define SREG_PMTRHLD2 "S3_2_c15_c14_0"
#define SREG_PMTRHLD4 "S3_2_c15_c13_0"
#define SREG_PMTRHLD6 "S3_2_c15_c12_0"
#endif

/*
 * The low 8 bits of a configuration word select the event to program on
 * PMESR{0,1}. Bits 16-19 are mapped to PMCR1 bits.
 */
#define CFGWORD_EL0A32EN_MASK (0x10000)
#define CFGWORD_EL0A64EN_MASK (0x20000)
#define CFGWORD_EL1EN_MASK (0x40000)
#define CFGWORD_EL3EN_MASK (0x80000)
#define CFGWORD_ALLMODES_MASK (0xf0000)

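/*
 * Sanity check: the all-modes mask is exactly the union of the four
 * per-mode bits, so a config word such as 0x2008c reads as "count event
 * 0x8c in 64-bit EL0 only".
 */
_Static_assert(CFGWORD_ALLMODES_MASK ==
    (CFGWORD_EL0A32EN_MASK | CFGWORD_EL0A64EN_MASK |
    CFGWORD_EL1EN_MASK | CFGWORD_EL3EN_MASK),
    "mode bits cover bits 16-19 of a config word");
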
/* ACC offsets for PIO */
#define ACC_CPMU_PMC0_OFFSET (0x200)
#define ACC_CPMU_PMC8_OFFSET (0x280)

/*
 * Macros for reading and writing system registers.
 *
 * SR must be one of the SREG_* defines above.
 */
#define SREG_WRITE(SR, V) __asm__ volatile("msr " SR ", %0 ; isb" : : "r"(V))
#define SREG_READ(SR) ({ uint64_t VAL; \
    __asm__ volatile("mrs %0, " SR : "=r"(VAL)); \
    VAL; })

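/*
 * For example, SREG_READ(SREG_PMCR0) relies on string-literal concatenation
 * and expands to roughly:
 *
 *     uint64_t VAL;
 *     __asm__ volatile("mrs %0, S3_1_c15_c0_0" : "=r"(VAL));
 *
 * The trailing "isb" in SREG_WRITE keeps a configuration write from being
 * reordered against subsequent counter accesses.
 */
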
/*
 * Configuration registers that can be controlled by RAWPMU:
 *
 * All: PMCR2-4, OPMAT0-1, OPMSK0-1.
 * Typhoon/Twister/Hurricane: PMMMAP, PMTRHLD2/4/6.
 */
#if defined(APPLECYCLONE)
#define RAWPMU_CONFIG_COUNT 7
#else
#define RAWPMU_CONFIG_COUNT 11
#endif

/* TODO: allocate dynamically */
static uint64_t saved_PMCR[MAX_CPUS][2];
static uint64_t saved_PMESR[MAX_CPUS][2];
static uint64_t saved_RAWPMU[MAX_CPUS][RAWPMU_CONFIG_COUNT];
static uint64_t saved_counter[MAX_CPUS][KPC_MAX_COUNTERS];
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_configured = 0;

/*
 * The whitelist is disabled by default on development and debug kernels;
 * this can be changed via the kpc.disable_whitelist sysctl. On release
 * kernels, the whitelist is enabled and cannot be disabled.
 */
#if DEVELOPMENT || DEBUG
static boolean_t whitelist_disabled = TRUE;
#else
static boolean_t whitelist_disabled = FALSE;
#endif

/* List of counter events that are allowed externally */
static kpc_config_t whitelist[] = {
    0, /* NO_EVENT */

#if defined(APPLECYCLONE)
    0x02, /* CORE_CYCLE */
    0x19, /* BIU_UPSTREAM_CYCLE */
    0x1a, /* BIU_DOWNSTREAM_CYCLE */
    0x22, /* L2C_AGENT_LD */
    0x23, /* L2C_AGENT_LD_MISS */
    0x24, /* L2C_AGENT_ST */
    0x25, /* L2C_AGENT_ST_MISS */
    0x78, /* INST_A32 */
    0x79, /* INST_THUMB */
    0x7a, /* INST_A64 */
    0x7b, /* INST_BRANCH */
    0xb4, /* SYNC_DC_LOAD_MISS */
    0xb5, /* SYNC_DC_STORE_MISS */
    0xb6, /* SYNC_DTLB_MISS */
    0xb9, /* SYNC_ST_HIT_YNGR_LD */
    0xc0, /* SYNC_BR_ANY_MISP */
    0xce, /* FED_IC_MISS_DEM */
    0xcf, /* FED_ITLB_MISS */

#elif defined(APPLETYPHOON)
    0x02, /* CORE_CYCLE */
    0x13, /* BIU_UPSTREAM_CYCLE */
    0x14, /* BIU_DOWNSTREAM_CYCLE */
    0x1a, /* L2C_AGENT_LD */
    0x1b, /* L2C_AGENT_LD_MISS */
    0x1c, /* L2C_AGENT_ST */
    0x1d, /* L2C_AGENT_ST_MISS */
    0x8a, /* INST_A32 */
    0x8b, /* INST_THUMB */
    0x8c, /* INST_A64 */
    0x8d, /* INST_BRANCH */
    0xbf, /* SYNC_DC_LOAD_MISS */
    0xc0, /* SYNC_DC_STORE_MISS */
    0xc1, /* SYNC_DTLB_MISS */
    0xc4, /* SYNC_ST_HIT_YNGR_LD */
    0xcb, /* SYNC_BR_ANY_MISP */
    0xd3, /* FED_IC_MISS_DEM */
    0xd4, /* FED_ITLB_MISS */

#elif defined(APPLETWISTER) || defined(APPLEHURRICANE)
    0x02, /* CORE_CYCLE */
    0x1a, /* L2C_AGENT_LD */
    0x1b, /* L2C_AGENT_LD_MISS */
    0x1c, /* L2C_AGENT_ST */
    0x1d, /* L2C_AGENT_ST_MISS */
    0x8a, /* INST_A32 */
    0x8b, /* INST_THUMB */
    0x8c, /* INST_A64 */
    0x8d, /* INST_BRANCH */
    0xbf, /* SYNC_DC_LOAD_MISS */
    0xc0, /* SYNC_DC_STORE_MISS */
    0xc1, /* SYNC_DTLB_MISS */
    0xc4, /* SYNC_ST_HIT_YNGR_LD */
    0xcb, /* SYNC_BR_ANY_MISP */
    0xd3, /* FED_IC_MISS_DEM */
    0xd4, /* FED_ITLB_MISS */

#elif defined(APPLEMONSOON)
    0x02, /* CORE_CYCLE */
    0x8a, /* INST_A32 */
    0x8b, /* INST_THUMB */
    0x8c, /* INST_A64 */
    0x8d, /* INST_BRANCH */
    0xbf, /* SYNC_DC_LOAD_MISS */
    0xc0, /* SYNC_DC_STORE_MISS */
    0xc1, /* SYNC_DTLB_MISS */
    0xc4, /* SYNC_ST_HIT_YNGR_LD */
    0xcb, /* SYNC_BR_ANY_MISP */
    0xd3, /* FED_IC_MISS_DEM */
    0xd4, /* FED_ITLB_MISS */

#else
    /* An unknown CPU gets a trivial { NO_EVENT } whitelist. */
#endif
};
#define WHITELIST_COUNT (sizeof(whitelist) / sizeof(*whitelist))

static boolean_t
config_in_whitelist(kpc_config_t cfg)
{
    unsigned int i;

    for (i = 0; i < WHITELIST_COUNT; i++) {
        if (cfg == whitelist[i]) {
            return TRUE;
        }
    }

    return FALSE;
}

#ifdef KPC_DEBUG
static void
dump_regs(void)
{
    kprintf("PMCR0 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR0));
    kprintf("PMCR1 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR1));
    kprintf("PMCR2 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR2));
    kprintf("PMCR3 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR3));
    kprintf("PMCR4 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR4));
    kprintf("PMESR0 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMESR0));
    kprintf("PMESR1 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMESR1));

    kprintf("PMC0 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC0));
    kprintf("PMC1 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC1));
    kprintf("PMC2 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC2));
    kprintf("PMC3 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC3));
    kprintf("PMC4 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC4));
    kprintf("PMC5 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC5));
    kprintf("PMC6 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC6));
    kprintf("PMC7 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC7));

#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
    kprintf("PMC8 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC8));
    kprintf("PMC9 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC9));
#endif
}
#endif

static boolean_t
enable_counter(uint32_t counter)
{
    int cpuid = cpu_number();
    uint64_t pmcr0 = 0, intgen_type;
    boolean_t counter_running, pmi_enabled, intgen_correct, enabled;

    /* leave the fixed counters enabled for monotonic */
    pmcr0 = SREG_READ(SREG_PMCR0) | 0x3;

    counter_running = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;
    pmi_enabled = (pmcr0 & PMCR0_PMI_ENABLE_MASK(counter)) != 0;

    /*
     * TODO: this should use the PMI path rather than AIC for the
     * interrupt, as it is faster.
     */
    intgen_type = PMCR0_INTGEN_AIC;
    intgen_correct = (pmcr0 & PMCR0_INTGEN_MASK) == intgen_type;

    enabled = counter_running && pmi_enabled && intgen_correct;

    if (!enabled) {
        pmcr0 |= PMCR0_PMC_ENABLE_MASK(counter);
        pmcr0 |= PMCR0_PMI_ENABLE_MASK(counter);
        pmcr0 &= ~PMCR0_INTGEN_MASK;
        pmcr0 |= intgen_type;

        SREG_WRITE(SREG_PMCR0, pmcr0);
    }

    saved_PMCR[cpuid][0] = pmcr0;
    return enabled;
}

static boolean_t
disable_counter(uint32_t counter)
{
    uint64_t pmcr0;
    boolean_t enabled;
    int cpuid = cpu_number();

    if (counter < 2) {
        return TRUE;
    }

    pmcr0 = SREG_READ(SREG_PMCR0) | 0x3;
    enabled = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;

    if (enabled) {
        pmcr0 &= PMCR0_PMC_DISABLE_MASK(counter);
        SREG_WRITE(SREG_PMCR0, pmcr0);
    }

    saved_PMCR[cpuid][0] = pmcr0;
    return enabled;
}

/*
 * Enable counter in processor modes determined by configuration word.
 */
static void
set_modes(uint32_t counter, kpc_config_t cfgword)
{
    uint64_t bits = 0;
    int cpuid = cpu_number();

    if (cfgword & CFGWORD_EL0A32EN_MASK) {
        bits |= PMCR1_EL0_A32_ENABLE_MASK(counter);
    }
    if (cfgword & CFGWORD_EL0A64EN_MASK) {
        bits |= PMCR1_EL0_A64_ENABLE_MASK(counter);
    }
    if (cfgword & CFGWORD_EL1EN_MASK) {
        bits |= PMCR1_EL1_A64_ENABLE_MASK(counter);
    }
#if !NO_MONITOR
    if (cfgword & CFGWORD_EL3EN_MASK) {
        bits |= PMCR1_EL3_A64_ENABLE_MASK(counter);
    }
#endif

    /*
     * Backwards compatibility: Writing a non-zero configuration word with
     * all zeros in bits 16-19 is interpreted as enabling in all modes.
     * This matches the behavior when the PMCR1 bits weren't exposed.
     */
    if (bits == 0 && cfgword != 0) {
        bits = PMCR1_EL_ALL_ENABLE_MASK(counter);
    }

    uint64_t pmcr1 = SREG_READ(SREG_PMCR1);
    pmcr1 &= PMCR1_EL_ALL_DISABLE_MASK(counter);
    pmcr1 |= bits;
    pmcr1 |= 0x30303; /* monotonic compatibility */
    SREG_WRITE(SREG_PMCR1, pmcr1);
    saved_PMCR[cpuid][1] = pmcr1;
}

static uint64_t
read_counter(uint32_t counter)
{
    switch (counter) {
    // case 0: return SREG_READ(SREG_PMC0);
    // case 1: return SREG_READ(SREG_PMC1);
    case 2: return SREG_READ(SREG_PMC2);
    case 3: return SREG_READ(SREG_PMC3);
    case 4: return SREG_READ(SREG_PMC4);
    case 5: return SREG_READ(SREG_PMC5);
    case 6: return SREG_READ(SREG_PMC6);
    case 7: return SREG_READ(SREG_PMC7);
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
    case 8: return SREG_READ(SREG_PMC8);
    case 9: return SREG_READ(SREG_PMC9);
#endif
    default: return 0;
    }
}

static void
write_counter(uint32_t counter, uint64_t value)
{
    switch (counter) {
    // case 0: SREG_WRITE(SREG_PMC0, value); break;
    // case 1: SREG_WRITE(SREG_PMC1, value); break;
    case 2: SREG_WRITE(SREG_PMC2, value); break;
    case 3: SREG_WRITE(SREG_PMC3, value); break;
    case 4: SREG_WRITE(SREG_PMC4, value); break;
    case 5: SREG_WRITE(SREG_PMC5, value); break;
    case 6: SREG_WRITE(SREG_PMC6, value); break;
    case 7: SREG_WRITE(SREG_PMC7, value); break;
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
    case 8: SREG_WRITE(SREG_PMC8, value); break;
    case 9: SREG_WRITE(SREG_PMC9, value); break;
#endif
    default: break;
    }
}

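/*
 * PMCs 0 and 1 are the fixed counters; when MONOTONIC is configured they
 * are owned by the monotonic subsystem, which is why the case 0/1 arms
 * above are commented out and callers start iterating at counter 2.
 */
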
uint32_t
kpc_rawpmu_config_count(void)
{
    return RAWPMU_CONFIG_COUNT;
}

int
kpc_get_rawpmu_config(kpc_config_t *configv)
{
    configv[0] = SREG_READ(SREG_PMCR2);
    configv[1] = SREG_READ(SREG_PMCR3);
    configv[2] = SREG_READ(SREG_PMCR4);
    configv[3] = SREG_READ(SREG_OPMAT0);
    configv[4] = SREG_READ(SREG_OPMAT1);
    configv[5] = SREG_READ(SREG_OPMSK0);
    configv[6] = SREG_READ(SREG_OPMSK1);
#if RAWPMU_CONFIG_COUNT > 7
    configv[7] = SREG_READ(SREG_PMMMAP);
    configv[8] = SREG_READ(SREG_PMTRHLD2);
    configv[9] = SREG_READ(SREG_PMTRHLD4);
    configv[10] = SREG_READ(SREG_PMTRHLD6);
#endif
    return 0;
}

static int
kpc_set_rawpmu_config(kpc_config_t *configv)
{
    SREG_WRITE(SREG_PMCR2, configv[0]);
    SREG_WRITE(SREG_PMCR3, configv[1]);
    SREG_WRITE(SREG_PMCR4, configv[2]);
    SREG_WRITE(SREG_OPMAT0, configv[3]);
    SREG_WRITE(SREG_OPMAT1, configv[4]);
    SREG_WRITE(SREG_OPMSK0, configv[5]);
    SREG_WRITE(SREG_OPMSK1, configv[6]);
#if RAWPMU_CONFIG_COUNT > 7
    SREG_WRITE(SREG_PMMMAP, configv[7]);
    SREG_WRITE(SREG_PMTRHLD2, configv[8]);
    SREG_WRITE(SREG_PMTRHLD4, configv[9]);
    SREG_WRITE(SREG_PMTRHLD6, configv[10]);
#endif
    return 0;
}

static void
save_regs(void)
{
    int cpuid = cpu_number();

    __asm__ volatile("dmb ish");

    assert(ml_get_interrupts_enabled() == FALSE);

    /*
     * Save the current PMCR0 value. PMCR1 is tracked by set_modes() and
     * PMCR2-4 are part of the RAWPMU set saved below.
     */
    saved_PMCR[cpuid][0] = SREG_READ(SREG_PMCR0) | 0x3;

    /* Save event selections. */
    saved_PMESR[cpuid][0] = SREG_READ(SREG_PMESR0);
    saved_PMESR[cpuid][1] = SREG_READ(SREG_PMESR1);

    kpc_get_rawpmu_config(saved_RAWPMU[cpuid]);

    /* Disable the counters. */
    // SREG_WRITE(SREG_PMCR0, clear);

    /* Finally, save state for each counter. */
    for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
        saved_counter[cpuid][i] = read_counter(i);
    }
}

static void
restore_regs(void)
{
    int cpuid = cpu_number();

    /* Restore PMESR values. */
    SREG_WRITE(SREG_PMESR0, saved_PMESR[cpuid][0]);
    SREG_WRITE(SREG_PMESR1, saved_PMESR[cpuid][1]);

    kpc_set_rawpmu_config(saved_RAWPMU[cpuid]);

    /* Restore counter values. */
    for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
        write_counter(i, saved_counter[cpuid][i]);
    }

    /* Restore PMCR0/1 values (with PMCR0 last to enable). */
    SREG_WRITE(SREG_PMCR1, saved_PMCR[cpuid][1] | 0x30303);
    SREG_WRITE(SREG_PMCR0, saved_PMCR[cpuid][0] | 0x3);
}

static uint64_t
get_counter_config(uint32_t counter)
{
    uint64_t pmesr;

    switch (counter) {
    case 2: /* FALLTHROUGH */
    case 3: /* FALLTHROUGH */
    case 4: /* FALLTHROUGH */
    case 5:
        pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR0), counter, 2);
        break;
    case 6: /* FALLTHROUGH */
    case 7:
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
        /* FALLTHROUGH */
    case 8: /* FALLTHROUGH */
    case 9:
#endif
        pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR1), counter, 6);
        break;
    default:
        pmesr = 0;
        break;
    }

    kpc_config_t config = pmesr;

    uint64_t pmcr1 = SREG_READ(SREG_PMCR1);

    if (pmcr1 & PMCR1_EL0_A32_ENABLE_MASK(counter)) {
        config |= CFGWORD_EL0A32EN_MASK;
    }
    if (pmcr1 & PMCR1_EL0_A64_ENABLE_MASK(counter)) {
        config |= CFGWORD_EL0A64EN_MASK;
    }
    if (pmcr1 & PMCR1_EL1_A64_ENABLE_MASK(counter)) {
        config |= CFGWORD_EL1EN_MASK;
#if NO_MONITOR
        config |= CFGWORD_EL3EN_MASK;
#endif
    }
#if !NO_MONITOR
    if (pmcr1 & PMCR1_EL3_A64_ENABLE_MASK(counter)) {
        config |= CFGWORD_EL3EN_MASK;
    }
#endif

    return config;
}

static void
set_counter_config(uint32_t counter, uint64_t config)
{
    int cpuid = cpu_number();
    uint64_t pmesr = 0;

    switch (counter) {
    case 2: /* FALLTHROUGH */
    case 3: /* FALLTHROUGH */
    case 4: /* FALLTHROUGH */
    case 5:
        pmesr = SREG_READ(SREG_PMESR0);
        pmesr &= PMESR_EVT_CLEAR(counter, 2);
        pmesr |= PMESR_EVT_ENCODE(config, counter, 2);
        SREG_WRITE(SREG_PMESR0, pmesr);
        saved_PMESR[cpuid][0] = pmesr;
        break;

    case 6: /* FALLTHROUGH */
    case 7:
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
        /* FALLTHROUGH */
    case 8: /* FALLTHROUGH */
    case 9:
#endif
        pmesr = SREG_READ(SREG_PMESR1);
        pmesr &= PMESR_EVT_CLEAR(counter, 6);
        pmesr |= PMESR_EVT_ENCODE(config, counter, 6);
        SREG_WRITE(SREG_PMESR1, pmesr);
        saved_PMESR[cpuid][1] = pmesr;
        break;
    default:
        break;
    }

    set_modes(counter, config);
}

/* internal functions */

void
kpc_arch_init(void)
{
}

boolean_t
kpc_is_running_fixed(void)
{
    return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
    assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
    return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
           ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}

uint32_t
kpc_fixed_count(void)
{
    return KPC_ARM64_FIXED_COUNT;
}

uint32_t
kpc_configurable_count(void)
{
    return KPC_ARM64_CONFIGURABLE_COUNT;
}

uint32_t
kpc_fixed_config_count(void)
{
    return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
    assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
    return kpc_popcount(pmc_mask);
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
    return 0;
}

uint64_t
kpc_fixed_max(void)
{
    return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}

uint64_t
kpc_configurable_max(void)
{
    return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}

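/*
 * For instance, assuming the header defines KPC_ARM64_COUNTER_WIDTH as 47
 * (bit 47 being the overflow bit tested via KPC_ARM64_COUNTER_OVF_MASK
 * below), both maxima come out to 0x7fffffffffff.
 */
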
static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
    boolean_t enabled;

    enabled = ml_set_interrupts_enabled(FALSE);

    for (uint32_t i = 0; i < cfg_count; ++i) {
        if (((1ULL << i) & target_mask) == 0)
            continue;
        assert(kpc_controls_counter(offset + i));

        if ((1ULL << i) & state_mask) {
            enable_counter(offset + i);
        } else {
            disable_counter(offset + i);
        }
    }

    ml_set_interrupts_enabled(enabled);
}

static uint32_t kpc_xcall_sync;
static void
kpc_set_running_xcall(void *vstate)
{
    struct kpc_running_remote *mp_config = (struct kpc_running_remote *)vstate;
    assert(mp_config);

    set_running_configurable(mp_config->cfg_target_mask,
        mp_config->cfg_state_mask);

    if (hw_atomic_sub(&kpc_xcall_sync, 1) == 0)
        thread_wakeup((event_t) &kpc_xcall_sync);
}

static uint32_t kpc_xread_sync;
static void
kpc_get_curcpu_counters_xcall(void *args)
{
    struct kpc_get_counters_remote *handler = args;

    assert(handler != NULL);
    assert(handler->buf != NULL);

    int offset = cpu_number() * handler->buf_stride;
    int r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

    /* number of counters added by this CPU, needs to be atomic */
    hw_atomic_add(&(handler->nb_counters), r);

    if (hw_atomic_sub(&kpc_xread_sync, 1) == 0) {
        thread_wakeup((event_t) &kpc_xread_sync);
    }
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
    assert(buf != NULL);

    int enabled = ml_set_interrupts_enabled(FALSE);

    /* grab counters and CPU number as close as possible */
    if (curcpu) {
        *curcpu = current_processor()->cpu_id;
    }

    struct kpc_get_counters_remote hdl = {
        .classes = classes,
        .nb_counters = 0,
        .buf = buf,
        .buf_stride = kpc_get_counter_count(classes)
    };

    cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);
    int offset = hdl.nb_counters;

    (void)ml_set_interrupts_enabled(enabled);

    return offset;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
#if MONOTONIC
    mt_fixed_counts(counterv);
    return 0;
#else /* MONOTONIC */
#pragma unused(counterv)
    return ENOTSUP;
#endif /* !MONOTONIC */
}

int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
    uint64_t ctr = 0ULL;

    assert(counterv);

    for (uint32_t i = 0; i < cfg_count; ++i) {
        if (((1ULL << i) & pmc_mask) == 0)
            continue;
        ctr = read_counter(i + offset);

        if (ctr & KPC_ARM64_COUNTER_OVF_MASK) {
            ctr = CONFIGURABLE_SHADOW(i) +
                (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
                (ctr & KPC_ARM64_COUNTER_MASK);
        } else {
            ctr = CONFIGURABLE_SHADOW(i) +
                (ctr - CONFIGURABLE_RELOAD(i));
        }

        *counterv++ = ctr;
    }

    return 0;
}

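/*
 * Worked example of the overflow fix-up above: with a period of 100, the
 * reload value is max - 100. A counter that overflowed and now reads 5
 * yields shadow + (max - reload + 1) + 5 = shadow + 101 + 5 -- the 100
 * events up to max, the wrapping event itself, and the 5 events since.
 */
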
int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

    assert(configv);

    for (uint32_t i = 0; i < cfg_count; ++i)
        if ((1ULL << i) & pmc_mask)
            *configv++ = get_counter_config(i + offset);
    return 0;
}

static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
    uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
    boolean_t enabled;

    assert(configv);

    enabled = ml_set_interrupts_enabled(FALSE);

    for (uint32_t i = 0; i < cfg_count; ++i) {
        if (((1ULL << i) & pmc_mask) == 0)
            continue;
        assert(kpc_controls_counter(i + offset));

        set_counter_config(i + offset, *configv++);
    }

    ml_set_interrupts_enabled(enabled);

    return 0;
}

static uint32_t kpc_config_sync;
static void
kpc_set_config_xcall(void *vmp_config)
{
    struct kpc_config_remote *mp_config = vmp_config;
    kpc_config_t *new_config = NULL;
    uint32_t classes = 0;

    assert(mp_config);
    assert(mp_config->configv);
    classes = mp_config->classes;
    new_config = mp_config->configv;

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        kpc_set_configurable_config(new_config, mp_config->pmc_mask);
        new_config += kpc_popcount(mp_config->pmc_mask);
    }

    if (classes & KPC_CLASS_RAWPMU_MASK) {
        kpc_set_rawpmu_config(new_config);
        new_config += RAWPMU_CONFIG_COUNT;
    }

    if (hw_atomic_sub(&kpc_config_sync, 1) == 0)
        thread_wakeup((event_t) &kpc_config_sync);
}

static uint64_t
kpc_reload_counter(uint32_t ctr)
{
    assert(ctr < (kpc_configurable_count() + kpc_fixed_count()));

    uint64_t old = read_counter(ctr);

    if (kpc_controls_counter(ctr)) {
        write_counter(ctr, FIXED_RELOAD(ctr));
        return old & KPC_ARM64_COUNTER_MASK;
    } else {
        /*
         * Unset the overflow bit to clear the condition that drives
         * PMIs. The power manager is not interested in handling PMIs.
         */
        write_counter(ctr, old & KPC_ARM64_COUNTER_MASK);
        return 0;
    }
}

static uint32_t kpc_reload_sync;
static void
kpc_set_reload_xcall(void *vmp_config)
{
    struct kpc_config_remote *mp_config = vmp_config;
    uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
    uint64_t *new_period = NULL, max = kpc_configurable_max();
    boolean_t enabled;

    assert(mp_config);
    assert(mp_config->configv);
    classes = mp_config->classes;
    new_period = mp_config->configv;

    enabled = ml_set_interrupts_enabled(FALSE);

    if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
        /*
         * Update _all_ shadow counters, this cannot be done for only
         * selected PMCs. Otherwise, we would corrupt the configurable
         * shadow buffer since the PMCs are muxed according to the pmc
         * mask.
         */
        uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
        kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

        /* set the new period */
        count = kpc_configurable_count();
        for (uint32_t i = 0; i < count; ++i) {
            /* ignore the counter */
            if (((1ULL << i) & mp_config->pmc_mask) == 0)
                continue;
            if (*new_period == 0)
                *new_period = kpc_configurable_max();
            CONFIGURABLE_RELOAD(i) = max - *new_period;
            /* reload the counter */
            kpc_reload_counter(offset + i);
            /* next period value */
            new_period++;
        }
    }

    ml_set_interrupts_enabled(enabled);

    if (hw_atomic_sub(&kpc_reload_sync, 1) == 0)
        thread_wakeup((event_t) &kpc_reload_sync);
}

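/*
 * Note on the period arithmetic above: storing max - period as the reload
 * value means the counter overflows, and raises a PMI, after roughly
 * "period" events; kpc_reload_counter() then rearms it from the same
 * reload value, and the shadow accumulates the elapsed counts.
 */
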
void
kpc_pmi_handler(unsigned int ctr)
{
    uint64_t extra = kpc_reload_counter(ctr);

    FIXED_SHADOW(ctr) += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

    if (FIXED_ACTIONID(ctr)) {
        kpc_sample_kperf(FIXED_ACTIONID(ctr));
    }
}

uint32_t
kpc_get_classes(void)
{
    return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_RAWPMU_MASK;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
    assert(mp_config != NULL);

    /* dispatch to all CPUs */
    cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, mp_config);

    kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
    kpc_running_classes = mp_config->classes;
    kpc_configured = 1;

    return 0;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
    assert(mp_config);

    /* dispatch to all CPUs */
    cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

    kpc_configured = 1;

    return 0;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
    uint32_t count = kpc_popcount(mp_config->pmc_mask);

    assert(mp_config);
    assert(mp_config->configv);

    /* check config against whitelist for external devs */
    for (uint32_t i = 0; i < count; ++i) {
        if (!whitelist_disabled && !config_in_whitelist(mp_config->configv[i])) {
            return EPERM;
        }
    }

    /* dispatch to all CPUs */
    cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);

    kpc_configured = 1;

    return 0;
}

void
kpc_idle(void)
{
    if (kpc_configured) {
        save_regs();
    }
}

void
kpc_idle_exit(void)
{
    if (kpc_configured) {
        restore_regs();
    }
}

int
kpc_set_sw_inc(uint32_t mask __unused)
{
    return ENOTSUP;
}

int
kpc_disable_whitelist(int val)
{
    whitelist_disabled = val;
    return 0;
}

int
kpc_get_whitelist_disabled(void)
{
    return whitelist_disabled;
}

int
kpc_get_pmu_version(void)
{
    return KPC_PMU_ARM_APPLE;
}