/*
 * Copyright (c) 2012-2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <kern/cpu_number.h>
#include <kern/kpc.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <mach/mach_types.h>
#include <machine/machine_routines.h>
#include <stdint.h>
#include <sys/errno.h>

#if APPLE_ARM64_ARCH_FAMILY

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

void kpc_pmi_handler(unsigned int ctr);

/*
 * PMCs 8 and 9 were added to Hurricane; to maintain the existing bit
 * positions of the other PMCs, their configuration bits start at position 32.
 */
#define PMCR_PMC_8_9_OFFSET     (32)
#define PMCR_PMC_8_9_SHIFT(PMC) (((PMC) - 8) + PMCR_PMC_8_9_OFFSET)
#define PMCR_PMC_SHIFT(PMC)     (((PMC) <= 7) ? (PMC) : \
	PMCR_PMC_8_9_SHIFT(PMC))
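/*
 * Illustration (not part of the original source): PMCR_PMC_SHIFT maps
 * PMCs 0-7 to their own bit positions and PMCs 8 and 9 up to bits 32
 * and 33, e.g.:
 *
 *   PMCR_PMC_SHIFT(5) == 5
 *   PMCR_PMC_SHIFT(8) == PMCR_PMC_8_9_SHIFT(8) == 32
 *   PMCR_PMC_SHIFT(9) == PMCR_PMC_8_9_SHIFT(9) == 33
 */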

/*
 * PMCR0 controls enabling, interrupts, and overflow of performance counters.
 */

/* PMC is enabled */
#define PMCR0_PMC_ENABLE_MASK(PMC)  (UINT64_C(0x1) << PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMC_DISABLE_MASK(PMC) (~PMCR0_PMC_ENABLE_MASK(PMC))

/* overflow on a PMC generates an interrupt */
#define PMCR0_PMI_OFFSET            (12)
#define PMCR0_PMI_SHIFT(PMC)        (PMCR0_PMI_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR0_PMI_ENABLE_MASK(PMC)  (UINT64_C(1) << PMCR0_PMI_SHIFT(PMC))
#define PMCR0_PMI_DISABLE_MASK(PMC) (~PMCR0_PMI_ENABLE_MASK(PMC))

/* disable counting when a PMI is signaled (except for AIC interrupts) */
#define PMCR0_DISCNT_SHIFT        (20)
#define PMCR0_DISCNT_ENABLE_MASK  (UINT64_C(1) << PMCR0_DISCNT_SHIFT)
#define PMCR0_DISCNT_DISABLE_MASK (~PMCR0_DISCNT_ENABLE_MASK)

/* 21 unused */

/* block PMIs until ERET retires */
#define PMCR0_WFRFE_SHIFT        (22)
#define PMCR0_WFRFE_ENABLE_MASK  (UINT64_C(1) << PMCR0_WFRFE_SHIFT)
#define PMCR0_WFRFE_DISABLE_MASK (~PMCR0_WFRFE_ENABLE_MASK)

/* count global L2C events */
#define PMCR0_L2CGLOBAL_SHIFT        (23)
#define PMCR0_L2CGLOBAL_ENABLE_MASK  (UINT64_C(1) << PMCR0_L2CGLOBAL_SHIFT)
#define PMCR0_L2CGLOBAL_DISABLE_MASK (~PMCR0_L2CGLOBAL_ENABLE_MASK)

/* allow user mode access to configuration registers */
#define PMCR0_USEREN_SHIFT        (30)
#define PMCR0_USEREN_ENABLE_MASK  (UINT64_C(1) << PMCR0_USEREN_SHIFT)
#define PMCR0_USEREN_DISABLE_MASK (~PMCR0_USEREN_ENABLE_MASK)

/* force the CPMU clocks in case of a clocking bug */
#define PMCR0_CLKEN_SHIFT        (31)
#define PMCR0_CLKEN_ENABLE_MASK  (UINT64_C(1) << PMCR0_CLKEN_SHIFT)
#define PMCR0_CLKEN_DISABLE_MASK (~PMCR0_CLKEN_ENABLE_MASK)

/* 32 - 44 mirror the low bits for PMCs 8 and 9 */
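/*
 * Worked example (illustrative, not in the original source): enabling
 * PMC 2 with a PMI on overflow sets bit 2 (PMCR0_PMC_ENABLE_MASK(2))
 * and bit 14 (PMCR0_PMI_ENABLE_MASK(2), i.e. 12 + 2), so the
 * read-modify-write in enable_counter() below ORs 0x4004 into PMCR0.
 */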

/* PMCR1 enables counters in different processor modes */

#define PMCR1_EL0_A32_OFFSET (0)
#define PMCR1_EL0_A64_OFFSET (8)
#define PMCR1_EL1_A64_OFFSET (16)
#define PMCR1_EL3_A64_OFFSET (24)

#define PMCR1_EL0_A32_SHIFT(PMC) (PMCR1_EL0_A32_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL0_A64_SHIFT(PMC) (PMCR1_EL0_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL1_A64_SHIFT(PMC) (PMCR1_EL1_A64_OFFSET + PMCR_PMC_SHIFT(PMC))
#define PMCR1_EL3_A64_SHIFT(PMC) (PMCR1_EL3_A64_OFFSET + PMCR_PMC_SHIFT(PMC))

#define PMCR1_EL0_A32_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A32_SHIFT(PMC))
#define PMCR1_EL0_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL0_A64_SHIFT(PMC))
#define PMCR1_EL1_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL1_A64_SHIFT(PMC))
/* PMCR1_EL3_A64 is not supported on PMCs 8 and 9 */
#if NO_MONITOR
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) UINT64_C(0)
#else
#define PMCR1_EL3_A64_ENABLE_MASK(PMC) (UINT64_C(1) << PMCR1_EL3_A64_SHIFT(PMC))
#endif

#define PMCR1_EL_ALL_ENABLE_MASK(PMC)  (PMCR1_EL0_A32_ENABLE_MASK(PMC) | \
	PMCR1_EL0_A64_ENABLE_MASK(PMC) | \
	PMCR1_EL1_A64_ENABLE_MASK(PMC) | \
	PMCR1_EL3_A64_ENABLE_MASK(PMC))
#define PMCR1_EL_ALL_DISABLE_MASK(PMC) (~PMCR1_EL_ALL_ENABLE_MASK(PMC))

/* PMESR0 and PMESR1 are event selection registers */

/* PMESR0 selects which event is counted on PMCs 2, 3, 4, and 5 */
/* PMESR1 selects which event is counted on PMCs 6, 7, 8, and 9 */

#define PMESR_PMC_WIDTH           (8)
#define PMESR_PMC_MASK            (UINT8_MAX)
#define PMESR_SHIFT(PMC, OFF)     (8 * ((PMC) - (OFF)))
#define PMESR_EVT_MASK(PMC, OFF)  (PMESR_PMC_MASK << PMESR_SHIFT(PMC, OFF))
#define PMESR_EVT_CLEAR(PMC, OFF) (~PMESR_EVT_MASK(PMC, OFF))

#define PMESR_EVT_DECODE(PMESR, PMC, OFF) \
	(((PMESR) >> PMESR_SHIFT(PMC, OFF)) & PMESR_PMC_MASK)
#define PMESR_EVT_ENCODE(EVT, PMC, OFF) \
	(((EVT) & PMESR_PMC_MASK) << PMESR_SHIFT(PMC, OFF))
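/*
 * Worked example (illustrative, not in the original source): PMESR0
 * holds the event numbers for PMCs 2-5 in bytes 0-3 and PMESR1 holds
 * PMCs 6-9 in bytes 0-3.  Programming event 0x8c on PMC 6 therefore
 * uses PMESR_EVT_ENCODE(0x8c, 6, 6) == 0x8c in byte 0 of PMESR1, and
 * PMESR_EVT_DECODE(PMESR1, 6, 6) recovers 0x8c.
 */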

/* system registers in the CPMU */

#define SREG_PMCR0  "S3_1_c15_c0_0"
#define SREG_PMCR1  "S3_1_c15_c1_0"
#define SREG_PMCR2  "S3_1_c15_c2_0"
#define SREG_PMCR3  "S3_1_c15_c3_0"
#define SREG_PMCR4  "S3_1_c15_c4_0"
#define SREG_PMESR0 "S3_1_c15_c5_0"
#define SREG_PMESR1 "S3_1_c15_c6_0"
#define SREG_PMSR   "S3_1_c15_c13_0"
#define SREG_OPMAT0 "S3_1_c15_c7_0"
#define SREG_OPMAT1 "S3_1_c15_c8_0"
#define SREG_OPMSK0 "S3_1_c15_c9_0"
#define SREG_OPMSK1 "S3_1_c15_c10_0"

#define SREG_PMC0 "S3_2_c15_c0_0"
#define SREG_PMC1 "S3_2_c15_c1_0"
#define SREG_PMC2 "S3_2_c15_c2_0"
#define SREG_PMC3 "S3_2_c15_c3_0"
#define SREG_PMC4 "S3_2_c15_c4_0"
#define SREG_PMC5 "S3_2_c15_c5_0"
#define SREG_PMC6 "S3_2_c15_c6_0"
#define SREG_PMC7 "S3_2_c15_c7_0"
#define SREG_PMC8 "S3_2_c15_c9_0"
#define SREG_PMC9 "S3_2_c15_c10_0"

#define SREG_PMMMAP   "S3_2_c15_c15_0"
#define SREG_PMTRHLD2 "S3_2_c15_c14_0"
#define SREG_PMTRHLD4 "S3_2_c15_c13_0"
#define SREG_PMTRHLD6 "S3_2_c15_c12_0"

/*
 * The low 8 bits of a configuration word select the event to program on
 * PMESR{0,1}.  Bits 16-19 are mapped to PMCR1 bits.
 */
#define CFGWORD_EL0A32EN_MASK (0x10000)
#define CFGWORD_EL0A64EN_MASK (0x20000)
#define CFGWORD_EL1EN_MASK    (0x40000)
#define CFGWORD_EL3EN_MASK    (0x80000)
#define CFGWORD_ALLMODES_MASK (0xf0000)
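/*
 * Worked example (illustrative, not in the original source): a
 * configuration word of 0x2008c selects event 0x8c (the low byte) and
 * sets CFGWORD_EL0A64EN_MASK, i.e. count that event in 64-bit user
 * mode only.  A word of 0x8c alone leaves bits 16-19 clear, which
 * set_modes() below treats as "count in all modes" for compatibility.
 */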

/* ACC offsets for PIO */
#define ACC_CPMU_PMC0_OFFSET (0x200)
#define ACC_CPMU_PMC8_OFFSET (0x280)

/*
 * Macros for reading and writing system registers.
 *
 * SR must be one of the SREG_* defines above.
 */
#define SREG_WRITE(SR, V) __asm__ volatile("msr " SR ", %0 ; isb" : : "r"(V))
#define SREG_READ(SR)     ({ uint64_t VAL; \
	__asm__ volatile("mrs %0, " SR : "=r"(VAL)); \
	VAL; })
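/*
 * Usage sketch (illustrative, not in the original source):
 *
 *   uint64_t pmcr0 = SREG_READ(SREG_PMCR0);
 *   SREG_WRITE(SREG_PMCR0, pmcr0 | PMCR0_PMC_ENABLE_MASK(2));
 *
 * SREG_WRITE issues an ISB after the MSR so the new configuration is
 * synchronized before subsequent instructions execute.
 */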

/*
 * Configuration registers that can be controlled by RAWPMU:
 *
 * All: PMCR2-4, OPMAT0-1, OPMSK0-1.
 * Typhoon/Twister/Hurricane: PMMMAP, PMTRHLD2/4/6.
 */
#if HAS_EARLY_APPLE_CPMU
#define RAWPMU_CONFIG_COUNT 11
#else /* HAS_EARLY_APPLE_CPMU */
#define RAWPMU_CONFIG_COUNT 7
#endif /* !HAS_EARLY_APPLE_CPMU */

/* TODO: allocate dynamically */
static uint64_t saved_PMCR[MAX_CPUS][2];
static uint64_t saved_PMESR[MAX_CPUS][2];
static uint64_t saved_RAWPMU[MAX_CPUS][RAWPMU_CONFIG_COUNT];
static uint64_t saved_counter[MAX_CPUS][KPC_MAX_COUNTERS];
static uint64_t kpc_running_cfg_pmc_mask = 0;
static uint32_t kpc_running_classes = 0;
static uint32_t kpc_configured = 0;

/*
 * The whitelist is disabled by default on development and debug kernels.
 * This can be changed via the kpc.disable_whitelist sysctl.  The whitelist
 * is enabled on release kernels and cannot be disabled.
 */
#if DEVELOPMENT || DEBUG
static boolean_t whitelist_disabled = TRUE;
#else
static boolean_t whitelist_disabled = FALSE;
#endif

#define CPMU_CORE_CYCLE 0x02

#if HAS_EARLY_APPLE_CPMU

#define CPMU_BIU_UPSTREAM_CYCLE   0x19
#define CPMU_BIU_DOWNSTREAM_CYCLE 0x1a
#define CPMU_L2C_AGENT_LD         0x22
#define CPMU_L2C_AGENT_LD_MISS    0x23
#define CPMU_L2C_AGENT_ST         0x24
#define CPMU_L2C_AGENT_ST_MISS    0x25
#define CPMU_INST_A32             0x78
#define CPMU_INST_THUMB           0x79
#define CPMU_INST_A64             0x7a
#define CPMU_INST_BRANCH          0x7b
#define CPMU_SYNC_DC_LOAD_MISS    0xb4
#define CPMU_SYNC_DC_STORE_MISS   0xb5
#define CPMU_SYNC_DTLB_MISS       0xb6
#define CPMU_SYNC_ST_HIT_YNGR_LD  0xb9
#define CPMU_SYNC_BR_ANY_MISP     0xc0
#define CPMU_FED_IC_MISS_DEM      0xce
#define CPMU_FED_ITLB_MISS        0xcf

#else /* HAS_EARLY_APPLE_CPMU */

#if HAS_CPMU_BIU_EVENTS
#define CPMU_BIU_UPSTREAM_CYCLE   0x13
#define CPMU_BIU_DOWNSTREAM_CYCLE 0x14
#endif /* HAS_CPMU_BIU_EVENTS */

#if HAS_CPMU_L2C_EVENTS
#define CPMU_L2C_AGENT_LD      0x1a
#define CPMU_L2C_AGENT_LD_MISS 0x1b
#define CPMU_L2C_AGENT_ST      0x1c
#define CPMU_L2C_AGENT_ST_MISS 0x1d
#endif /* HAS_CPMU_L2C_EVENTS */

#define CPMU_INST_A32            0x8a
#define CPMU_INST_THUMB          0x8b
#define CPMU_INST_A64            0x8c
#define CPMU_INST_BRANCH         0x8d
#define CPMU_SYNC_DC_LOAD_MISS   0xbf
#define CPMU_SYNC_DC_STORE_MISS  0xc0
#define CPMU_SYNC_DTLB_MISS      0xc1
#define CPMU_SYNC_ST_HIT_YNGR_LD 0xc4
#define CPMU_SYNC_BR_ANY_MISP    0xcb
#define CPMU_FED_IC_MISS_DEM     0xd3
#define CPMU_FED_ITLB_MISS       0xd4

#endif /* !HAS_EARLY_APPLE_CPMU */

/* List of counter events that are allowed to be used by 3rd-parties. */
static kpc_config_t whitelist[] = {
	0,    /* NO_EVENT */

	CPMU_CORE_CYCLE,

#if HAS_CPMU_BIU_EVENTS
	CPMU_BIU_UPSTREAM_CYCLE, CPMU_BIU_DOWNSTREAM_CYCLE,
#endif /* HAS_CPMU_BIU_EVENTS */

#if HAS_CPMU_L2C_EVENTS
	CPMU_L2C_AGENT_LD, CPMU_L2C_AGENT_LD_MISS, CPMU_L2C_AGENT_ST,
	CPMU_L2C_AGENT_ST_MISS,
#endif /* HAS_CPMU_L2C_EVENTS */

	CPMU_INST_A32, CPMU_INST_THUMB, CPMU_INST_A64, CPMU_INST_BRANCH,
	CPMU_SYNC_DC_LOAD_MISS, CPMU_SYNC_DC_STORE_MISS,
	CPMU_SYNC_DTLB_MISS, CPMU_SYNC_ST_HIT_YNGR_LD,
	CPMU_SYNC_BR_ANY_MISP, CPMU_FED_IC_MISS_DEM, CPMU_FED_ITLB_MISS,
};
#define WHITELIST_COUNT (sizeof(whitelist) / sizeof(whitelist[0]))
#define EVENT_MASK 0xff

static bool
config_in_whitelist(kpc_config_t cfg)
{
	for (unsigned int i = 0; i < WHITELIST_COUNT; i++) {
		/* Strip off any EL configuration bits -- just look at the event. */
		if ((cfg & EVENT_MASK) == whitelist[i]) {
			return true;
		}
	}
	return false;
}
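/*
 * Example (illustrative, not in the original source): a third-party
 * configuration of 0x2008c passes this check because only the low
 * event byte is compared: (0x2008c & EVENT_MASK) == 0x8c, which
 * matches CPMU_INST_A64 in the non-early event numbering above.
 */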

#ifdef KPC_DEBUG
static void
dump_regs(void)
{
	uint64_t val;
	kprintf("PMCR0 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR0));
	kprintf("PMCR1 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR1));
	kprintf("PMCR2 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR2));
	kprintf("PMCR3 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR3));
	kprintf("PMCR4 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMCR4));
	kprintf("PMESR0 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMESR0));
	kprintf("PMESR1 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMESR1));

	kprintf("PMC0 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC0));
	kprintf("PMC1 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC1));
	kprintf("PMC2 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC2));
	kprintf("PMC3 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC3));
	kprintf("PMC4 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC4));
	kprintf("PMC5 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC5));
	kprintf("PMC6 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC6));
	kprintf("PMC7 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC7));

#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	kprintf("PMC8 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC8));
	kprintf("PMC9 = 0x%" PRIx64 "\n", SREG_READ(SREG_PMC9));
#endif
}
#endif

static boolean_t
enable_counter(uint32_t counter)
{
	uint64_t pmcr0 = 0;
	boolean_t counter_running, pmi_enabled, enabled;

	pmcr0 = SREG_READ(SREG_PMCR0) | 0x3 /* leave the fixed counters enabled for monotonic */;

	counter_running = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;
	pmi_enabled = (pmcr0 & PMCR0_PMI_ENABLE_MASK(counter)) != 0;

	enabled = counter_running && pmi_enabled;

	if (!enabled) {
		pmcr0 |= PMCR0_PMC_ENABLE_MASK(counter);
		pmcr0 |= PMCR0_PMI_ENABLE_MASK(counter);
		SREG_WRITE(SREG_PMCR0, pmcr0);
	}

	return enabled;
}

static boolean_t
disable_counter(uint32_t counter)
{
	uint64_t pmcr0;
	boolean_t enabled;

	if (counter < 2) {
		return true;
	}

	pmcr0 = SREG_READ(SREG_PMCR0) | 0x3;
	enabled = (pmcr0 & PMCR0_PMC_ENABLE_MASK(counter)) != 0;

	if (enabled) {
		pmcr0 &= PMCR0_PMC_DISABLE_MASK(counter);
		SREG_WRITE(SREG_PMCR0, pmcr0);
	}

	return enabled;
}

/*
 * Enable counter in processor modes determined by configuration word.
 */
static void
set_modes(uint32_t counter, kpc_config_t cfgword)
{
	uint64_t bits = 0;
	int cpuid = cpu_number();

	if (cfgword & CFGWORD_EL0A32EN_MASK) {
		bits |= PMCR1_EL0_A32_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL0A64EN_MASK) {
		bits |= PMCR1_EL0_A64_ENABLE_MASK(counter);
	}
	if (cfgword & CFGWORD_EL1EN_MASK) {
		bits |= PMCR1_EL1_A64_ENABLE_MASK(counter);
	}
#if !NO_MONITOR
	if (cfgword & CFGWORD_EL3EN_MASK) {
		bits |= PMCR1_EL3_A64_ENABLE_MASK(counter);
	}
#endif

	/*
	 * Backwards compatibility: Writing a non-zero configuration word with
	 * all zeros in bits 16-19 is interpreted as enabling in all modes.
	 * This matches the behavior when the PMCR1 bits weren't exposed.
	 */
	if (bits == 0 && cfgword != 0) {
		bits = PMCR1_EL_ALL_ENABLE_MASK(counter);
	}

	uint64_t pmcr1 = SREG_READ(SREG_PMCR1);
	pmcr1 &= PMCR1_EL_ALL_DISABLE_MASK(counter);
	pmcr1 |= bits;
	pmcr1 |= 0x30303; /* monotonic compatibility */
	SREG_WRITE(SREG_PMCR1, pmcr1);
	saved_PMCR[cpuid][1] = pmcr1;
}
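/*
 * Example (illustrative, not in the original source): for PMC 2 with
 * CFGWORD_EL0A64EN_MASK | CFGWORD_EL1EN_MASK, set_modes() first clears
 * all of PMC 2's mode bits with PMCR1_EL_ALL_DISABLE_MASK(2), then sets
 * bits 10 (EL0 A64) and 18 (EL1 A64) of PMCR1.
 */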

static uint64_t
read_counter(uint32_t counter)
{
	switch (counter) {
	// case 0: return SREG_READ(SREG_PMC0);
	// case 1: return SREG_READ(SREG_PMC1);
	case 2: return SREG_READ(SREG_PMC2);
	case 3: return SREG_READ(SREG_PMC3);
	case 4: return SREG_READ(SREG_PMC4);
	case 5: return SREG_READ(SREG_PMC5);
	case 6: return SREG_READ(SREG_PMC6);
	case 7: return SREG_READ(SREG_PMC7);
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	case 8: return SREG_READ(SREG_PMC8);
	case 9: return SREG_READ(SREG_PMC9);
#endif
	default: return 0;
	}
}

static void
write_counter(uint32_t counter, uint64_t value)
{
	switch (counter) {
	// case 0: SREG_WRITE(SREG_PMC0, value); break;
	// case 1: SREG_WRITE(SREG_PMC1, value); break;
	case 2: SREG_WRITE(SREG_PMC2, value); break;
	case 3: SREG_WRITE(SREG_PMC3, value); break;
	case 4: SREG_WRITE(SREG_PMC4, value); break;
	case 5: SREG_WRITE(SREG_PMC5, value); break;
	case 6: SREG_WRITE(SREG_PMC6, value); break;
	case 7: SREG_WRITE(SREG_PMC7, value); break;
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	case 8: SREG_WRITE(SREG_PMC8, value); break;
	case 9: SREG_WRITE(SREG_PMC9, value); break;
#endif
	default: break;
	}
}

uint32_t
kpc_rawpmu_config_count(void)
{
	return RAWPMU_CONFIG_COUNT;
}

int
kpc_get_rawpmu_config(kpc_config_t *configv)
{
	configv[0] = SREG_READ(SREG_PMCR2);
	configv[1] = SREG_READ(SREG_PMCR3);
	configv[2] = SREG_READ(SREG_PMCR4);
	configv[3] = SREG_READ(SREG_OPMAT0);
	configv[4] = SREG_READ(SREG_OPMAT1);
	configv[5] = SREG_READ(SREG_OPMSK0);
	configv[6] = SREG_READ(SREG_OPMSK1);
#if RAWPMU_CONFIG_COUNT > 7
	configv[7] = SREG_READ(SREG_PMMMAP);
	configv[8] = SREG_READ(SREG_PMTRHLD2);
	configv[9] = SREG_READ(SREG_PMTRHLD4);
	configv[10] = SREG_READ(SREG_PMTRHLD6);
#endif
	return 0;
}

static int
kpc_set_rawpmu_config(kpc_config_t *configv)
{
	SREG_WRITE(SREG_PMCR2, configv[0]);
	SREG_WRITE(SREG_PMCR3, configv[1]);
	SREG_WRITE(SREG_PMCR4, configv[2]);
	SREG_WRITE(SREG_OPMAT0, configv[3]);
	SREG_WRITE(SREG_OPMAT1, configv[4]);
	SREG_WRITE(SREG_OPMSK0, configv[5]);
	SREG_WRITE(SREG_OPMSK1, configv[6]);
#if RAWPMU_CONFIG_COUNT > 7
	SREG_WRITE(SREG_PMMMAP, configv[7]);
	SREG_WRITE(SREG_PMTRHLD2, configv[8]);
	SREG_WRITE(SREG_PMTRHLD4, configv[9]);
	SREG_WRITE(SREG_PMTRHLD6, configv[10]);
#endif
	return 0;
}

static void
save_regs(void)
{
	int cpuid = cpu_number();

	__asm__ volatile ("dmb ish");

	assert(ml_get_interrupts_enabled() == FALSE);

	/* Save event selections. */
	saved_PMESR[cpuid][0] = SREG_READ(SREG_PMESR0);
	saved_PMESR[cpuid][1] = SREG_READ(SREG_PMESR1);

	kpc_get_rawpmu_config(saved_RAWPMU[cpuid]);

	/* Disable the counters. */
	// SREG_WRITE(SREG_PMCR0, clear);

	/* Finally, save state for each counter. */
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		saved_counter[cpuid][i] = read_counter(i);
	}
}

static void
restore_regs(void)
{
	int cpuid = cpu_number();

	/* Restore PMESR values. */
	SREG_WRITE(SREG_PMESR0, saved_PMESR[cpuid][0]);
	SREG_WRITE(SREG_PMESR1, saved_PMESR[cpuid][1]);

	kpc_set_rawpmu_config(saved_RAWPMU[cpuid]);

	/* Restore counter values. */
	for (int i = 2; i < KPC_ARM64_PMC_COUNT; i++) {
		write_counter(i, saved_counter[cpuid][i]);
	}

	/* Restore PMCR0/1 values (with PMCR0 last to enable). */
	SREG_WRITE(SREG_PMCR1, saved_PMCR[cpuid][1] | 0x30303);
}

static uint64_t
get_counter_config(uint32_t counter)
{
	uint64_t pmesr;

	switch (counter) {
	case 2: /* FALLTHROUGH */
	case 3: /* FALLTHROUGH */
	case 4: /* FALLTHROUGH */
	case 5:
		pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR0), counter, 2);
		break;
	case 6: /* FALLTHROUGH */
	case 7:
#if (KPC_ARM64_CONFIGURABLE_COUNT > 6)
	/* FALLTHROUGH */
	case 8: /* FALLTHROUGH */
	case 9:
#endif
		pmesr = PMESR_EVT_DECODE(SREG_READ(SREG_PMESR1), counter, 6);
		break;
	default:
		pmesr = 0;
		break;
	}

	kpc_config_t config = pmesr;

	uint64_t pmcr1 = SREG_READ(SREG_PMCR1);

	if (pmcr1 & PMCR1_EL0_A32_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A32EN_MASK;
	}
	if (pmcr1 & PMCR1_EL0_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL0A64EN_MASK;
	}
	if (pmcr1 & PMCR1_EL1_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL1EN_MASK;
#if NO_MONITOR
		config |= CFGWORD_EL3EN_MASK;
#endif
	}
#if !NO_MONITOR
	if (pmcr1 & PMCR1_EL3_A64_ENABLE_MASK(counter)) {
		config |= CFGWORD_EL3EN_MASK;
	}
#endif

	return config;
}

static void
set_counter_config(uint32_t counter, uint64_t config)
{
	int cpuid = cpu_number();
	uint64_t pmesr = 0;

	switch (counter) {
	case 2: /* FALLTHROUGH */
	case 3: /* FALLTHROUGH */
	case 4: /* FALLTHROUGH */
	case 5:
		pmesr = SREG_READ(SREG_PMESR0);
		pmesr &= PMESR_EVT_CLEAR(counter, 2);
		pmesr |= PMESR_EVT_ENCODE(config, counter, 2);
		SREG_WRITE(SREG_PMESR0, pmesr);
		saved_PMESR[cpuid][0] = pmesr;
		break;

	case 6: /* FALLTHROUGH */
	case 7:
#if KPC_ARM64_CONFIGURABLE_COUNT > 6
	/* FALLTHROUGH */
	case 8: /* FALLTHROUGH */
	case 9:
#endif
		pmesr = SREG_READ(SREG_PMESR1);
		pmesr &= PMESR_EVT_CLEAR(counter, 6);
		pmesr |= PMESR_EVT_ENCODE(config, counter, 6);
		SREG_WRITE(SREG_PMESR1, pmesr);
		saved_PMESR[cpuid][1] = pmesr;
		break;
	default:
		break;
	}

	set_modes(counter, config);
}
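/*
 * Round-trip sketch (illustrative, not in the original source):
 * set_counter_config(2, 0x2008c) writes event 0x8c into byte 0 of
 * PMESR0 and sets the EL0 A64 bit for PMC 2 in PMCR1;
 * get_counter_config(2) then reads both registers back and
 * reconstructs the configuration word 0x2008c.
 */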

/* internal functions */

void
kpc_arch_init(void)
{
}

boolean_t
kpc_is_running_fixed(void)
{
	return (kpc_running_classes & KPC_CLASS_FIXED_MASK) == KPC_CLASS_FIXED_MASK;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return ((kpc_running_classes & KPC_CLASS_CONFIGURABLE_MASK) == KPC_CLASS_CONFIGURABLE_MASK) &&
	       ((kpc_running_cfg_pmc_mask & pmc_mask) == pmc_mask);
}

uint32_t
kpc_fixed_count(void)
{
	return KPC_ARM64_FIXED_COUNT;
}

uint32_t
kpc_configurable_count(void)
{
	return KPC_ARM64_CONFIGURABLE_COUNT;
}

uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask)
{
	assert(kpc_popcount(pmc_mask) <= kpc_configurable_count());
	return kpc_popcount(pmc_mask);
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}

uint64_t
kpc_configurable_max(void)
{
	return (1ULL << KPC_ARM64_COUNTER_WIDTH) - 1;
}
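/*
 * Note (illustrative, not in the original source): both maxima are
 * derived from KPC_ARM64_COUNTER_WIDTH; with a 47-bit counter width,
 * for example, the largest value before the overflow bit is set would
 * be 2^47 - 1.
 */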

static void
set_running_configurable(uint64_t target_mask, uint64_t state_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & target_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(offset + i));

		if ((1ULL << i) & state_mask) {
			enable_counter(offset + i);
		} else {
			disable_counter(offset + i);
		}
	}

	ml_set_interrupts_enabled(enabled);
}

static uint32_t kpc_xcall_sync;
static void
kpc_set_running_xcall( void *vstate )
{
	struct kpc_running_remote *mp_config = (struct kpc_running_remote*) vstate;
	assert(mp_config);

	set_running_configurable(mp_config->cfg_target_mask,
	    mp_config->cfg_state_mask);

	if (os_atomic_dec(&kpc_xcall_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xcall_sync);
	}
}

static uint32_t kpc_xread_sync;
static void
kpc_get_curcpu_counters_xcall(void *args)
{
	struct kpc_get_counters_remote *handler = args;

	assert(handler != NULL);
	assert(handler->buf != NULL);

	int offset = cpu_number() * handler->buf_stride;
	int r = kpc_get_curcpu_counters(handler->classes, NULL, &handler->buf[offset]);

	/* number of counters added by this CPU, needs to be atomic */
	os_atomic_add(&(handler->nb_counters), r, relaxed);

	if (os_atomic_dec(&kpc_xread_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_xread_sync);
	}
}

int
kpc_get_all_cpus_counters(uint32_t classes, int *curcpu, uint64_t *buf)
{
	assert(buf != NULL);

	int enabled = ml_set_interrupts_enabled(FALSE);

	/* grab counters and CPU number as close as possible */
	if (curcpu) {
		*curcpu = cpu_number();
	}

	struct kpc_get_counters_remote hdl = {
		.classes = classes,
		.nb_counters = 0,
		.buf = buf,
		.buf_stride = kpc_get_counter_count(classes)
	};

	cpu_broadcast_xcall(&kpc_xread_sync, TRUE, kpc_get_curcpu_counters_xcall, &hdl);
	int offset = hdl.nb_counters;

	(void)ml_set_interrupts_enabled(enabled);

	return offset;
}

int
kpc_get_fixed_counters(uint64_t *counterv)
{
#if MONOTONIC
	mt_fixed_counts(counterv);
	return 0;
#else /* MONOTONIC */
#pragma unused(counterv)
	return ENOTSUP;
#endif /* !MONOTONIC */
}

int
kpc_get_configurable_counters(uint64_t *counterv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	uint64_t ctr = 0ULL;

	assert(counterv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		ctr = read_counter(i + offset);

		if (ctr & KPC_ARM64_COUNTER_OVF_MASK) {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (kpc_configurable_max() - CONFIGURABLE_RELOAD(i) + 1 /* Wrap */) +
			    (ctr & KPC_ARM64_COUNTER_MASK);
		} else {
			ctr = CONFIGURABLE_SHADOW(i) +
			    (ctr - CONFIGURABLE_RELOAD(i));
		}

		*counterv++ = ctr;
	}

	return 0;
}
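/*
 * Worked example (illustrative, not in the original source): suppose
 * CONFIGURABLE_RELOAD(i) is max - 999 and the hardware counter has
 * wrapped and now reads 5 with the overflow bit set.  The reported
 * value is then
 *
 *   SHADOW + (max - RELOAD + 1) + 5 == SHADOW + 1000 + 5,
 *
 * i.e. everything accumulated before the last reload, plus the 1000
 * increments it took to wrap, plus the 5 counted since the wrap.
 */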

int
kpc_get_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();

	assert(configv);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if ((1ULL << i) & pmc_mask) {
			*configv++ = get_counter_config(i + offset);
		}
	}
	return 0;
}

static int
kpc_set_configurable_config(kpc_config_t *configv, uint64_t pmc_mask)
{
	uint32_t cfg_count = kpc_configurable_count(), offset = kpc_fixed_count();
	boolean_t enabled;

	assert(configv);

	enabled = ml_set_interrupts_enabled(FALSE);

	for (uint32_t i = 0; i < cfg_count; ++i) {
		if (((1ULL << i) & pmc_mask) == 0) {
			continue;
		}
		assert(kpc_controls_counter(i + offset));

		set_counter_config(i + offset, *configv++);
	}

	ml_set_interrupts_enabled(enabled);

	return 0;
}

static uint32_t kpc_config_sync;
static void
kpc_set_config_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	kpc_config_t *new_config = NULL;
	uint32_t classes = 0;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_config = mp_config->configv;

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		kpc_set_configurable_config(new_config, mp_config->pmc_mask);
		new_config += kpc_popcount(mp_config->pmc_mask);
	}

	if (classes & KPC_CLASS_RAWPMU_MASK) {
		kpc_set_rawpmu_config(new_config);
		new_config += RAWPMU_CONFIG_COUNT;
	}

	if (os_atomic_dec(&kpc_config_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_config_sync);
	}
}

static uint64_t
kpc_reload_counter(uint32_t ctr)
{
	assert(ctr < (kpc_configurable_count() + kpc_fixed_count()));

	uint64_t old = read_counter(ctr);

	if (kpc_controls_counter(ctr)) {
		write_counter(ctr, FIXED_RELOAD(ctr));
		return old & KPC_ARM64_COUNTER_MASK;
	} else {
		/*
		 * Unset the overflow bit to clear the condition that drives
		 * PMIs.  The power manager is not interested in handling PMIs.
		 */
		write_counter(ctr, old & KPC_ARM64_COUNTER_MASK);
		return 0;
	}
}

static uint32_t kpc_reload_sync;
static void
kpc_set_reload_xcall(void *vmp_config)
{
	struct kpc_config_remote *mp_config = vmp_config;
	uint32_t classes = 0, count = 0, offset = kpc_fixed_count();
	uint64_t *new_period = NULL, max = kpc_configurable_max();
	boolean_t enabled;

	assert(mp_config);
	assert(mp_config->configv);
	classes = mp_config->classes;
	new_period = mp_config->configv;

	enabled = ml_set_interrupts_enabled(FALSE);

	if (classes & KPC_CLASS_CONFIGURABLE_MASK) {
		/*
		 * Update _all_ shadow counters; this cannot be done for only
		 * selected PMCs.  Otherwise, we would corrupt the configurable
		 * shadow buffer since the PMCs are muxed according to the pmc
		 * mask.
		 */
		uint64_t all_cfg_mask = (1ULL << kpc_configurable_count()) - 1;
		kpc_get_configurable_counters(&CONFIGURABLE_SHADOW(0), all_cfg_mask);

		/* set the new period */
		count = kpc_configurable_count();
		for (uint32_t i = 0; i < count; ++i) {
			/* ignore the counter */
			if (((1ULL << i) & mp_config->pmc_mask) == 0) {
				continue;
			}
			if (*new_period == 0) {
				*new_period = kpc_configurable_max();
			}
			CONFIGURABLE_RELOAD(i) = max - *new_period;
			/* reload the counter */
			kpc_reload_counter(offset + i);
			/* next period value */
			new_period++;
		}
	}

	ml_set_interrupts_enabled(enabled);

	if (os_atomic_dec(&kpc_reload_sync, relaxed) == 0) {
		thread_wakeup((event_t) &kpc_reload_sync);
	}
}
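/*
 * Note (illustrative, not in the original source): a requested period P
 * is stored as a reload value of max - P; the counter is reloaded with
 * that value and raises a PMI once it wraps past max.  A requested
 * period of 0 is replaced with kpc_configurable_max(), i.e. the
 * maximum possible period.
 */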

void
kpc_pmi_handler(unsigned int ctr)
{
	uint64_t extra = kpc_reload_counter(ctr);

	FIXED_SHADOW(ctr) += (kpc_fixed_max() - FIXED_RELOAD(ctr) + 1 /* Wrap */) + extra;

	if (FIXED_ACTIONID(ctr)) {
		uintptr_t pc = 0;
		bool kernel = true;
		struct arm_saved_state *state;
		state = getCpuDatap()->cpu_int_state;
		if (state) {
			kernel = !PSR64_IS_USER(get_saved_state_cpsr(state));
			pc = get_saved_state_pc(state);
			if (kernel) {
				pc = VM_KERNEL_UNSLIDE(pc);
			}
		}

		uint64_t config = get_counter_config(ctr);
		kperf_kpc_flags_t flags = kernel ? KPC_KERNEL_PC : 0;
		bool custom_mode = false;
		if ((config & CFGWORD_EL0A32EN_MASK) || (config & CFGWORD_EL0A64EN_MASK)) {
			flags |= KPC_USER_COUNTING;
			custom_mode = true;
		}
		if ((config & CFGWORD_EL1EN_MASK)) {
			flags |= KPC_KERNEL_COUNTING;
			custom_mode = true;
		}
		/*
		 * For backwards-compatibility.
		 */
		if (!custom_mode) {
			flags |= KPC_USER_COUNTING | KPC_KERNEL_COUNTING;
		}
		kpc_sample_kperf(FIXED_ACTIONID(ctr), ctr, config & 0xff, FIXED_SHADOW(ctr),
		    pc, flags);
	}
}

uint32_t
kpc_get_classes(void)
{
	return KPC_CLASS_FIXED_MASK | KPC_CLASS_CONFIGURABLE_MASK | KPC_CLASS_RAWPMU_MASK;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config)
{
	assert(mp_config != NULL);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_xcall_sync, TRUE, kpc_set_running_xcall, mp_config);

	kpc_running_cfg_pmc_mask = mp_config->cfg_state_mask;
	kpc_running_classes = mp_config->classes;
	kpc_configured = 1;

	return 0;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config)
{
	assert(mp_config);

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_reload_sync, TRUE, kpc_set_reload_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config)
{
	uint32_t count = kpc_popcount(mp_config->pmc_mask);

	assert(mp_config);
	assert(mp_config->configv);

	/* check config against whitelist for external devs */
	for (uint32_t i = 0; i < count; ++i) {
		if (!whitelist_disabled && !config_in_whitelist(mp_config->configv[i])) {
			return EPERM;
		}
	}

	/* dispatch to all CPUs */
	cpu_broadcast_xcall(&kpc_config_sync, TRUE, kpc_set_config_xcall, mp_config);

	kpc_configured = 1;

	return 0;
}

void
kpc_idle(void)
{
	if (kpc_configured) {
		save_regs();
	}
}

void
kpc_idle_exit(void)
{
	if (kpc_configured) {
		restore_regs();
	}
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}

int
kpc_disable_whitelist( int val )
{
	whitelist_disabled = val;
	return 0;
}

int
kpc_get_whitelist_disabled( void )
{
	return whitelist_disabled;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ARM_APPLE;
}

#else /* APPLE_ARM64_ARCH_FAMILY */

/* We don't currently support non-Apple arm64 PMU configurations like PMUv3. */

void
kpc_arch_init(void)
{
	/* No-op */
}

uint32_t
kpc_get_classes(void)
{
	return 0;
}

uint32_t
kpc_fixed_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_count(void)
{
	return 0;
}

uint32_t
kpc_fixed_config_count(void)
{
	return 0;
}

uint32_t
kpc_configurable_config_count(uint64_t pmc_mask __unused)
{
	return 0;
}

int
kpc_get_fixed_config(kpc_config_t *configv __unused)
{
	return 0;
}

uint64_t
kpc_fixed_max(void)
{
	return 0;
}

uint64_t
kpc_configurable_max(void)
{
	return 0;
}

int
kpc_get_configurable_config(kpc_config_t *configv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_configurable_counters(uint64_t *counterv __unused, uint64_t pmc_mask __unused)
{
	return ENOTSUP;
}

int
kpc_get_fixed_counters(uint64_t *counterv __unused)
{
	return 0;
}

boolean_t
kpc_is_running_fixed(void)
{
	return FALSE;
}

boolean_t
kpc_is_running_configurable(uint64_t pmc_mask __unused)
{
	return FALSE;
}

int
kpc_set_running_arch(struct kpc_running_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_period_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

int
kpc_set_config_arch(struct kpc_config_remote *mp_config __unused)
{
	return ENOTSUP;
}

void
kpc_idle(void)
{
	// do nothing
}

void
kpc_idle_exit(void)
{
	// do nothing
}

int
kpc_get_all_cpus_counters(uint32_t classes __unused, int *curcpu __unused, uint64_t *buf __unused)
{
	return 0;
}

int
kpc_set_sw_inc( uint32_t mask __unused )
{
	return ENOTSUP;
}

int
kpc_get_pmu_version(void)
{
	return KPC_PMU_ERROR;
}

uint32_t
kpc_rawpmu_config_count(void)
{
	return 0;
}

int
kpc_get_rawpmu_config(__unused kpc_config_t *configv)
{
	return 0;
}

int
kpc_disable_whitelist( int val __unused )
{
	return 0;
}

int
kpc_get_whitelist_disabled( void )
{
	return 0;
}

#endif /* !APPLE_ARM64_ARCH_FAMILY */