/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/machine_routines.h>
#include <arm64/monotonic.h>
#include <kern/assert.h> /* static_assert, assert */
#include <kern/debug.h> /* panic */
#include <kern/monotonic.h>
#include <machine/limits.h> /* CHAR_BIT */
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>
#include <sys/errno.h>
#include <sys/monotonic.h>
#include <pexpert/arm64/board_config.h>
#include <pexpert/pexpert.h>

#pragma mark core counters

bool mt_core_supported = true;
void mt_fiq_internal(uint64_t upmsr);

/*
 * PMC[0-1] are the 48-bit fixed counters -- PMC0 is cycles and PMC1 is
 * instructions (see arm64/monotonic.h).
 *
 * PMC2+ are currently handled by kpc.
 */

#define PMC0 "s3_2_c15_c0_0"
#define PMC1 "s3_2_c15_c1_0"
#define PMC2 "s3_2_c15_c2_0"
#define PMC3 "s3_2_c15_c3_0"
#define PMC4 "s3_2_c15_c4_0"
#define PMC5 "s3_2_c15_c5_0"
#define PMC6 "s3_2_c15_c6_0"
#define PMC7 "s3_2_c15_c7_0"
#define PMC8 "s3_2_c15_c9_0"
#define PMC9 "s3_2_c15_c10_0"
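
/*
 * The counters are accessed with the system register builtins, using the
 * encodings above.  An illustrative sketch, mirroring mt_core_snap and
 * mt_core_set_snap below:
 *
 *	uint64_t cycles = __builtin_arm_rsr64(PMC0);
 *	__builtin_arm_wsr64(PMC0, 0);
 */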

#define CYCLES 0
#define INSTRS 1

/*
 * PMC0's offset into a core's PIO range.
 *
 * This allows cores to remotely query another core's counters.
 */

#define PIO_PMC0_OFFSET (0x200)
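
/*
 * Illustrative sketch only: given another core's PIO base address (which
 * this file does not define -- it is board-specific), that core's cycle
 * counter could be read at (base + PIO_PMC0_OFFSET).
 */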

/*
 * The offset of the counter in the configuration registers. Post-Hurricane
 * devices have additional counters that need a larger shift than the original
 * counters.
 *
 * XXX For now, just support the lower-numbered counters.
 */
#define CTR_POS(CTR) (CTR)

/*
 * PMCR0 is the main control register for the performance monitor. It
 * controls whether the counters are enabled, how they deliver interrupts, and
 * other features.
 */

#define PMCR0 "s3_1_c15_c0_0"

#define PMCR0_CTR_EN(CTR) (UINT64_C(1) << CTR_POS(CTR))
#define PMCR0_FIXED_EN (PMCR0_CTR_EN(CYCLES) | PMCR0_CTR_EN(INSTRS))
/* how interrupts are delivered on a PMI */
enum {
	PMCR0_INTGEN_OFF = 0,
	PMCR0_INTGEN_PMI = 1,
	PMCR0_INTGEN_AIC = 2,
	PMCR0_INTGEN_HALT = 3,
	PMCR0_INTGEN_FIQ = 4,
};
#define PMCR0_INTGEN_SET(INT) ((uint64_t)(INT) << 8)
/* use AIC for backwards compatibility with kpc */
#define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_AIC)
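/*
 * (For example, PMCR0_INTGEN_SET(PMCR0_INTGEN_AIC) above evaluates to 0x200,
 * and PMCR0_INTGEN_SET(PMCR0_INTGEN_FIQ) would be 0x400.)
 */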
/* set by hardware if a PMI was delivered */
#define PMCR0_PMAI (UINT64_C(1) << 11)
#define PMCR0_PMI_EN(CTR) (UINT64_C(1) << (12 + CTR_POS(CTR)))
/* the fixed counters are always counting; XXX this probably needs to be set for all counters */
#define PMCR0_PMI_INIT (PMCR0_PMI_EN(CYCLES) | PMCR0_PMI_EN(INSTRS))
/* disable counting on a PMI (except for AIC interrupts) */
#define PMCR0_DISCNT_EN (UINT64_C(1) << 20)
/* block PMIs until ERET retires */
#define PMCR0_WFRFE_EN (UINT64_C(1) << 22)
/* count global (not just core-local) L2C events */
#define PMCR0_L2CGLOBAL_EN (UINT64_C(1) << 23)
/* user mode access to configuration registers */
#define PMCR0_USEREN_EN (UINT64_C(1) << 30)

/* XXX this needs to be synchronized with kpc... */
#define PMCR0_INIT (PMCR0_INTGEN_INIT | PMCR0_PMI_INIT | PMCR0_DISCNT_EN)
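/*
 * With the definitions above, PMCR0_INIT works out to 0x103200: AIC
 * interrupt generation (0x200), PMIs enabled for both fixed counters
 * (0x3000), and counting disabled on a PMI (0x100000).
 */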

/*
 * PMCR1 controls which execution modes count events.
 */

#define PMCR1 "s3_1_c15_c1_0"

#define PMCR1_EL0A32_EN(CTR) (UINT64_C(1) << (0 + CTR_POS(CTR)))
#define PMCR1_EL0A64_EN(CTR) (UINT64_C(1) << (8 + CTR_POS(CTR)))
#define PMCR1_EL1A64_EN(CTR) (UINT64_C(1) << (16 + CTR_POS(CTR)))
/* PMCR1_EL3A64 is not supported on systems with no monitor */
#if defined(APPLEHURRICANE)
#define PMCR1_EL3A64_EN(CTR) UINT64_C(0)
#else
#define PMCR1_EL3A64_EN(CTR) (UINT64_C(1) << (24 + CTR_POS(CTR)))
#endif
#define PMCR1_ALL_EN(CTR) (PMCR1_EL0A32_EN(CTR) | PMCR1_EL0A64_EN(CTR) | \
	PMCR1_EL1A64_EN(CTR) | PMCR1_EL3A64_EN(CTR))

/* fixed counters always count in all modes */
#define PMCR1_INIT (PMCR1_ALL_EN(CYCLES) | PMCR1_ALL_EN(INSTRS))
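/*
 * Each mode contributes one bit per counter, so PMCR1_INIT evaluates to
 * 0x03030303 where EL3A64 counting is available, and to 0x00030303 on
 * APPLEHURRICANE.
 */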

static inline void
core_init_execution_modes(void)
{
	uint64_t pmcr1;

	pmcr1 = __builtin_arm_rsr64(PMCR1);
	pmcr1 |= PMCR1_INIT;
	__builtin_arm_wsr64(PMCR1, pmcr1);
}

/*
 * PMSR reports the overflow status of all counters.
 */

#define PMSR "s3_1_c15_c13_0"

#define PMSR_OVF(CTR) (UINT64_C(1) << (CTR))
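
/*
 * A counter's overflow can be tested by checking its bit in PMSR, as the
 * PMI handler does below; for instance:
 *
 *	uint64_t pmsr = __builtin_arm_rsr64(PMSR);
 *	if (pmsr & PMSR_OVF(CYCLES)) {
 *		(the cycle counter overflowed)
 *	}
 */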

/*
 * PMCR2 controls watchpoint registers.
 *
 * PMCR3 controls breakpoints and address matching.
 *
 * PMCR4 controls opcode matching.
 */

#define PMCR2 "s3_1_c15_c2_0"
#define PMCR3 "s3_1_c15_c3_0"
#define PMCR4 "s3_1_c15_c4_0"

/*
 * PMCR_AFFINITY does ??? XXX.
 */

#define PMCR_AFFINITY "s3_1_c15_c11_0"

void
mt_init(void)
{
}

static int
core_init(void)
{
	/* the dev node interface to the core counters is still unsupported */
	return ENOTSUP;
}

struct mt_cpu *
mt_cur_cpu(void)
{
	return &getCpuDatap()->cpu_monotonic;
}

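/*
 * Read the current value of a fixed counter directly from the hardware.
 */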
uint64_t
mt_core_snap(unsigned int ctr)
{
	switch (ctr) {
	case 0:
		return __builtin_arm_rsr64(PMC0);
	case 1:
		return __builtin_arm_rsr64(PMC1);
	default:
		panic("monotonic: invalid core counter read: %u", ctr);
		__builtin_trap();
	}
}

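/*
 * Load a fixed counter with a new value -- e.g. mt_core_set_snap(CYCLES, 0)
 * resets the cycle counter, as the PMI handler does below.
 */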
void
mt_core_set_snap(unsigned int ctr, uint64_t count)
{
	switch (ctr) {
	case 0:
		__builtin_arm_wsr64(PMC0, count);
		break;
	case 1:
		__builtin_arm_wsr64(PMC1, count);
		break;
	default:
		panic("monotonic: invalid core counter %u write %llu", ctr, count);
		__builtin_trap();
	}
}

static void
core_set_enabled(void)
{
	uint64_t pmcr0;

	pmcr0 = __builtin_arm_rsr64(PMCR0);
	pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN;
	__builtin_arm_wsr64(PMCR0, pmcr0);
}

static void
core_idle(__unused cpu_data_t *cpu)
{
	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

#if DEBUG
	uint64_t pmcr0 = __builtin_arm_rsr64(PMCR0);
	if ((pmcr0 & PMCR0_FIXED_EN) == 0) {
		panic("monotonic: counters disabled while idling, pmcr0 = 0x%llx\n", pmcr0);
	}
	uint64_t pmcr1 = __builtin_arm_rsr64(PMCR1);
	if ((pmcr1 & PMCR1_INIT) == 0) {
		panic("monotonic: counter modes disabled while idling, pmcr1 = 0x%llx\n", pmcr1);
	}
#endif /* DEBUG */

	/* disable counters before updating */
	__builtin_arm_wsr64(PMCR0, PMCR0_INIT);

	mt_update_fixed_counts();
}

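/*
 * Restore each fixed counter from its saved snapshot and turn counting back
 * on.  core_idle leaves PMCR0_FIXED_EN clear, so this undoes the implicit
 * stop from idling.
 */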
static void
core_run(cpu_data_t *cpu)
{
	uint64_t pmcr0;
	struct mt_cpu *mtc;

	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

	mtc = &cpu->cpu_monotonic;

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		mt_core_set_snap(i, mtc->mtc_snaps[i]);
	}

	/* re-enable the counters */
	core_init_execution_modes();

	pmcr0 = __builtin_arm_rsr64(PMCR0);
	pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN;
	__builtin_arm_wsr64(PMCR0, pmcr0);
}

static void
core_up(__unused cpu_data_t *cpu)
{
	assert(ml_get_interrupts_enabled() == FALSE);

	core_init_execution_modes();
}

#pragma mark uncore counters

static void
uncore_sleep(void)
{
}

static void
uncore_wake(void)
{
}

static void
uncore_fiq(uint64_t upmsr)
{
#pragma unused(upmsr)
}

#pragma mark common hooks

void
mt_cpu_idle(cpu_data_t *cpu)
{
	core_idle(cpu);
}

void
mt_cpu_run(cpu_data_t *cpu)
{
	core_run(cpu);
}

void
mt_cpu_down(cpu_data_t *cpu)
{
	mt_cpu_idle(cpu);
}

void
mt_cpu_up(cpu_data_t *cpu)
{
	core_up(cpu);
	mt_cpu_run(cpu);
}

void
mt_sleep(void)
{
	uncore_sleep();
}

void
mt_wake(void)
{
	uncore_wake();
}

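/*
 * Handle a PMI: accumulate and reset each counter whose overflow bit is set
 * in pmsr, then re-enable counting.  Expects to be called with interrupts
 * disabled.
 */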
void
mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmsr)
{
	bool found_overflow = false;

	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

	(void)atomic_fetch_add_explicit(&mt_pmis, 1, memory_order_relaxed);

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		if (pmsr & PMSR_OVF(i)) {
			mt_cpu_update_count(cpu, i);
			mt_core_set_snap(i, 0);
			found_overflow = true;
		}
	}

	assert(found_overflow);
	core_set_enabled();
}

void
mt_fiq_internal(uint64_t upmsr)
{
	uncore_fiq(upmsr);
}

#pragma mark dev nodes

const struct monotonic_dev monotonic_devs[] = {
	[0] = {
		.mtd_name = "monotonic/core",
		.mtd_init = core_init,
	},
};

static_assert(
	(sizeof(monotonic_devs) / sizeof(monotonic_devs[0])) == MT_NDEVS,
	"MT_NDEVS macro should match the length of monotonic_devs");