/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/machine_routines.h>
#include <arm64/monotonic.h>
#include <kern/assert.h>
#include <kern/debug.h> /* panic */
#include <kern/monotonic.h>
#include <machine/limits.h> /* CHAR_BIT */
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>
#include <sys/errno.h>
#include <sys/monotonic.h>
#include <pexpert/arm64/board_config.h>
#include <pexpert/device_tree.h> /* DTFindEntry */
#include <pexpert/pexpert.h>

#pragma mark core counters

bool mt_core_supported = true;

/*
 * PMC[0-1] are the 48-bit fixed counters -- PMC0 is cycles and PMC1 is
 * instructions (see arm64/monotonic.h).
 *
 * PMC2+ are currently handled by kpc.
 */

#define PMC0 "s3_2_c15_c0_0"
#define PMC1 "s3_2_c15_c1_0"
#define PMC2 "s3_2_c15_c2_0"
#define PMC3 "s3_2_c15_c3_0"
#define PMC4 "s3_2_c15_c4_0"
#define PMC5 "s3_2_c15_c5_0"
#define PMC6 "s3_2_c15_c6_0"
#define PMC7 "s3_2_c15_c7_0"
#define PMC8 "s3_2_c15_c9_0"
#define PMC9 "s3_2_c15_c10_0"

#define CTR_MAX ((UINT64_C(1) << 47) - 1)

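/*
 * A counter preloaded with (CTR_MAX - period) overflows -- and so raises a
 * PMI -- after roughly `period` more events; mt_microstackshot_start_arch()
 * below relies on this to set its sampling period.
 */
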
#define CYCLES 0
#define INSTRS 1

/*
 * PMC0's offset into a core's PIO range.
 *
 * This allows cores to remotely query another core's counters.
 */

#define PIO_PMC0_OFFSET (0x200)

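/*
 * For illustration only: a remote read of another core's cycle count would
 * be a load from that core's PIO region at this offset, roughly
 *
 *	uint64_t cycles = *(uint64_t volatile *)(pio_base + PIO_PMC0_OFFSET);
 *
 * where `pio_base` stands in for however the target core's PIO range is
 * mapped -- this file does not itself use the remote interface.
 */
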
/*
 * Each counter's bit position within the configuration registers.
 * Post-Hurricane devices have additional counters that need a larger shift
 * than the original counters.
 *
 * XXX For now, just support the lower-numbered counters.
 */
#define CTR_POS(CTR) (CTR)

/*
 * PMCR0 is the main control register for the performance monitor. It
 * controls whether the counters are enabled, how they deliver interrupts, and
 * other features.
 */

#define PMCR0 "s3_1_c15_c0_0"

#define PMCR0_CTR_EN(CTR) (UINT64_C(1) << CTR_POS(CTR))
#define PMCR0_FIXED_EN (PMCR0_CTR_EN(CYCLES) | PMCR0_CTR_EN(INSTRS))
/* how interrupts are delivered on a PMI */
enum {
	PMCR0_INTGEN_OFF = 0,
	PMCR0_INTGEN_PMI = 1,
	PMCR0_INTGEN_AIC = 2,
	PMCR0_INTGEN_HALT = 3,
	PMCR0_INTGEN_FIQ = 4,
};
#define PMCR0_INTGEN_SET(INT) ((uint64_t)(INT) << 8)
#define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_FIQ)
/* set by hardware if a PMI was delivered */
#define PMCR0_PMAI (UINT64_C(1) << 11)
#define PMCR0_PMI_EN(CTR) (UINT64_C(1) << (12 + CTR_POS(CTR)))
/* the fixed counters are always counting, so always enable their PMIs */
#define PMCR0_PMI_INIT (PMCR0_PMI_EN(CYCLES) | PMCR0_PMI_EN(INSTRS))
/* disable counting on a PMI */
#define PMCR0_DISCNT_EN (UINT64_C(1) << 20)
/* block PMIs until ERET retires */
#define PMCR0_WFRFE_EN (UINT64_C(1) << 22)
/* count global (not just core-local) L2C events */
#define PMCR0_L2CGLOBAL_EN (UINT64_C(1) << 23)
/* user mode access to configuration registers */
#define PMCR0_USEREN_EN (UINT64_C(1) << 30)

#define PMCR0_INIT (PMCR0_INTGEN_INIT | PMCR0_PMI_INIT | PMCR0_DISCNT_EN)

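/*
 * Illustrative sanity check of the layout above: FIQ delivery selected in
 * INTGEN (0x4 << 8), PMIs enabled for both fixed counters (bits 12 and 13),
 * and counting stopped on PMI (bit 20).
 */
static_assert(PMCR0_INIT == 0x103400, "PMCR0_INIT bit layout");
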
/*
 * PMCR1 controls which execution modes count events.
 */

#define PMCR1 "s3_1_c15_c1_0"

#define PMCR1_EL0A32_EN(CTR) (UINT64_C(1) << (0 + CTR_POS(CTR)))
#define PMCR1_EL0A64_EN(CTR) (UINT64_C(1) << (8 + CTR_POS(CTR)))
#define PMCR1_EL1A64_EN(CTR) (UINT64_C(1) << (16 + CTR_POS(CTR)))
/* PMCR1_EL3A64 is not supported on systems with no monitor */
#if defined(APPLEHURRICANE)
#define PMCR1_EL3A64_EN(CTR) UINT64_C(0)
#else
#define PMCR1_EL3A64_EN(CTR) (UINT64_C(1) << (24 + CTR_POS(CTR)))
#endif
#define PMCR1_ALL_EN(CTR) (PMCR1_EL0A32_EN(CTR) | PMCR1_EL0A64_EN(CTR) | \
		PMCR1_EL1A64_EN(CTR) | PMCR1_EL3A64_EN(CTR))

/* fixed counters always count in all modes */
#define PMCR1_INIT (PMCR1_ALL_EN(CYCLES) | PMCR1_ALL_EN(INSTRS))

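/*
 * Concretely, on parts with an EL3 monitor, PMCR1_INIT sets bits {0, 8, 16,
 * 24} for the cycle counter and bits {1, 9, 17, 25} for the instruction
 * counter (0x3030303); on APPLEHURRICANE the EL3 byte is dropped.
 */
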
static inline void
core_init_execution_modes(void)
{
	uint64_t pmcr1;

	pmcr1 = __builtin_arm_rsr64(PMCR1);
	pmcr1 |= PMCR1_INIT;
	__builtin_arm_wsr64(PMCR1, pmcr1);
}

/*
 * PMCR2 controls watchpoint registers.
 *
 * PMCR3 controls breakpoints and address matching.
 *
 * PMCR4 controls opcode matching.
 */

#define PMCR2 "s3_1_c15_c2_0"
#define PMCR3 "s3_1_c15_c3_0"
#define PMCR4 "s3_1_c15_c4_0"

#define PMSR_OVF(CTR) (1ULL << (CTR))

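/*
 * PMSR has a bit set for each counter that overflowed; the PMI handler
 * below tests (pmsr & PMSR_OVF(i)) to attribute the interrupt to counter i.
 */
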
void
mt_early_init(void)
{
}

static int
core_init(__unused mt_device_t dev)
{
	/* the dev node interface to the core counters is still unsupported */
	return ENOTSUP;
}

struct mt_cpu *
mt_cur_cpu(void)
{
	return &getCpuDatap()->cpu_monotonic;
}

uint64_t
mt_core_snap(unsigned int ctr)
{
	switch (ctr) {
	case 0:
		return __builtin_arm_rsr64(PMC0);
	case 1:
		return __builtin_arm_rsr64(PMC1);
	default:
		panic("monotonic: invalid core counter read: %u", ctr);
		__builtin_unreachable();
	}
}

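/*
 * For illustration, a caller running with interrupts disabled could measure
 * a region of code with the fixed counters like so:
 *
 *	uint64_t start = mt_core_snap(CYCLES);
 *	... region of interest ...
 *	uint64_t elapsed = mt_core_snap(CYCLES) - start;
 */
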
void
mt_core_set_snap(unsigned int ctr, uint64_t count)
{
	switch (ctr) {
	case 0:
		__builtin_arm_wsr64(PMC0, count);
		break;
	case 1:
		__builtin_arm_wsr64(PMC1, count);
		break;
	default:
		panic("monotonic: invalid write of %llu to core counter %u", count, ctr);
		__builtin_unreachable();
	}
}

static void
core_set_enabled(void)
{
	uint64_t pmcr0;

	pmcr0 = __builtin_arm_rsr64(PMCR0);
	pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN;
	__builtin_arm_wsr64(PMCR0, pmcr0);
}

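/*
 * Quiesce and save the counters ahead of idle: stop them with PMCR0_INIT
 * (which leaves PMCR0_FIXED_EN clear) so the snapshot below is stable.
 */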
static void
core_idle(__unused cpu_data_t *cpu)
{
	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

#if DEBUG
	uint64_t pmcr0 = __builtin_arm_rsr64(PMCR0);
	if ((pmcr0 & PMCR0_FIXED_EN) == 0) {
		panic("monotonic: counters disabled while idling, pmcr0 = 0x%llx\n", pmcr0);
	}
	uint64_t pmcr1 = __builtin_arm_rsr64(PMCR1);
	if ((pmcr1 & PMCR1_INIT) == 0) {
		panic("monotonic: counter modes disabled while idling, pmcr1 = 0x%llx\n", pmcr1);
	}
#endif /* DEBUG */

	/* disable counters before updating */
	__builtin_arm_wsr64(PMCR0, PMCR0_INIT);

	mt_update_fixed_counts();
}

#pragma mark uncore performance monitor


#pragma mark common hooks

void
mt_cpu_idle(cpu_data_t *cpu)
{
	core_idle(cpu);
}

void
mt_cpu_run(cpu_data_t *cpu)
{
	uint64_t pmcr0;
	struct mt_cpu *mtc;

	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

	mtc = &cpu->cpu_monotonic;

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		mt_core_set_snap(i, mtc->mtc_snaps[i]);
	}

	/* re-enable the counters */
	core_init_execution_modes();

	pmcr0 = __builtin_arm_rsr64(PMCR0);
	pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN;
	__builtin_arm_wsr64(PMCR0, pmcr0);
}

void
mt_cpu_down(cpu_data_t *cpu)
{
	mt_cpu_idle(cpu);
}

void
mt_cpu_up(cpu_data_t *cpu)
{
	mt_cpu_run(cpu);
}

void
mt_sleep(void)
{
}

void
mt_wake_per_core(void)
{
}

static void
mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmsr)
{
	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

	(void)atomic_fetch_add_explicit(&mt_pmis, 1, memory_order_relaxed);

	/*
	 * monotonic handles any fixed counter PMIs.
	 */
	for (unsigned int i = 0; i < MT_CORE_NFIXED; i++) {
		if ((pmsr & PMSR_OVF(i)) == 0) {
			continue;
		}

		uint64_t count = mt_cpu_update_count(cpu, i);
		cpu->cpu_monotonic.mtc_counts[i] += count;
		mt_core_set_snap(i, mt_core_reset_values[i]);
		cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];

		if (mt_microstackshots && mt_microstackshot_ctr == i) {
			bool user_mode = false;
			arm_saved_state_t *state = get_user_regs(current_thread());
			if (state) {
				user_mode = PSR64_IS_USER(get_saved_state_cpsr(state));
			}
			KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 1),
					mt_microstackshot_ctr, user_mode);
			mt_microstackshot_pmi_handler(user_mode, mt_microstackshot_ctx);
		}
	}

	/*
	 * KPC handles the configurable counter PMIs.
	 */
	for (unsigned int i = MT_CORE_NFIXED; i < CORE_NCTRS; i++) {
		if (pmsr & PMSR_OVF(i)) {
			extern void kpc_pmi_handler(unsigned int ctr);
			kpc_pmi_handler(i);
		}
	}

	core_set_enabled();
}

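/*
 * FIQ entry point for PMIs.  Only the core counter status (pmsr) is acted
 * on; upmsr reports uncore monitor status, which this file does not
 * support (see the empty section above).
 */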
void
mt_fiq(void *cpu, uint64_t pmsr, uint64_t upmsr)
{
	mt_cpu_pmi(cpu, pmsr);

#pragma unused(upmsr)
}

static uint32_t mt_xc_sync;

static void
mt_microstackshot_start_remote(__unused void *arg)
{
	cpu_data_t *cpu = getCpuDatap();

	__builtin_arm_wsr64(PMCR0, PMCR0_INIT);

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		uint64_t count = mt_cpu_update_count(cpu, i);
		cpu->cpu_monotonic.mtc_counts[i] += count;
		mt_core_set_snap(i, mt_core_reset_values[i]);
		cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];
	}

	core_set_enabled();

	if (hw_atomic_sub(&mt_xc_sync, 1) == 0) {
		thread_wakeup((event_t)&mt_xc_sync);
	}
}

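/*
 * Program each core's microstackshot counter to start `period` events shy
 * of overflow, then cross-call all CPUs so each installs the new reset
 * value via mt_microstackshot_start_remote().
 */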
int
mt_microstackshot_start_arch(uint64_t period)
{
	mt_core_reset_values[mt_microstackshot_ctr] = CTR_MAX - period;
	cpu_broadcast_xcall(&mt_xc_sync, TRUE, mt_microstackshot_start_remote,
			mt_microstackshot_start_remote /* cannot pass NULL */);
	return 0;
}

#pragma mark dev nodes

struct mt_device mt_devices[] = {
	[0] = {
		.mtd_name = "core",
		.mtd_init = core_init,
	},
};

static_assert(
		(sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS,
		"MT_NDEVS must equal the number of entries in mt_devices");