/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/cpu_data_internal.h>
#include <arm/machine_routines.h>
#include <arm64/monotonic.h>
#include <kern/assert.h>
#include <kern/debug.h> /* panic */
#include <kern/monotonic.h>
#include <machine/limits.h> /* CHAR_BIT */
#include <stdatomic.h>
#include <sys/errno.h>
#include <sys/monotonic.h>
#include <pexpert/arm64/board_config.h>
#include <pexpert/device_tree.h> /* DTFindEntry */
#include <pexpert/pexpert.h>

#pragma mark core counters

bool mt_core_supported = true;

/*
 * PMC[0-1] are the 48-bit fixed counters -- PMC0 is cycles and PMC1 is
 * instructions (see arm64/monotonic.h).
 *
 * PMC2+ are currently handled by kpc.
 */

#define PMC0 "s3_2_c15_c0_0"
#define PMC1 "s3_2_c15_c1_0"
#define PMC2 "s3_2_c15_c2_0"
#define PMC3 "s3_2_c15_c3_0"
#define PMC4 "s3_2_c15_c4_0"
#define PMC5 "s3_2_c15_c5_0"
#define PMC6 "s3_2_c15_c6_0"
#define PMC7 "s3_2_c15_c7_0"
#define PMC8 "s3_2_c15_c9_0"
#define PMC9 "s3_2_c15_c10_0"

#define CTR_MAX ((UINT64_C(1) << 47) - 1)
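
/*
 * Illustrative note (not part of the original source): since the fixed
 * counters are 48 bits wide and count up, requesting a PMI every `period`
 * events amounts to seeding a counter at CTR_MAX - period so it overflows
 * after `period` further increments, e.g.:
 *
 *     mt_core_set_snap(ctr, CTR_MAX - period);
 *
 * mt_microstackshot_start_arch() below relies on this relationship.
 */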

/*
 * PMC0's offset into a core's PIO range.
 *
 * This allows cores to remotely query another core's counters.
 */

#define PIO_PMC0_OFFSET (0x200)

/*
 * The offset of the counter in the configuration registers.  Post-Hurricane
 * devices have additional counters that need a larger shift than the
 * original counters.
 *
 * XXX For now, just support the lower-numbered counters.
 */
#define CTR_POS(CTR) (CTR)

/*
 * PMCR0 is the main control register for the performance monitor.  It
 * controls whether the counters are enabled, how they deliver interrupts, and
 * other features.
 */

#define PMCR0 "s3_1_c15_c0_0"

#define PMCR0_CTR_EN(CTR) (UINT64_C(1) << CTR_POS(CTR))
#define PMCR0_FIXED_EN (PMCR0_CTR_EN(CYCLES) | PMCR0_CTR_EN(INSTRS))
/* how interrupts are delivered on a PMI */
enum {
	PMCR0_INTGEN_OFF = 0,
	PMCR0_INTGEN_PMI = 1,
	PMCR0_INTGEN_AIC = 2,
	PMCR0_INTGEN_HALT = 3,
	PMCR0_INTGEN_FIQ = 4,
};
#define PMCR0_INTGEN_SET(INT) ((uint64_t)(INT) << 8)
#define PMCR0_INTGEN_INIT PMCR0_INTGEN_SET(PMCR0_INTGEN_FIQ)
/* set by hardware if a PMI was delivered */
#define PMCR0_PMAI (UINT64_C(1) << 11)
#define PMCR0_PMI_EN(CTR) (UINT64_C(1) << (12 + CTR_POS(CTR)))
/* fixed counters are always counting */
#define PMCR0_PMI_INIT (PMCR0_PMI_EN(CYCLES) | PMCR0_PMI_EN(INSTRS))
/* disable counting on a PMI */
#define PMCR0_DISCNT_EN (UINT64_C(1) << 20)
/* block PMIs until ERET retires */
#define PMCR0_WFRFE_EN (UINT64_C(1) << 22)
/* count global (not just core-local) L2C events */
#define PMCR0_L2CGLOBAL_EN (UINT64_C(1) << 23)
/* user mode access to configuration registers */
#define PMCR0_USEREN_EN (UINT64_C(1) << 30)

#define PMCR0_INIT (PMCR0_INTGEN_INIT | PMCR0_PMI_INIT | PMCR0_DISCNT_EN)
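
/*
 * Illustrative note (not part of the original source): PMCR0_INIT leaves the
 * per-counter enable bits clear, so the code below uses two patterns:
 *
 *     __builtin_arm_wsr64(PMCR0, PMCR0_INIT);                  -- stop counting
 *
 *     uint64_t pmcr0 = __builtin_arm_rsr64(PMCR0);
 *     __builtin_arm_wsr64(PMCR0, pmcr0 | PMCR0_INIT | PMCR0_FIXED_EN);
 *                                                              -- start counting
 *
 * core_idle() uses the first form before sampling the counters;
 * core_set_enabled() and mt_cpu_run() use the second.
 */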

/*
 * PMCR1 controls which execution modes count events.
 */

#define PMCR1 "s3_1_c15_c1_0"

#define PMCR1_EL0A32_EN(CTR) (UINT64_C(1) << (0 + CTR_POS(CTR)))
#define PMCR1_EL0A64_EN(CTR) (UINT64_C(1) << (8 + CTR_POS(CTR)))
#define PMCR1_EL1A64_EN(CTR) (UINT64_C(1) << (16 + CTR_POS(CTR)))
/* PMCR1_EL3A64 is not supported on systems with no monitor */
#if defined(APPLEHURRICANE)
#define PMCR1_EL3A64_EN(CTR) UINT64_C(0)
#else
#define PMCR1_EL3A64_EN(CTR) (UINT64_C(1) << (24 + CTR_POS(CTR)))
#endif
#define PMCR1_ALL_EN(CTR) (PMCR1_EL0A32_EN(CTR) | PMCR1_EL0A64_EN(CTR) | \
                           PMCR1_EL1A64_EN(CTR) | PMCR1_EL3A64_EN(CTR))

/* fixed counters always count in all modes */
#define PMCR1_INIT (PMCR1_ALL_EN(CYCLES) | PMCR1_ALL_EN(INSTRS))

static void
core_init_execution_modes(void)
{
	uint64_t pmcr1;

	pmcr1 = __builtin_arm_rsr64(PMCR1);
	pmcr1 |= PMCR1_INIT;
	__builtin_arm_wsr64(PMCR1, pmcr1);
}

/*
 * PMCR2 controls watchpoint registers.
 *
 * PMCR3 controls breakpoints and address matching.
 *
 * PMCR4 controls opcode matching.
 */

#define PMCR2 "s3_1_c15_c2_0"
#define PMCR3 "s3_1_c15_c3_0"
#define PMCR4 "s3_1_c15_c4_0"

#define PMSR_OVF(CTR) (1ULL << (CTR))
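
/*
 * Illustrative note (not part of the original source): PMSR carries one
 * overflow bit per counter, so a PMI handler walks the counters and tests
 * each bit:
 *
 *     for (unsigned int i = 0; i < CORE_NCTRS; i++) {
 *         if (pmsr & PMSR_OVF(i)) {
 *             (counter i overflowed)
 *         }
 *     }
 *
 * mt_cpu_pmi() below does this, splitting the work between the fixed
 * counters (handled here) and the configurable counters (handled by kpc).
 */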

static int
core_init(__unused mt_device_t dev)
{
	/* the dev node interface to the core counters is still unsupported */
	return ENOTSUP;
}

struct mt_cpu *
mt_cur_cpu(void)
{
	return &getCpuDatap()->cpu_monotonic;
}

uint64_t
mt_core_snap(unsigned int ctr)
{
	switch (ctr) {
	case 0:
		return __builtin_arm_rsr64(PMC0);
	case 1:
		return __builtin_arm_rsr64(PMC1);
	default:
		panic("monotonic: invalid core counter read: %u", ctr);
		__builtin_unreachable();
	}
}

void
mt_core_set_snap(unsigned int ctr, uint64_t count)
{
	switch (ctr) {
	case 0:
		__builtin_arm_wsr64(PMC0, count);
		break;
	case 1:
		__builtin_arm_wsr64(PMC1, count);
		break;
	default:
		panic("monotonic: invalid core counter %u write %llu", ctr, count);
		__builtin_unreachable();
	}
}

static void
core_set_enabled(void)
{
	uint64_t pmcr0;

	pmcr0 = __builtin_arm_rsr64(PMCR0);
	pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN;
	__builtin_arm_wsr64(PMCR0, pmcr0);
}

static void
core_idle(__unused cpu_data_t *cpu)
{
	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

#if DEBUG
	uint64_t pmcr0 = __builtin_arm_rsr64(PMCR0);
	if ((pmcr0 & PMCR0_FIXED_EN) == 0) {
		panic("monotonic: counters disabled while idling, pmcr0 = 0x%llx\n", pmcr0);
	}
	uint64_t pmcr1 = __builtin_arm_rsr64(PMCR1);
	if ((pmcr1 & PMCR1_INIT) == 0) {
		panic("monotonic: counter modes disabled while idling, pmcr1 = 0x%llx\n", pmcr1);
	}
#endif /* DEBUG */

	/* disable counters before updating */
	__builtin_arm_wsr64(PMCR0, PMCR0_INIT);

	mt_update_fixed_counts();
}

#pragma mark uncore performance monitor


#pragma mark common hooks

void
mt_cpu_idle(cpu_data_t *cpu)
{
	core_idle(cpu);
}

void
mt_cpu_run(cpu_data_t *cpu)
{
	uint64_t pmcr0;
	struct mt_cpu *mtc;

	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

	mtc = &cpu->cpu_monotonic;

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		mt_core_set_snap(i, mtc->mtc_snaps[i]);
	}

	/* re-enable the counters */
	core_init_execution_modes();

	pmcr0 = __builtin_arm_rsr64(PMCR0);
	pmcr0 |= PMCR0_INIT | PMCR0_FIXED_EN;
	__builtin_arm_wsr64(PMCR0, pmcr0);
}

void
mt_cpu_down(cpu_data_t *cpu)
{
	mt_cpu_idle(cpu);
}

void
mt_cpu_up(cpu_data_t *cpu)
{
	mt_cpu_run(cpu);
}

void
mt_wake_per_core(void)
{
}

static void
mt_cpu_pmi(cpu_data_t *cpu, uint64_t pmsr)
{
	assert(cpu != NULL);
	assert(ml_get_interrupts_enabled() == FALSE);

	(void)atomic_fetch_add_explicit(&mt_pmis, 1, memory_order_relaxed);

	/*
	 * monotonic handles any fixed counter PMIs.
	 */
	for (unsigned int i = 0; i < MT_CORE_NFIXED; i++) {
		if ((pmsr & PMSR_OVF(i)) == 0) {
			continue;
		}

		uint64_t count = mt_cpu_update_count(cpu, i);
		cpu->cpu_monotonic.mtc_counts[i] += count;
		mt_core_set_snap(i, mt_core_reset_values[i]);
		cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];

		if (mt_microstackshots && mt_microstackshot_ctr == i) {
			bool user_mode = false;
			arm_saved_state_t *state = get_user_regs(current_thread());
			if (state) {
				user_mode = PSR64_IS_USER(get_saved_state_cpsr(state));
			}
			KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 1),
					mt_microstackshot_ctr, user_mode);
			mt_microstackshot_pmi_handler(user_mode, mt_microstackshot_ctx);
		}
	}

	/*
	 * KPC handles the configurable counter PMIs.
	 */
	for (unsigned int i = MT_CORE_NFIXED; i < CORE_NCTRS; i++) {
		if (pmsr & PMSR_OVF(i)) {
			extern void kpc_pmi_handler(unsigned int ctr);
			kpc_pmi_handler(i);
		}
	}

	core_set_enabled();
}
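
/*
 * Illustrative note (not part of the original source): PMCR0_INTGEN_INIT
 * selects FIQ delivery for PMIs (PMCR0_INTGEN_FIQ), so the platform FIQ path
 * passes the core and uncore status words to mt_fiq() below, which forwards
 * the core PMSR to mt_cpu_pmi(); the uncore status word (upmsr) is unused
 * here.
 */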

void
mt_fiq(void *cpu, uint64_t pmsr, uint64_t upmsr)
{
	mt_cpu_pmi(cpu, pmsr);

#pragma unused(upmsr)
}

static uint32_t mt_xc_sync;

static void
mt_microstackshot_start_remote(__unused void *arg)
{
	cpu_data_t *cpu = getCpuDatap();

	__builtin_arm_wsr64(PMCR0, PMCR0_INIT);

	for (int i = 0; i < MT_CORE_NFIXED; i++) {
		uint64_t count = mt_cpu_update_count(cpu, i);
		cpu->cpu_monotonic.mtc_counts[i] += count;
		mt_core_set_snap(i, mt_core_reset_values[i]);
		cpu->cpu_monotonic.mtc_snaps[i] = mt_core_reset_values[i];
	}

	core_set_enabled();

	if (hw_atomic_sub(&mt_xc_sync, 1) == 0) {
		thread_wakeup((event_t)&mt_xc_sync);
	}
}

int
mt_microstackshot_start_arch(uint64_t period)
{
	mt_core_reset_values[mt_microstackshot_ctr] = CTR_MAX - period;
	cpu_broadcast_xcall(&mt_xc_sync, TRUE, mt_microstackshot_start_remote,
			mt_microstackshot_start_remote /* cannot pass NULL */);
	return 0;
}

#pragma mark dev nodes

struct mt_device mt_devices[] = {
	[0] = {
		.mtd_init = core_init,
	},
};

static_assert(
		(sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS,
		"MT_NDEVS macro should be same as the length of mt_devices");