]> git.saurik.com Git - apple/xnu.git/blame - osfmk/x86_64/monotonic_x86_64.c
xnu-4903.241.1.tar.gz
[apple/xnu.git] / osfmk / x86_64 / monotonic_x86_64.c
CommitLineData
5ba3f43e
A
1/*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <i386/cpu_data.h>
30#include <i386/cpuid.h>
31#include <i386/lapic.h>
d9a64523 32#include <i386/mp.h>
5ba3f43e
A
33#include <i386/proc_reg.h>
34#include <kern/assert.h> /* static_assert, assert */
35#include <kern/monotonic.h>
36#include <x86_64/monotonic.h>
37#include <sys/errno.h>
38#include <sys/monotonic.h>
39
/*
 * Sanity check the compiler: reading the fixed counters relies on the
 * rdpmc instruction, reached through the compiler's builtin.  Fail the
 * build early if the builtin is unavailable.
 */

#ifndef __has_builtin
#define __has_builtin(x) 0 /* compilers without __has_builtin: assume none */
#endif /* !defined(__has_builtin) */
#if !__has_builtin(__builtin_ia32_rdpmc)
#error requires __builtin_ia32_rdpmc builtin
#endif /* !__has_builtin(__builtin_ia32_rdpmc) */
#pragma mark core counters

/* set true by mt_early_init when the CPU reports an architectural perf leaf version >= 2 */
bool mt_core_supported = false;

/*
 * PMC[0-2]_{RD,WR} allow reading and writing the fixed PMCs.
 *
 * There are separate defines for access type because the read side goes through
 * the rdpmc instruction, which has a different counter encoding than the msr
 * path.
 */
#define PMC_FIXED_RD(CTR) ((UINT64_C(1) << 30) | (CTR)) /* rdpmc: bit 30 selects the fixed-counter space */
#define PMC_FIXED_WR(CTR) (MSR_IA32_PERF_FIXED_CTR0 + (CTR)) /* fixed-counter MSRs are consecutive */
#define PMC0_RD PMC_FIXED_RD(0)
#define PMC0_WR PMC_FIXED_WR(0)
#define PMC1_RD PMC_FIXED_RD(1)
#define PMC1_WR PMC_FIXED_WR(1)
#define PMC2_RD PMC_FIXED_RD(2)
#define PMC2_WR PMC_FIXED_WR(2)
71struct mt_cpu *
72mt_cur_cpu(void)
73{
74 return &current_cpu_datap()->cpu_monotonic;
75}
76
/*
 * Read the current raw value of fixed counter `ctr` via rdpmc.
 * Returns 0 when the fixed counters are unsupported on this CPU;
 * panics on a counter index other than 0-2.
 *
 * NOTE(review): the switch (rather than PMC_FIXED_RD(ctr) directly) is
 * presumably because the rdpmc builtin wants a constant selector --
 * confirm before collapsing.
 */
uint64_t
mt_core_snap(unsigned int ctr)
{
	if (!mt_core_supported) {
		return 0;
	}

	switch (ctr) {
	case 0:
		return __builtin_ia32_rdpmc(PMC0_RD);
	case 1:
		return __builtin_ia32_rdpmc(PMC1_RD);
	case 2:
		return __builtin_ia32_rdpmc(PMC2_RD);
	default:
		panic("monotonic: invalid core counter read: %u", ctr);
		__builtin_unreachable();
	}
}
96
97void
98mt_core_set_snap(unsigned int ctr, uint64_t count)
99{
100 if (!mt_core_supported) {
101 return;
102 }
103
104 switch (ctr) {
105 case 0:
106 wrmsr64(PMC0_WR, count);
107 break;
108 case 1:
109 wrmsr64(PMC1_WR, count);
110 break;
111 case 2:
112 wrmsr64(PMC2_WR, count);
113 break;
114 default:
115 panic("monotonic: invalid core counter write: %u", ctr);
d9a64523 116 __builtin_unreachable();
5ba3f43e
A
117 }
118}
119
/*
 * FIXED_CTR_CTRL controls which rings fixed counters are enabled in and if they
 * deliver PMIs.
 *
 * Each fixed counter has 4 bits: [0:1] controls which ring it's enabled in,
 * [2] counts all hardware threads in each logical core (we don't want this),
 * and [3] enables PMIs on overflow.
 */

#define FIXED_CTR_CTRL 0x38d

/*
 * Hard-coded register state for the three fixed counters:
 * INIT (0x888) sets bit [3] (PMI on overflow) in each counter's nibble;
 * ENABLE (0x333) sets bits [0:1] (count in all rings) in each nibble.
 * Writing INIT alone therefore pauses counting while keeping PMIs armed.
 */
#define FIXED_CTR_CTRL_INIT (0x888)
#define FIXED_CTR_CTRL_ENABLE (0x333)

/*
 * GLOBAL_CTRL controls which counters are enabled -- the high 32-bits control
 * the fixed counters and the lower half is for the configurable counters.
 */

#define GLOBAL_CTRL 0x38f

/*
 * Fixed counters are always enabled -- and there are three of them.
 */
#define GLOBAL_CTRL_FIXED_EN (((UINT64_C(1) << 3) - 1) << 32)

/*
 * GLOBAL_STATUS reports the state of counters, like those that have overflowed.
 */
#define GLOBAL_STATUS 0x38e

/* largest value a (48-bit) fixed counter can hold */
#define CTR_MAX ((UINT64_C(1) << 48) - 1)
/* overflow-status bit for fixed counter CTR (fixed bits start at bit 32) */
#define CTR_FIX_POS(CTR) ((UINT64_C(1) << (CTR)) << 32)

/* NOTE(review): defined but never written in this file -- confirm where overflow status is acked */
#define GLOBAL_OVF 0x390
159
160static void
161core_down(cpu_data_t *cpu)
162{
163 if (!mt_core_supported) {
164 return;
165 }
166
167 assert(ml_get_interrupts_enabled() == FALSE);
168
169 wrmsr64(GLOBAL_CTRL, 0);
170 mt_mtc_update_fixed_counts(&cpu->cpu_monotonic, NULL, NULL);
171}
172
173static void
174core_up(cpu_data_t *cpu)
175{
176 struct mt_cpu *mtc;
177
178 if (!mt_core_supported) {
179 return;
180 }
181
182 assert(ml_get_interrupts_enabled() == FALSE);
183
184 mtc = &cpu->cpu_monotonic;
185
186 for (int i = 0; i < MT_CORE_NFIXED; i++) {
187 mt_core_set_snap(i, mtc->mtc_snaps[i]);
188 }
d9a64523 189 wrmsr64(FIXED_CTR_CTRL, FIXED_CTR_CTRL_INIT | FIXED_CTR_CTRL_ENABLE);
5ba3f43e
A
190 wrmsr64(GLOBAL_CTRL, GLOBAL_CTRL_FIXED_EN);
191}
192
/*
 * Power-management hook: save and stop this CPU's counters as it goes
 * offline.  NOTE(review): unlike mt_cpu_up, the caller is presumably
 * expected to have interrupts disabled already (core_down asserts it).
 */
void
mt_cpu_down(cpu_data_t *cpu)
{
	core_down(cpu);
}
198
199void
200mt_cpu_up(cpu_data_t *cpu)
201{
202 boolean_t intrs_en;
203 intrs_en = ml_set_interrupts_enabled(FALSE);
204 core_up(cpu);
205 ml_set_interrupts_enabled(intrs_en);
206}
207
/*
 * PMI (performance monitoring interrupt) handler, installed via
 * lapic_set_pmi_func in mt_early_init.  For each fixed counter whose
 * overflow bit is set in GLOBAL_STATUS, accumulate its delta into the
 * per-CPU counts and re-arm it from mt_core_reset_values; if
 * microstackshots are active on that counter, run the microstackshot
 * handler.  Configurable-counter overflows are forwarded to kpc.
 * Always returns 0.
 */
static int
mt_pmi_x86_64(x86_saved_state_t *state)
{
	uint64_t status;
	struct mt_cpu *mtc;

	assert(ml_get_interrupts_enabled() == FALSE);
	mtc = mt_cur_cpu();
	/* one overflow bit per counter; fixed counters occupy the high half */
	status = rdmsr64(GLOBAL_STATUS);

	(void)atomic_fetch_add_explicit(&mt_pmis, 1, memory_order_relaxed);

	for (unsigned int i = 0; i < MT_CORE_NFIXED; i++) {
		if (status & CTR_FIX_POS(i)) {
			/* sanity: distance from the snapshot to the overflow
			 * point must fit in the 48-bit counter */
			uint64_t prior = CTR_MAX - mtc->mtc_snaps[i];

			assert(prior <= CTR_MAX);
			prior += 1; /* wrapped */

			uint64_t delta = mt_mtc_update_count(mtc, i);
			mtc->mtc_counts[i] += delta;

			if (mt_microstackshots && mt_microstackshot_ctr == i) {
				/* low 2 bits of CS are the privilege level;
				 * non-zero means user mode was interrupted */
				x86_saved_state64_t *state64 = saved_state64(state);
				bool user_mode = (state64->isf.cs & 0x3) ? true : false;
				KDBG_RELEASE(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 1),
				    mt_microstackshot_ctr, user_mode);
				mt_microstackshot_pmi_handler(user_mode, mt_microstackshot_ctx);
			} else if (mt_debug) {
				KDBG(KDBG_EVENTID(DBG_MONOTONIC, DBG_MT_DEBUG, 2),
				    mt_microstackshot_ctr, i);
			}

			/* re-arm the counter so it overflows (and PMIs) after
			 * the sampling period */
			mtc->mtc_snaps[i] = mt_core_reset_values[i];
			mt_core_set_snap(i, mt_core_reset_values[i]);
		}
	}

	/* if any of the configurable counters overflowed, tell kpc */
	if (status & ((UINT64_C(1) << 4) - 1)) {
		extern void kpc_pmi_handler(x86_saved_state_t *state);
		kpc_pmi_handler(state);
	}
	return 0;
}
252
d9a64523
A
253static void
254mt_microstackshot_start_remote(__unused void *arg)
255{
256 struct mt_cpu *mtc = mt_cur_cpu();
257
258 wrmsr64(FIXED_CTR_CTRL, FIXED_CTR_CTRL_INIT);
259
260 for (int i = 0; i < MT_CORE_NFIXED; i++) {
261 uint64_t delta = mt_mtc_update_count(mtc, i);
262 mtc->mtc_counts[i] += delta;
263 mt_core_set_snap(i, mt_core_reset_values[i]);
264 mtc->mtc_snaps[i] = mt_core_reset_values[i];
265 }
266
267 wrmsr64(FIXED_CTR_CTRL, FIXED_CTR_CTRL_INIT | FIXED_CTR_CTRL_ENABLE);
268}
269
270int
271mt_microstackshot_start_arch(uint64_t period)
5ba3f43e 272{
d9a64523
A
273 if (!mt_core_supported) {
274 return ENOTSUP;
275 }
5ba3f43e 276
d9a64523
A
277 mt_core_reset_values[mt_microstackshot_ctr] = CTR_MAX - period;
278 mp_cpus_call(CPUMASK_ALL, ASYNC, mt_microstackshot_start_remote,
279 NULL);
280 return 0;
281}
5ba3f43e 282
d9a64523
A
283void
284mt_early_init(void)
285{
286 i386_cpu_info_t *info = cpuid_info();
287 if (info->cpuid_arch_perf_leaf.version >= 2) {
5ba3f43e
A
288 lapic_set_pmi_func((i386_intr_func_t)mt_pmi_x86_64);
289 mt_core_supported = true;
290 }
291}
292
/*
 * mtd_init hook for the "core" device; re-initialization through the
 * monotonic device interface is not supported on x86_64.
 */
static int
core_init(__unused mt_device_t dev)
{
	return ENOTSUP;
}
298
#pragma mark common hooks

/*
 * Monotonic devices exposed by this architecture; only the fixed "core"
 * counters are provided on x86_64.
 */
struct mt_device mt_devices[] = {
	[0] = {
		.mtd_name = "core",
		.mtd_init = core_init
	}
};
307
/* keep the device table length in lockstep with the MT_NDEVS count */
static_assert(
    (sizeof(mt_devices) / sizeof(mt_devices[0])) == MT_NDEVS,
    "MT_NDEVS macro should be same as the length of mt_devices");