/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/cpu_data.h>
#include <i386/cpuid.h>
#include <i386/lapic.h>
#include <i386/proc_reg.h>
#include <kern/assert.h> /* static_assert, assert */
#include <kern/monotonic.h>
#include <x86_64/monotonic.h>
#include <sys/errno.h>
#include <sys/monotonic.h>

/*
 * Sanity check the compiler.
 */

#ifndef __has_builtin
#define __has_builtin(x) 0
#endif /* !defined(__has_builtin) */
#if !__has_builtin(__builtin_ia32_rdpmc)
#error requires __builtin_ia32_rdpmc builtin
#endif /* !__has_builtin(__builtin_ia32_rdpmc) */

#pragma mark core counters

/* set by mt_init when CPUID reports a usable (version >= 2) architectural PMU */
bool mt_core_supported = false;

/*
 * PMC[0-2]_{RD,WR} allow reading and writing the fixed PMCs.
 *
 * There are separate defines for access type because the read side goes through
 * the rdpmc instruction, which has a different counter encoding than the msr
 * path.
 */
#define PMC_FIXED_RD(CTR) ((UINT64_C(1) << 30) | (CTR))
#define PMC_FIXED_WR(CTR) (MSR_IA32_PERF_FIXED_CTR0 + (CTR))
#define PMC0_RD PMC_FIXED_RD(0)
#define PMC0_WR PMC_FIXED_WR(0)
#define PMC1_RD PMC_FIXED_RD(1)
#define PMC1_WR PMC_FIXED_WR(1)
#define PMC2_RD PMC_FIXED_RD(2)
#define PMC2_WR PMC_FIXED_WR(2)

70 | struct mt_cpu * | |
71 | mt_cur_cpu(void) | |
72 | { | |
73 | return ¤t_cpu_datap()->cpu_monotonic; | |
74 | } | |
75 | ||
76 | uint64_t | |
77 | mt_core_snap(unsigned int ctr) | |
78 | { | |
79 | if (!mt_core_supported) { | |
80 | return 0; | |
81 | } | |
82 | ||
83 | switch (ctr) { | |
84 | case 0: | |
85 | return __builtin_ia32_rdpmc(PMC0_RD); | |
86 | case 1: | |
87 | return __builtin_ia32_rdpmc(PMC1_RD); | |
88 | case 2: | |
89 | return __builtin_ia32_rdpmc(PMC2_RD); | |
90 | default: | |
91 | panic("monotonic: invalid core counter read: %u", ctr); | |
92 | __builtin_trap(); | |
93 | } | |
94 | } | |
95 | ||
96 | void | |
97 | mt_core_set_snap(unsigned int ctr, uint64_t count) | |
98 | { | |
99 | if (!mt_core_supported) { | |
100 | return; | |
101 | } | |
102 | ||
103 | switch (ctr) { | |
104 | case 0: | |
105 | wrmsr64(PMC0_WR, count); | |
106 | break; | |
107 | case 1: | |
108 | wrmsr64(PMC1_WR, count); | |
109 | break; | |
110 | case 2: | |
111 | wrmsr64(PMC2_WR, count); | |
112 | break; | |
113 | default: | |
114 | panic("monotonic: invalid core counter write: %u", ctr); | |
115 | __builtin_trap(); | |
116 | } | |
117 | } | |
118 | ||
/*
 * FIXED_CTR_CTRL controls which rings fixed counters are enabled in and if they
 * deliver PMIs.
 *
 * Each fixed counters has 4 bits: [0:1] controls which ring it's enabled in,
 * [2] counts all hardware threads in each logical core (we don't want this),
 * and [3] enables PMIs on overflow.
 */

#define FIXED_CTR_CTRL 0x38d

/*
 * Fixed counters are enabled in all rings, so hard-code this register state to
 * enable in all rings and deliver PMIs.
 */
#define FIXED_CTR_CTRL_INIT (0x888 | 0x333)

/*
 * GLOBAL_CTRL controls which counters are enabled -- the high 32-bits control
 * the fixed counters and the lower half is for the configurable counters.
 */

#define GLOBAL_CTRL 0x38f

/*
 * Fixed counters are always enabled -- and there are three of them.
 */
#define GLOBAL_CTRL_FIXED_EN (((UINT64_C(1) << 3) - 1) << 32)

/*
 * GLOBAL_STATUS reports the state of counters, like those that have overflowed.
 */
#define GLOBAL_STATUS 0x38e

/* CTR_MAX assumes 48-bit counters; CTR_FIX_POS(CTR) is fixed counter CTR's
 * overflow bit in GLOBAL_STATUS (high 32 bits). */
#define CTR_MAX ((UINT64_C(1) << 48) - 1)
#define CTR_FIX_POS(CTR) ((UINT64_C(1) << (CTR)) << 32)

/* NOTE(review): defined but never written in this file -- presumably the
 * overflow-clear register; confirm whether status is cleared elsewhere. */
#define GLOBAL_OVF 0x390

158 | static void | |
159 | core_down(cpu_data_t *cpu) | |
160 | { | |
161 | if (!mt_core_supported) { | |
162 | return; | |
163 | } | |
164 | ||
165 | assert(ml_get_interrupts_enabled() == FALSE); | |
166 | ||
167 | wrmsr64(GLOBAL_CTRL, 0); | |
168 | mt_mtc_update_fixed_counts(&cpu->cpu_monotonic, NULL, NULL); | |
169 | } | |
170 | ||
171 | static void | |
172 | core_up(cpu_data_t *cpu) | |
173 | { | |
174 | struct mt_cpu *mtc; | |
175 | ||
176 | if (!mt_core_supported) { | |
177 | return; | |
178 | } | |
179 | ||
180 | assert(ml_get_interrupts_enabled() == FALSE); | |
181 | ||
182 | mtc = &cpu->cpu_monotonic; | |
183 | ||
184 | for (int i = 0; i < MT_CORE_NFIXED; i++) { | |
185 | mt_core_set_snap(i, mtc->mtc_snaps[i]); | |
186 | } | |
187 | wrmsr64(FIXED_CTR_CTRL, FIXED_CTR_CTRL_INIT); | |
188 | wrmsr64(GLOBAL_CTRL, GLOBAL_CTRL_FIXED_EN); | |
189 | } | |
190 | ||
/*
 * CPU-offline hook: stop this CPU's counters and save their values.
 * core_down asserts interrupts are already disabled, so the caller must
 * guarantee that (unlike mt_cpu_up, which masks them itself).
 */
void
mt_cpu_down(cpu_data_t *cpu)
{
	core_down(cpu);
}

197 | void | |
198 | mt_cpu_up(cpu_data_t *cpu) | |
199 | { | |
200 | boolean_t intrs_en; | |
201 | intrs_en = ml_set_interrupts_enabled(FALSE); | |
202 | core_up(cpu); | |
203 | ml_set_interrupts_enabled(intrs_en); | |
204 | } | |
205 | ||
206 | static int | |
207 | mt_pmi_x86_64(x86_saved_state_t *state) | |
208 | { | |
209 | uint64_t status; | |
210 | struct mt_cpu *mtc; | |
211 | bool fixed_ovf = false; | |
212 | ||
213 | assert(ml_get_interrupts_enabled() == FALSE); | |
214 | mtc = mt_cur_cpu(); | |
215 | status = rdmsr64(GLOBAL_STATUS); | |
216 | ||
217 | (void)atomic_fetch_add_explicit(&mt_pmis, 1, memory_order_relaxed); | |
218 | ||
219 | for (int i = 0; i < MT_CORE_NFIXED; i++) { | |
220 | if (status & CTR_FIX_POS(i)) { | |
221 | fixed_ovf = true; | |
222 | uint64_t prior; | |
223 | ||
224 | prior = CTR_MAX - mtc->mtc_snaps[i]; | |
225 | assert(prior <= CTR_MAX); | |
226 | prior += 1; /* wrapped */ | |
227 | ||
228 | mtc->mtc_counts[i] += prior; | |
229 | mtc->mtc_snaps[i] = 0; | |
230 | mt_mtc_update_count(mtc, i); | |
231 | } | |
232 | } | |
233 | ||
234 | /* if any of the configurable counters overflowed, tell kpc */ | |
235 | if (status & ((UINT64_C(1) << 4) - 1)) { | |
236 | extern void kpc_pmi_handler(x86_saved_state_t *state); | |
237 | kpc_pmi_handler(state); | |
238 | } | |
239 | return 0; | |
240 | } | |
241 | ||
242 | void | |
243 | mt_init(void) | |
244 | { | |
245 | uint32_t cpuinfo[4]; | |
246 | ||
247 | do_cpuid(0xA, cpuinfo); | |
248 | ||
249 | if ((cpuinfo[0] & 0xff) >= 2) { | |
250 | lapic_set_pmi_func((i386_intr_func_t)mt_pmi_x86_64); | |
251 | mt_core_supported = true; | |
252 | } | |
253 | } | |
254 | ||
/*
 * Device-level init hook for the core-counter device node; nothing extra is
 * supported here, so report ENOTSUP.
 */
static int
core_init(void)
{
	return ENOTSUP;
}

#pragma mark common hooks

/* the monotonic device table for this architecture: core counters only */
const struct monotonic_dev monotonic_devs[] = {
	[0] = {
		.mtd_name = "monotonic/core",
		.mtd_init = core_init
	}
};

static_assert(
	(sizeof(monotonic_devs) / sizeof(monotonic_devs[0])) == MT_NDEVS,
	"MT_NDEVS macro should be same as the length of monotonic_devs");