]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/machine_check.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / i386 / machine_check.c
1 /*
2 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/zalloc.h>
30 #include <mach/mach_time.h>
31 #include <i386/cpu_data.h>
32 #include <i386/cpuid.h>
33 #include <i386/cpu_topology.h>
34 #include <i386/cpu_threads.h>
35 #include <i386/lapic.h>
36 #include <i386/machine_cpu.h>
37 #include <i386/machine_check.h>
38 #include <i386/proc_reg.h>
39
40 /*
41 * At the time of the machine-check exception, all hardware-threads panic.
42 * Each thread saves the state of its MCA registers to its per-cpu data area.
43 *
44 * State reporting is serialized so one thread dumps all valid state for all
45 * threads to the panic log. This may entail spinning waiting for other
46 * threads to complete saving state to memory. A timeout applies to this wait
47 * -- in particular, a 3-strikes timeout may prevent a thread from taking
48 * part in the affair.
49 */
50
/* Expand to `str' when `bool' is true, to the empty string otherwise. */
#define IF(bool, str) ((bool) ? (str) : "")

/* Machine-check capabilities, discovered once by the boot cpu. */
static boolean_t mca_initialized = FALSE;         /* one-time discovery done */
static boolean_t mca_MCE_present = FALSE;         /* CPUID: #MC exception supported */
static boolean_t mca_MCA_present = FALSE;         /* CPUID: MCA banks supported */
static uint32_t mca_family = 0;                   /* cpu family from CPUID */
static unsigned int mca_error_bank_count = 0;     /* bank count from IA32_MCG_CAP */
static boolean_t mca_control_MSR_present = FALSE; /* IA32_MCG_CTL MSR exists */
static boolean_t mca_cmci_present = FALSE;        /* corrected-error interrupt capable */
static ia32_mcg_cap_t ia32_mcg_cap;               /* raw IA32_MCG_CAP contents */
decl_simple_lock_data(static, mca_lock);          /* serializes panic-time dump entry */
62
/* Saved contents of one machine-check error-reporting bank (IA32_MC<i>_* MSRs). */
typedef struct {
	ia32_mci_ctl_t    mca_mci_ctl;
	ia32_mci_status_t mca_mci_status;
	ia32_mci_misc_t   mca_mci_misc; /* captured only when status.bits.miscv set */
	ia32_mci_addr_t   mca_mci_addr; /* captured only when status.bits.addrv set */
} mca_mci_bank_t;
69
70 typedef struct mca_state {
71 boolean_t mca_is_saved;
72 boolean_t mca_is_valid; /* some state is valid */
73 ia32_mcg_ctl_t mca_mcg_ctl;
74 ia32_mcg_status_t mca_mcg_status;
75 mca_mci_bank_t mca_error_bank[0];
76 } mca_state_t;
77
/*
 * Panic-dump progress, shared by all hardware threads:
 *   CLEAR   - no dump has begun
 *   DUMPING - one thread owns the panic-log dump; others spin
 *   DUMPED  - dump complete; spinning threads may return
 */
typedef enum {
	CLEAR,
	DUMPING,
	DUMPED
} mca_dump_state_t;
static volatile mca_dump_state_t mca_dump_state = CLEAR;
84
85 static void
86 mca_get_availability(void)
87 {
88 uint64_t features = cpuid_info()->cpuid_features;
89 uint32_t family = cpuid_info()->cpuid_family;
90 uint32_t model = cpuid_info()->cpuid_model;
91 uint32_t stepping = cpuid_info()->cpuid_stepping;
92
93 if ((model == CPUID_MODEL_HASWELL && stepping < 3) ||
94 (model == CPUID_MODEL_HASWELL_ULT && stepping < 1) ||
95 (model == CPUID_MODEL_CRYSTALWELL && stepping < 1)) {
96 panic("Haswell pre-C0 steppings are not supported");
97 }
98
99 mca_MCE_present = (features & CPUID_FEATURE_MCE) != 0;
100 mca_MCA_present = (features & CPUID_FEATURE_MCA) != 0;
101 mca_family = family;
102
103 /*
104 * If MCA, the number of banks etc is reported by the IA32_MCG_CAP MSR.
105 */
106 if (mca_MCA_present) {
107 ia32_mcg_cap.u64 = rdmsr64(IA32_MCG_CAP);
108 mca_error_bank_count = ia32_mcg_cap.bits.count;
109 mca_control_MSR_present = ia32_mcg_cap.bits.mcg_ctl_p;
110 mca_cmci_present = ia32_mcg_cap.bits.mcg_ext_corr_err_p;
111 }
112 }
113
114 void
115 mca_cpu_init(void)
116 {
117 unsigned int i;
118
119 /*
120 * The first (boot) processor is responsible for discovering the
121 * machine check architecture present on this machine.
122 */
123 if (!mca_initialized) {
124 mca_get_availability();
125 mca_initialized = TRUE;
126 simple_lock_init(&mca_lock, 0);
127 }
128
129 if (mca_MCA_present) {
130 /* Enable all MCA features */
131 if (mca_control_MSR_present) {
132 wrmsr64(IA32_MCG_CTL, IA32_MCG_CTL_ENABLE);
133 }
134
135 switch (mca_family) {
136 case 0x06:
137 /* Enable all but mc0 */
138 for (i = 1; i < mca_error_bank_count; i++) {
139 wrmsr64(IA32_MCi_CTL(i), 0xFFFFFFFFFFFFFFFFULL);
140 }
141
142 /* Clear all errors */
143 for (i = 0; i < mca_error_bank_count; i++) {
144 wrmsr64(IA32_MCi_STATUS(i), 0ULL);
145 }
146 break;
147 case 0x0F:
148 /* Enable all banks */
149 for (i = 0; i < mca_error_bank_count; i++) {
150 wrmsr64(IA32_MCi_CTL(i), 0xFFFFFFFFFFFFFFFFULL);
151 }
152
153 /* Clear all errors */
154 for (i = 0; i < mca_error_bank_count; i++) {
155 wrmsr64(IA32_MCi_STATUS(i), 0ULL);
156 }
157 break;
158 }
159 }
160
161 /* Enable machine check exception handling if available */
162 if (mca_MCE_present) {
163 set_cr4(get_cr4() | CR4_MCE);
164 }
165 }
166
167 boolean_t
168 mca_is_cmci_present(void)
169 {
170 if (!mca_initialized) {
171 mca_cpu_init();
172 }
173 return mca_cmci_present;
174 }
175
/*
 * Allocate the per-cpu buffer used to save machine-check state at
 * exception time; also ensures the boot cpu has its own allocation.
 */
void
mca_cpu_alloc(cpu_data_t *cdp)
{
	vm_size_t mca_state_size;

	/*
	 * Allocate space for an array of error banks.
	 */
	mca_state_size = sizeof(mca_state_t) +
	    sizeof(mca_mci_bank_t) * mca_error_bank_count;
	cdp->cpu_mca_state = zalloc_permanent(mca_state_size, ZALIGN_PTR);
	if (cdp->cpu_mca_state == NULL) {
		printf("mca_cpu_alloc() failed for cpu %d\n", cdp->cpu_number);
		return;
	}

	/*
	 * If the boot processor has yet to have its allocation made,
	 * do this now.
	 */
	if (cpu_datap(master_cpu)->cpu_mca_state == NULL) {
		mca_cpu_alloc(cpu_datap(master_cpu));
	}
}
200
/*
 * Capture this cpu's machine-check MSRs into the supplied per-cpu
 * save area.  Called at machine-check/panic time; requires interrupts
 * disabled or preemption off (asserted below).  Sets mca_is_valid if
 * any bank holds valid error state, and mca_is_saved last so readers
 * polling that flag see a complete record.
 */
static void
mca_save_state(mca_state_t *mca_state)
{
	mca_mci_bank_t *bank;
	unsigned int i;

	assert(!ml_get_interrupts_enabled() || get_preemption_level() > 0);

	if (mca_state == NULL) {
		return;
	}

	/* Global machine-check registers first. */
	mca_state->mca_mcg_ctl = mca_control_MSR_present ?
	    rdmsr64(IA32_MCG_CTL) : 0ULL;
	mca_state->mca_mcg_status.u64 = rdmsr64(IA32_MCG_STATUS);

	/* Then every error bank; ADDR/MISC are read only when flagged valid. */
	bank = (mca_mci_bank_t *) &mca_state->mca_error_bank[0];
	for (i = 0; i < mca_error_bank_count; i++, bank++) {
		bank->mca_mci_ctl = rdmsr64(IA32_MCi_CTL(i));
		bank->mca_mci_status.u64 = rdmsr64(IA32_MCi_STATUS(i));
		if (!bank->mca_mci_status.bits.val) {
			continue;
		}
		bank->mca_mci_misc = (bank->mca_mci_status.bits.miscv)?
		    rdmsr64(IA32_MCi_MISC(i)) : 0ULL;
		bank->mca_mci_addr = (bank->mca_mci_status.bits.addrv)?
		    rdmsr64(IA32_MCi_ADDR(i)) : 0ULL;
		mca_state->mca_is_valid = TRUE;
	}

	/*
	 * If we're the first thread with MCA state, point our package to it
	 * and don't care about races
	 */
	if (x86_package()->mca_state == NULL) {
		x86_package()->mca_state = mca_state;
	}

	mca_state->mca_is_saved = TRUE;
}
241
242 void
243 mca_check_save(void)
244 {
245 if (mca_dump_state > CLEAR) {
246 mca_save_state(current_cpu_datap()->cpu_mca_state);
247 }
248 }
249
250 static void
251 mca_report_cpu_info(void)
252 {
253 i386_cpu_info_t *infop = cpuid_info();
254
255 paniclog_append_noflush(" family: %d model: %d stepping: %d microcode: %d\n",
256 infop->cpuid_family,
257 infop->cpuid_model,
258 infop->cpuid_stepping,
259 infop->cpuid_microcode_version);
260 paniclog_append_noflush(" signature: 0x%x\n",
261 infop->cpuid_signature);
262 paniclog_append_noflush(" %s\n",
263 infop->cpuid_brand_string);
264 }
265
266 static void
267 mca_dump_bank(mca_state_t *state, int i)
268 {
269 mca_mci_bank_t *bank;
270 ia32_mci_status_t status;
271
272 bank = &state->mca_error_bank[i];
273 status = bank->mca_mci_status;
274 if (!status.bits.val) {
275 return;
276 }
277
278 paniclog_append_noflush(" IA32_MC%d_STATUS(0x%x): 0x%016qx\n",
279 i, IA32_MCi_STATUS(i), status.u64);
280
281 if (status.bits.addrv) {
282 paniclog_append_noflush(" IA32_MC%d_ADDR(0x%x): 0x%016qx\n",
283 i, IA32_MCi_ADDR(i), bank->mca_mci_addr);
284 }
285
286 if (status.bits.miscv) {
287 paniclog_append_noflush(" IA32_MC%d_MISC(0x%x): 0x%016qx\n",
288 i, IA32_MCi_MISC(i), bank->mca_mci_misc);
289 }
290 }
291
292 static void
293 mca_cpu_dump_error_banks(mca_state_t *state)
294 {
295 unsigned int i;
296
297 if (!state->mca_is_valid) {
298 return;
299 }
300
301 for (i = 0; i < mca_error_bank_count; i++) {
302 mca_dump_bank(state, i);
303 }
304 }
305
/*
 * Dump machine-check state for all cpus to the panic log.
 * The first caller becomes the dumper; concurrent callers spin until
 * the dump completes (mca_dump_state reaches DUMPED) and then return.
 */
void
mca_dump(void)
{
	mca_state_t *mca_state = current_cpu_datap()->cpu_mca_state;
	uint64_t deadline;
	unsigned int i = 0;

	/*
	 * Capture local MCA registers to per-cpu data.
	 */
	mca_save_state(mca_state);

	/*
	 * Serialize: the first caller controls dumping MCA registers,
	 * other threads spin meantime.
	 */
	simple_lock(&mca_lock, LCK_GRP_NULL);
	if (mca_dump_state > CLEAR) {
		/* Another thread owns the dump; wait for it to finish. */
		simple_unlock(&mca_lock);
		while (mca_dump_state == DUMPING) {
			cpu_pause();
		}
		return;
	}
	mca_dump_state = DUMPING;
	simple_unlock(&mca_lock);

	/*
	 * Wait for all other hardware threads to save their state.
	 * Or timeout.
	 * NOTE(review): cpu_datap(i)->cpu_mca_state is dereferenced without
	 * a NULL check here -- presumably every cpu's allocation is made
	 * before a machine check can occur; confirm against mca_cpu_alloc().
	 */
	deadline = mach_absolute_time() + LockTimeOut;
	while (mach_absolute_time() < deadline && i < real_ncpus) {
		if (!cpu_datap(i)->cpu_mca_state->mca_is_saved) {
			/* Spin on cpu i until it saves, or the deadline passes. */
			cpu_pause();
			continue;
		}
		i += 1;
	}

	/*
	 * Report machine-check capabilities:
	 */
	paniclog_append_noflush("Machine-check capabilities: 0x%016qx\n", ia32_mcg_cap.u64);

	mca_report_cpu_info();

	paniclog_append_noflush(" %d error-reporting banks\n", mca_error_bank_count);

	/*
	 * Dump all processor state:
	 */
	for (i = 0; i < real_ncpus; i++) {
		mca_state_t *mcsp = cpu_datap(i)->cpu_mca_state;
		ia32_mcg_status_t status;

		/* Skip cpus with nothing saved or nothing valid to report. */
		if (mcsp == NULL ||
		    mcsp->mca_is_saved == FALSE ||
		    mcsp->mca_mcg_status.u64 == 0 ||
		    !mcsp->mca_is_valid) {
			continue;
		}
		status = mcsp->mca_mcg_status;
		paniclog_append_noflush("Processor %d: IA32_MCG_STATUS: 0x%016qx\n",
		    i, status.u64);
		mca_cpu_dump_error_banks(mcsp);
	}

	/* Update state to release any other threads. */
	mca_dump_state = DUMPED;
}
377
378
#if DEVELOPMENT || DEBUG
extern void mca_exception_panic(void);
extern void lapic_trigger_MC(void);
/*
 * Test hook (development/debug kernels only): provoke a machine-check
 * exception via the local APIC to exercise the panic/dump path.
 */
void
mca_exception_panic(void)
{
	lapic_trigger_MC();
}
#endif