/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/kalloc.h>
#include <mach/mach_time.h>
#include <i386/cpu_data.h>
#include <i386/cpuid.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
#include <i386/machine_check.h>
#include <i386/proc_reg.h>
/*
 * At the time of the machine-check exception, all hardware-threads panic.
 * Each thread saves the state of its MCA registers to its per-cpu data area.
 *
 * State reporting is serialized so one thread dumps all valid state for all
 * threads to the panic log. This may entail spinning waiting for other
 * threads to complete saving state to memory. A timeout applies to this wait
 * -- in particular, a 3-strikes timeout may prevent a thread from taking
 * part in the serialized reporting.
 */
/* Return str when cond is true, the empty string otherwise (for kdb_printf).
 * Parameter renamed from 'bool' to avoid colliding with the C99 <stdbool.h>
 * macro / C23 keyword. */
#define IF(cond,str)	((cond) ? (str) : "")
52 static boolean_t mca_initialized
= FALSE
;
53 static boolean_t mca_MCE_present
= FALSE
;
54 static boolean_t mca_MCA_present
= FALSE
;
55 static uint32_t mca_family
= 0;
56 static unsigned int mca_error_bank_count
= 0;
57 static boolean_t mca_control_MSR_present
= FALSE
;
58 static boolean_t mca_threshold_status_present
= FALSE
;
59 static boolean_t mca_sw_error_recovery_present
= FALSE
;
60 static boolean_t mca_extended_MSRs_present
= FALSE
;
61 static unsigned int mca_extended_MSRs_count
= 0;
62 static boolean_t mca_cmci_present
= FALSE
;
63 static ia32_mcg_cap_t ia32_mcg_cap
;
64 decl_simple_lock_data(static, mca_lock
);
67 ia32_mci_ctl_t mca_mci_ctl
;
68 ia32_mci_status_t mca_mci_status
;
69 ia32_mci_misc_t mca_mci_misc
;
70 ia32_mci_addr_t mca_mci_addr
;
73 typedef struct mca_state
{
74 boolean_t mca_is_saved
;
75 boolean_t mca_is_valid
; /* some state is valid */
76 ia32_mcg_ctl_t mca_mcg_ctl
;
77 ia32_mcg_status_t mca_mcg_status
;
78 mca_mci_bank_t mca_error_bank
[0];
/*
 * Serialization state for panic-time dumping: the first thread through
 * mca_dump() moves CLEAR -> DUMPING -> DUMPED; others spin on DUMPING.
 * NOTE(review): the enum typedef is reconstructed from its use below
 * (CLEAR/DUMPING/DUMPED) -- confirm against the original header/source.
 */
typedef enum {
	CLEAR,
	DUMPING,
	DUMPED
} mca_dump_state_t;
static volatile mca_dump_state_t mca_dump_state = CLEAR;
89 mca_get_availability(void)
91 uint64_t features
= cpuid_info()->cpuid_features
;
92 uint32_t family
= cpuid_info()->cpuid_family
;
93 uint32_t model
= cpuid_info()->cpuid_model
;
94 uint32_t stepping
= cpuid_info()->cpuid_stepping
;
96 mca_MCE_present
= (features
& CPUID_FEATURE_MCE
) != 0;
97 mca_MCA_present
= (features
& CPUID_FEATURE_MCA
) != 0;
100 if ((model
== CPUID_MODEL_HASWELL
&& stepping
< 3) ||
101 (model
== CPUID_MODEL_HASWELL_ULT
&& stepping
< 1) ||
102 (model
== CPUID_MODEL_CRYSTALWELL
&& stepping
< 1))
103 panic("Haswell pre-C0 steppings are not supported");
106 * If MCA, the number of banks etc is reported by the IA32_MCG_CAP MSR.
108 if (mca_MCA_present
) {
109 ia32_mcg_cap
.u64
= rdmsr64(IA32_MCG_CAP
);
110 mca_error_bank_count
= ia32_mcg_cap
.bits
.count
;
111 mca_control_MSR_present
= ia32_mcg_cap
.bits
.mcg_ctl_p
;
112 mca_threshold_status_present
= ia32_mcg_cap
.bits
.mcg_tes_p
;
113 mca_sw_error_recovery_present
= ia32_mcg_cap
.bits
.mcg_ser_p
;
114 mca_cmci_present
= ia32_mcg_cap
.bits
.mcg_ext_corr_err_p
;
115 if (family
== 0x0F) {
116 mca_extended_MSRs_present
= ia32_mcg_cap
.bits
.mcg_ext_p
;
117 mca_extended_MSRs_count
= ia32_mcg_cap
.bits
.mcg_ext_cnt
;
128 * The first (boot) processor is responsible for discovering the
129 * machine check architecture present on this machine.
131 if (!mca_initialized
) {
132 mca_get_availability();
133 mca_initialized
= TRUE
;
134 simple_lock_init(&mca_lock
, 0);
137 if (mca_MCA_present
) {
139 /* Enable all MCA features */
140 if (mca_control_MSR_present
)
141 wrmsr64(IA32_MCG_CTL
, IA32_MCG_CTL_ENABLE
);
143 switch (mca_family
) {
145 /* Enable all but mc0 */
146 for (i
= 1; i
< mca_error_bank_count
; i
++)
147 wrmsr64(IA32_MCi_CTL(i
),0xFFFFFFFFFFFFFFFFULL
);
149 /* Clear all errors */
150 for (i
= 0; i
< mca_error_bank_count
; i
++)
151 wrmsr64(IA32_MCi_STATUS(i
), 0ULL);
154 /* Enable all banks */
155 for (i
= 0; i
< mca_error_bank_count
; i
++)
156 wrmsr64(IA32_MCi_CTL(i
),0xFFFFFFFFFFFFFFFFULL
);
158 /* Clear all errors */
159 for (i
= 0; i
< mca_error_bank_count
; i
++)
160 wrmsr64(IA32_MCi_STATUS(i
), 0ULL);
165 /* Enable machine check exception handling if available */
166 if (mca_MCE_present
) {
167 set_cr4(get_cr4()|CR4_MCE
);
172 mca_is_cmci_present(void)
174 if (!mca_initialized
)
176 return mca_cmci_present
;
180 mca_cpu_alloc(cpu_data_t
*cdp
)
182 vm_size_t mca_state_size
;
185 * Allocate space for an array of error banks.
187 mca_state_size
= sizeof(mca_state_t
) +
188 sizeof(mca_mci_bank_t
) * mca_error_bank_count
;
189 cdp
->cpu_mca_state
= kalloc(mca_state_size
);
190 if (cdp
->cpu_mca_state
== NULL
) {
191 printf("mca_cpu_alloc() failed for cpu %d\n", cdp
->cpu_number
);
194 bzero((void *) cdp
->cpu_mca_state
, mca_state_size
);
197 * If the boot processor is yet have its allocation made,
200 if (cpu_datap(master_cpu
)->cpu_mca_state
== NULL
)
201 mca_cpu_alloc(cpu_datap(master_cpu
));
205 mca_save_state(mca_state_t
*mca_state
)
207 mca_mci_bank_t
*bank
;
210 assert(!ml_get_interrupts_enabled() || get_preemption_level() > 0);
212 if (mca_state
== NULL
)
215 mca_state
->mca_mcg_ctl
= mca_control_MSR_present
?
216 rdmsr64(IA32_MCG_CTL
) : 0ULL;
217 mca_state
->mca_mcg_status
.u64
= rdmsr64(IA32_MCG_STATUS
);
219 bank
= (mca_mci_bank_t
*) &mca_state
->mca_error_bank
[0];
220 for (i
= 0; i
< mca_error_bank_count
; i
++, bank
++) {
221 bank
->mca_mci_ctl
= rdmsr64(IA32_MCi_CTL(i
));
222 bank
->mca_mci_status
.u64
= rdmsr64(IA32_MCi_STATUS(i
));
223 if (!bank
->mca_mci_status
.bits
.val
)
225 bank
->mca_mci_misc
= (bank
->mca_mci_status
.bits
.miscv
)?
226 rdmsr64(IA32_MCi_MISC(i
)) : 0ULL;
227 bank
->mca_mci_addr
= (bank
->mca_mci_status
.bits
.addrv
)?
228 rdmsr64(IA32_MCi_ADDR(i
)) : 0ULL;
229 mca_state
->mca_is_valid
= TRUE
;
233 * If we're the first thread with MCA state, point our package to it
234 * and don't care about races
236 if (x86_package()->mca_state
== NULL
)
237 x86_package()->mca_state
= mca_state
;
239 mca_state
->mca_is_saved
= TRUE
;
245 if (mca_dump_state
> CLEAR
)
246 mca_save_state(current_cpu_datap()->cpu_mca_state
);
249 static void mca_dump_64bit_state(void)
251 kdb_printf("Extended Machine Check State:\n");
252 kdb_printf(" IA32_MCG_RAX: 0x%016qx\n", rdmsr64(IA32_MCG_RAX
));
253 kdb_printf(" IA32_MCG_RBX: 0x%016qx\n", rdmsr64(IA32_MCG_RBX
));
254 kdb_printf(" IA32_MCG_RCX: 0x%016qx\n", rdmsr64(IA32_MCG_RCX
));
255 kdb_printf(" IA32_MCG_RDX: 0x%016qx\n", rdmsr64(IA32_MCG_RDX
));
256 kdb_printf(" IA32_MCG_RSI: 0x%016qx\n", rdmsr64(IA32_MCG_RSI
));
257 kdb_printf(" IA32_MCG_RDI: 0x%016qx\n", rdmsr64(IA32_MCG_RDI
));
258 kdb_printf(" IA32_MCG_RBP: 0x%016qx\n", rdmsr64(IA32_MCG_RBP
));
259 kdb_printf(" IA32_MCG_RSP: 0x%016qx\n", rdmsr64(IA32_MCG_RSP
));
260 kdb_printf(" IA32_MCG_RFLAGS: 0x%016qx\n", rdmsr64(IA32_MCG_RFLAGS
));
261 kdb_printf(" IA32_MCG_RIP: 0x%016qx\n", rdmsr64(IA32_MCG_RIP
));
262 kdb_printf(" IA32_MCG_MISC: 0x%016qx\n", rdmsr64(IA32_MCG_MISC
));
263 kdb_printf(" IA32_MCG_R8: 0x%016qx\n", rdmsr64(IA32_MCG_R8
));
264 kdb_printf(" IA32_MCG_R9: 0x%016qx\n", rdmsr64(IA32_MCG_R9
));
265 kdb_printf(" IA32_MCG_R10: 0x%016qx\n", rdmsr64(IA32_MCG_R10
));
266 kdb_printf(" IA32_MCG_R11: 0x%016qx\n", rdmsr64(IA32_MCG_R11
));
267 kdb_printf(" IA32_MCG_R12: 0x%016qx\n", rdmsr64(IA32_MCG_R12
));
268 kdb_printf(" IA32_MCG_R13: 0x%016qx\n", rdmsr64(IA32_MCG_R13
));
269 kdb_printf(" IA32_MCG_R14: 0x%016qx\n", rdmsr64(IA32_MCG_R14
));
270 kdb_printf(" IA32_MCG_R15: 0x%016qx\n", rdmsr64(IA32_MCG_R15
));
273 static uint32_t rdmsr32(uint32_t msr
)
275 return (uint32_t) rdmsr64(msr
);
278 static void mca_dump_32bit_state(void)
280 kdb_printf("Extended Machine Check State:\n");
281 kdb_printf(" IA32_MCG_EAX: 0x%08x\n", rdmsr32(IA32_MCG_EAX
));
282 kdb_printf(" IA32_MCG_EBX: 0x%08x\n", rdmsr32(IA32_MCG_EBX
));
283 kdb_printf(" IA32_MCG_ECX: 0x%08x\n", rdmsr32(IA32_MCG_ECX
));
284 kdb_printf(" IA32_MCG_EDX: 0x%08x\n", rdmsr32(IA32_MCG_EDX
));
285 kdb_printf(" IA32_MCG_ESI: 0x%08x\n", rdmsr32(IA32_MCG_ESI
));
286 kdb_printf(" IA32_MCG_EDI: 0x%08x\n", rdmsr32(IA32_MCG_EDI
));
287 kdb_printf(" IA32_MCG_EBP: 0x%08x\n", rdmsr32(IA32_MCG_EBP
));
288 kdb_printf(" IA32_MCG_ESP: 0x%08x\n", rdmsr32(IA32_MCG_ESP
));
289 kdb_printf(" IA32_MCG_EFLAGS: 0x%08x\n", rdmsr32(IA32_MCG_EFLAGS
));
290 kdb_printf(" IA32_MCG_EIP: 0x%08x\n", rdmsr32(IA32_MCG_EIP
));
291 kdb_printf(" IA32_MCG_MISC: 0x%08x\n", rdmsr32(IA32_MCG_MISC
));
295 mca_report_cpu_info(void)
297 i386_cpu_info_t
*infop
= cpuid_info();
299 kdb_printf(" family: %d model: %d stepping: %d microcode: %d\n",
302 infop
->cpuid_stepping
,
303 infop
->cpuid_microcode_version
);
304 kdb_printf(" %s\n", infop
->cpuid_brand_string
);
307 static const char *mc8_memory_operation
[] = {
308 [MC8_MMM_GENERIC
] = "generic",
309 [MC8_MMM_READ
] = "read",
310 [MC8_MMM_WRITE
] = "write",
311 [MC8_MMM_ADDRESS_COMMAND
] = "address/command",
312 [MC8_MMM_RESERVED
] = "reserved"
316 mca_dump_bank_mc8(mca_state_t
*state
, int i
)
318 mca_mci_bank_t
*bank
;
319 ia32_mci_status_t status
;
320 struct ia32_mc8_specific mc8
;
323 bank
= &state
->mca_error_bank
[i
];
324 status
= bank
->mca_mci_status
;
325 mc8
= status
.bits_mc8
;
326 mmm
= MIN(mc8
.memory_operation
, MC8_MMM_RESERVED
);
329 " IA32_MC%d_STATUS(0x%x): 0x%016qx %svalid\n",
330 i
, IA32_MCi_STATUS(i
), status
.u64
, IF(!status
.bits
.val
, "in"));
331 if (!status
.bits
.val
)
335 " Channel number: %d%s\n"
336 " Memory Operation: %s\n"
337 " Machine-specific error: %s%s%s%s%s%s%s%s%s\n"
338 " COR_ERR_CNT: %d\n",
340 IF(mc8
.channel_number
== 15, " (unknown)"),
341 mc8_memory_operation
[mmm
],
342 IF(mc8
.read_ecc
, "Read ECC "),
343 IF(mc8
.ecc_on_a_scrub
, "ECC on scrub "),
344 IF(mc8
.write_parity
, "Write parity "),
345 IF(mc8
.redundant_memory
, "Redundant memory "),
346 IF(mc8
.sparing
, "Sparing/Resilvering "),
347 IF(mc8
.access_out_of_range
, "Access out of Range "),
348 IF(mc8
.rtid_out_of_range
, "RTID out of Range "),
349 IF(mc8
.address_parity
, "Address Parity "),
350 IF(mc8
.byte_enable_parity
, "Byte Enable Parity "),
353 " Status bits:\n%s%s%s%s%s%s",
354 IF(status
.bits
.pcc
, " Processor context corrupt\n"),
355 IF(status
.bits
.addrv
, " ADDR register valid\n"),
356 IF(status
.bits
.miscv
, " MISC register valid\n"),
357 IF(status
.bits
.en
, " Error enabled\n"),
358 IF(status
.bits
.uc
, " Uncorrected error\n"),
359 IF(status
.bits
.over
, " Error overflow\n"));
360 if (status
.bits
.addrv
)
362 " IA32_MC%d_ADDR(0x%x): 0x%016qx\n",
363 i
, IA32_MCi_ADDR(i
), bank
->mca_mci_addr
);
364 if (status
.bits
.miscv
) {
365 ia32_mc8_misc_t mc8_misc
;
367 mc8_misc
.u64
= bank
->mca_mci_misc
;
369 " IA32_MC%d_MISC(0x%x): 0x%016qx\n"
374 i
, IA32_MCi_MISC(i
), mc8_misc
.u64
,
377 mc8_misc
.bits
.channel
,
378 (int) mc8_misc
.bits
.syndrome
);
382 static const char *mca_threshold_status
[] = {
383 [THRESHOLD_STATUS_NO_TRACKING
] = "No tracking",
384 [THRESHOLD_STATUS_GREEN
] = "Green",
385 [THRESHOLD_STATUS_YELLOW
] = "Yellow",
386 [THRESHOLD_STATUS_RESERVED
] = "Reserved"
390 mca_dump_bank(mca_state_t
*state
, int i
)
392 mca_mci_bank_t
*bank
;
393 ia32_mci_status_t status
;
395 bank
= &state
->mca_error_bank
[i
];
396 status
= bank
->mca_mci_status
;
398 " IA32_MC%d_STATUS(0x%x): 0x%016qx %svalid\n",
399 i
, IA32_MCi_STATUS(i
), status
.u64
, IF(!status
.bits
.val
, "in"));
400 if (!status
.bits
.val
)
404 " MCA error code: 0x%04x\n",
405 status
.bits
.mca_error
);
407 " Model specific error code: 0x%04x\n",
408 status
.bits
.model_specific_error
);
409 if (!mca_threshold_status_present
) {
411 " Other information: 0x%08x\n",
412 status
.bits
.other_information
);
414 int threshold
= status
.bits_tes_p
.threshold
;
416 " Other information: 0x%08x\n"
417 " Threshold-based status: %s\n",
418 status
.bits_tes_p
.other_information
,
419 (status
.bits_tes_p
.uc
== 0) ?
420 mca_threshold_status
[threshold
] :
423 if (mca_threshold_status_present
&&
424 mca_sw_error_recovery_present
) {
426 " Software Error Recovery:\n%s%s",
427 IF(status
.bits_tes_p
.ar
, " Recovery action reqd\n"),
428 IF(status
.bits_tes_p
.s
, " Signaling UCR error\n"));
431 " Status bits:\n%s%s%s%s%s%s",
432 IF(status
.bits
.pcc
, " Processor context corrupt\n"),
433 IF(status
.bits
.addrv
, " ADDR register valid\n"),
434 IF(status
.bits
.miscv
, " MISC register valid\n"),
435 IF(status
.bits
.en
, " Error enabled\n"),
436 IF(status
.bits
.uc
, " Uncorrected error\n"),
437 IF(status
.bits
.over
, " Error overflow\n"));
438 if (status
.bits
.addrv
)
440 " IA32_MC%d_ADDR(0x%x): 0x%016qx\n",
441 i
, IA32_MCi_ADDR(i
), bank
->mca_mci_addr
);
442 if (status
.bits
.miscv
)
444 " IA32_MC%d_MISC(0x%x): 0x%016qx\n",
445 i
, IA32_MCi_MISC(i
), bank
->mca_mci_misc
);
449 mca_cpu_dump_error_banks(mca_state_t
*state
)
453 if (!state
->mca_is_valid
)
456 kdb_printf("MCA error-reporting registers:\n");
457 for (i
= 0; i
< mca_error_bank_count
; i
++ ) {
458 if (i
== 8 && state
== x86_package()->mca_state
) {
463 /* Dump MC8 for this package */
464 kdb_printf(" Package %d logged:\n",
465 x86_package()->ppkg_num
);
466 mca_dump_bank_mc8(state
, 8);
469 mca_dump_bank(state
, i
);
476 mca_state_t
*mca_state
= current_cpu_datap()->cpu_mca_state
;
481 * Capture local MCA registers to per-cpu data.
483 mca_save_state(mca_state
);
486 * Serialize: the first caller controls dumping MCA registers,
487 * other threads spin meantime.
489 simple_lock(&mca_lock
);
490 if (mca_dump_state
> CLEAR
) {
491 simple_unlock(&mca_lock
);
492 while (mca_dump_state
== DUMPING
)
496 mca_dump_state
= DUMPING
;
497 simple_unlock(&mca_lock
);
500 * Wait for all other hardware threads to save their state.
503 deadline
= mach_absolute_time() + LockTimeOut
;
504 while (mach_absolute_time() < deadline
&& i
< real_ncpus
) {
505 if (!cpu_datap(i
)->cpu_mca_state
->mca_is_saved
) {
513 * Report machine-check capabilities:
516 "Machine-check capabilities 0x%016qx:\n", ia32_mcg_cap
.u64
);
518 mca_report_cpu_info();
521 " %d error-reporting banks\n%s%s%s", mca_error_bank_count
,
522 IF(mca_control_MSR_present
,
523 " control MSR present\n"),
524 IF(mca_threshold_status_present
,
525 " threshold-based error status present\n"),
527 " extended corrected memory error handling present\n"));
528 if (mca_extended_MSRs_present
)
530 " %d extended MSRs present\n", mca_extended_MSRs_count
);
533 * Dump all processor state:
535 for (i
= 0; i
< real_ncpus
; i
++) {
536 mca_state_t
*mcsp
= cpu_datap(i
)->cpu_mca_state
;
537 ia32_mcg_status_t status
;
539 kdb_printf("Processor %d: ", i
);
541 mcsp
->mca_is_saved
== FALSE
||
542 mcsp
->mca_mcg_status
.u64
== 0) {
543 kdb_printf("no machine-check status reported\n");
546 if (!mcsp
->mca_is_valid
) {
547 kdb_printf("no valid machine-check state\n");
550 status
= mcsp
->mca_mcg_status
;
552 "machine-check status 0x%016qx:\n%s%s%s", status
.u64
,
553 IF(status
.bits
.ripv
, " restart IP valid\n"),
554 IF(status
.bits
.eipv
, " error IP valid\n"),
555 IF(status
.bits
.mcip
, " machine-check in progress\n"));
557 mca_cpu_dump_error_banks(mcsp
);
561 * Dump any extended machine state:
563 if (mca_extended_MSRs_present
) {
564 if (cpu_mode_is64bit())
565 mca_dump_64bit_state();
567 mca_dump_32bit_state();
570 /* Update state to release any other threads. */
571 mca_dump_state
= DUMPED
;
/*
 * Debug hook: deliberately provoke a machine-check panic (DEBUG builds only).
 * NOTE(review): the #if DEBUG body was dropped by the text corruption and is
 * reconstructed from the visible mtrr_lapic_cached() declaration and the
 * "requires DEBUG build" message -- confirm against the original source.
 */
extern void mca_exception_panic(void);
extern void mtrr_lapic_cached(void);
void mca_exception_panic(void)
{
#if DEBUG
	mtrr_lapic_cached();
#else
	kprintf("mca_exception_panic() requires DEBUG build\n");
#endif
}