/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/kalloc.h>
#include <mach/mach_time.h>
#include <i386/cpu_data.h>
#include <i386/cpuid.h>
#include <i386/cpu_topology.h>
#include <i386/cpu_threads.h>
#include <i386/machine_cpu.h>
#include <i386/machine_check.h>
#include <i386/proc_reg.h>

#define IF(bool,str)	((bool) ? (str) : "")

static boolean_t	mca_initialized = FALSE;
static boolean_t	mca_MCE_present = FALSE;
static boolean_t	mca_MCA_present = FALSE;
static uint32_t		mca_family = 0;
static unsigned int	mca_error_bank_count = 0;
static boolean_t	mca_control_MSR_present = FALSE;
static boolean_t	mca_threshold_status_present = FALSE;
static boolean_t	mca_sw_error_recovery_present = FALSE;
static boolean_t	mca_extended_MSRs_present = FALSE;
static unsigned int	mca_extended_MSRs_count = 0;
static boolean_t	mca_cmci_present = FALSE;
static ia32_mcg_cap_t	ia32_mcg_cap;
decl_simple_lock_data(static, mca_lock);

typedef struct {
	ia32_mci_ctl_t		mca_mci_ctl;
	ia32_mci_status_t	mca_mci_status;
	ia32_mci_misc_t		mca_mci_misc;
	ia32_mci_addr_t		mca_mci_addr;
} mca_mci_bank_t;

typedef struct mca_state {
	ia32_mcg_ctl_t		mca_mcg_ctl;
	ia32_mcg_status_t	mca_mcg_status;
	mca_mci_bank_t		mca_error_bank[0];
} mca_state_t;

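/*
 * mca_dump() serializes multiple callers through this tri-state flag:
 * CLEAR (no dump started), DUMPING (the first caller is reporting) and
 * DUMPED (the dump is complete and spinning threads may proceed).
 */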
typedef enum {
	CLEAR,
	DUMPING,
	DUMPED
} mca_dump_state_t;
static volatile mca_dump_state_t mca_dump_state = CLEAR;

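/*
 * Record which machine-check features are available, from the CPUID
 * feature flags and, if MCA is supported, the IA32_MCG_CAP MSR.
 * Called once, on the boot processor.
 */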
static void
mca_get_availability(void)
{
	uint64_t	features = cpuid_info()->cpuid_features;
	uint32_t	family = cpuid_info()->cpuid_family;

	mca_MCE_present = (features & CPUID_FEATURE_MCE) != 0;
	mca_MCA_present = (features & CPUID_FEATURE_MCA) != 0;
	mca_family = family;

	/*
	 * If MCA, the number of banks etc is reported by the IA32_MCG_CAP MSR.
	 */
	if (mca_MCA_present) {
		ia32_mcg_cap.u64 = rdmsr64(IA32_MCG_CAP);
		mca_error_bank_count = ia32_mcg_cap.bits.count;
		mca_control_MSR_present = ia32_mcg_cap.bits.mcg_ctl_p;
		mca_threshold_status_present = ia32_mcg_cap.bits.mcg_tes_p;
		mca_sw_error_recovery_present = ia32_mcg_cap.bits.mcg_ser_p;
		mca_cmci_present = ia32_mcg_cap.bits.mcg_ext_corr_err_p;
		if (family == 0x0F) {
			mca_extended_MSRs_present = ia32_mcg_cap.bits.mcg_ext_p;
			mca_extended_MSRs_count = ia32_mcg_cap.bits.mcg_ext_cnt;
		}
	}
}

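/*
 * Per-cpu initialization: enable the error-reporting banks, clear any
 * stale status, and set CR4.MCE so machine-check exceptions are
 * delivered.  Bank 0 is skipped on family 6 since IA32_MC0_CTL is
 * conventionally left to the platform firmware on P6-family parts.
 */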
void
mca_cpu_init(void)
{
	unsigned int	i;

	/*
	 * The first (boot) processor is responsible for discovering the
	 * machine check architecture present on this machine.
	 */
	if (!mca_initialized) {
		mca_get_availability();
		mca_initialized = TRUE;
		simple_lock_init(&mca_lock, 0);
	}

	if (mca_MCA_present) {

		/* Enable all MCA features */
		if (mca_control_MSR_present)
			wrmsr64(IA32_MCG_CTL, IA32_MCG_CTL_ENABLE);

		switch (mca_family) {
		case 0x06:
			/* Enable all but mc0 */
			for (i = 1; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_CTL(i), 0xFFFFFFFFFFFFFFFFULL);

			/* Clear all errors */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_STATUS(i), 0ULL);
			break;
		case 0x0F:
			/* Enable all banks */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_CTL(i), 0xFFFFFFFFFFFFFFFFULL);

			/* Clear all errors */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_STATUS(i), 0ULL);
			break;
		}
	}

	/* Enable machine check exception handling if available */
	if (mca_MCE_present) {
		set_cr4(get_cr4() | CR4_MCE);
	}
}

boolean_t
mca_is_cmci_present(void)
{
	if (!mca_initialized)
		mca_cpu_init();
	return mca_cmci_present;
}

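/*
 * Allocate this cpu's MCA save area: an mca_state_t header followed by
 * one mca_mci_bank_t per error-reporting bank.
 */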
void
mca_cpu_alloc(cpu_data_t *cdp)
{
	vm_size_t	mca_state_size;

	/*
	 * Allocate space for an array of error banks.
	 */
	mca_state_size = sizeof(mca_state_t) +
		sizeof(mca_mci_bank_t) * mca_error_bank_count;
	cdp->cpu_mca_state = kalloc(mca_state_size);
	if (cdp->cpu_mca_state == NULL) {
		printf("mca_cpu_alloc() failed for cpu %d\n", cdp->cpu_number);
		return;
	}
	bzero((void *) cdp->cpu_mca_state, mca_state_size);

	/*
	 * If the boot processor is yet to have its allocation made,
	 * do this now.
	 */
	if (cpu_datap(master_cpu)->cpu_mca_state == NULL)
		mca_cpu_alloc(cpu_datap(master_cpu));
}

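/*
 * Snapshot the global IA32_MCG_* registers and each bank's CTL and
 * STATUS MSRs (plus MISC/ADDR when their valid bits are set) into the
 * supplied save area.  Must run on the cpu whose state is being
 * captured, with interrupts disabled or preemption raised.
 */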
static void
mca_save_state(mca_state_t *mca_state)
{
	mca_mci_bank_t	*bank;
	unsigned int	i;

	assert(!ml_get_interrupts_enabled() || get_preemption_level() > 0);

	if (mca_state == NULL)
		return;

	mca_state->mca_mcg_ctl = mca_control_MSR_present ?
		rdmsr64(IA32_MCG_CTL) : 0ULL;
	mca_state->mca_mcg_status.u64 = rdmsr64(IA32_MCG_STATUS);

	bank = (mca_mci_bank_t *) &mca_state->mca_error_bank[0];
	for (i = 0; i < mca_error_bank_count; i++, bank++) {
		bank->mca_mci_ctl        = rdmsr64(IA32_MCi_CTL(i));
		bank->mca_mci_status.u64 = rdmsr64(IA32_MCi_STATUS(i));
		if (!bank->mca_mci_status.bits.val)
			continue;
		bank->mca_mci_misc = (bank->mca_mci_status.bits.miscv) ?
			rdmsr64(IA32_MCi_MISC(i)) : 0ULL;
		bank->mca_mci_addr = (bank->mca_mci_status.bits.addrv) ?
			rdmsr64(IA32_MCi_ADDR(i)) : 0ULL;
	}

	/*
	 * If we're the first thread with MCA state, point our package to it
	 * and don't care about races.
	 */
	if (x86_package()->mca_state == NULL)
		x86_package()->mca_state = mca_state;
}

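/*
 * If a dump is under way on some cpu, save this cpu's MCA state so
 * that the dumping cpu can report it.
 */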
void
mca_check_save(void)
{
	if (mca_dump_state > CLEAR)
		mca_save_state(current_cpu_datap()->cpu_mca_state);
}

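/*
 * Dump the extended machine-check state MSRs, which hold a snapshot of
 * the general-purpose registers at the time of the machine check.
 * These are present only on family 0xF processors (see
 * mca_get_availability()).
 */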
static void mca_dump_64bit_state(void)
{
	kdb_printf("Extended Machine Check State:\n");
	kdb_printf(" IA32_MCG_RAX: 0x%016qx\n", rdmsr64(IA32_MCG_RAX));
	kdb_printf(" IA32_MCG_RBX: 0x%016qx\n", rdmsr64(IA32_MCG_RBX));
	kdb_printf(" IA32_MCG_RCX: 0x%016qx\n", rdmsr64(IA32_MCG_RCX));
	kdb_printf(" IA32_MCG_RDX: 0x%016qx\n", rdmsr64(IA32_MCG_RDX));
	kdb_printf(" IA32_MCG_RSI: 0x%016qx\n", rdmsr64(IA32_MCG_RSI));
	kdb_printf(" IA32_MCG_RDI: 0x%016qx\n", rdmsr64(IA32_MCG_RDI));
	kdb_printf(" IA32_MCG_RBP: 0x%016qx\n", rdmsr64(IA32_MCG_RBP));
	kdb_printf(" IA32_MCG_RSP: 0x%016qx\n", rdmsr64(IA32_MCG_RSP));
	kdb_printf(" IA32_MCG_RFLAGS: 0x%016qx\n", rdmsr64(IA32_MCG_RFLAGS));
	kdb_printf(" IA32_MCG_RIP: 0x%016qx\n", rdmsr64(IA32_MCG_RIP));
	kdb_printf(" IA32_MCG_MISC: 0x%016qx\n", rdmsr64(IA32_MCG_MISC));
	kdb_printf(" IA32_MCG_R8: 0x%016qx\n", rdmsr64(IA32_MCG_R8));
	kdb_printf(" IA32_MCG_R9: 0x%016qx\n", rdmsr64(IA32_MCG_R9));
	kdb_printf(" IA32_MCG_R10: 0x%016qx\n", rdmsr64(IA32_MCG_R10));
	kdb_printf(" IA32_MCG_R11: 0x%016qx\n", rdmsr64(IA32_MCG_R11));
	kdb_printf(" IA32_MCG_R12: 0x%016qx\n", rdmsr64(IA32_MCG_R12));
	kdb_printf(" IA32_MCG_R13: 0x%016qx\n", rdmsr64(IA32_MCG_R13));
	kdb_printf(" IA32_MCG_R14: 0x%016qx\n", rdmsr64(IA32_MCG_R14));
	kdb_printf(" IA32_MCG_R15: 0x%016qx\n", rdmsr64(IA32_MCG_R15));
}

static uint32_t rdmsr32(uint32_t msr)
{
	return (uint32_t) rdmsr64(msr);
}

static void mca_dump_32bit_state(void)
{
	kdb_printf("Extended Machine Check State:\n");
	kdb_printf(" IA32_MCG_EAX: 0x%08x\n", rdmsr32(IA32_MCG_EAX));
	kdb_printf(" IA32_MCG_EBX: 0x%08x\n", rdmsr32(IA32_MCG_EBX));
	kdb_printf(" IA32_MCG_ECX: 0x%08x\n", rdmsr32(IA32_MCG_ECX));
	kdb_printf(" IA32_MCG_EDX: 0x%08x\n", rdmsr32(IA32_MCG_EDX));
	kdb_printf(" IA32_MCG_ESI: 0x%08x\n", rdmsr32(IA32_MCG_ESI));
	kdb_printf(" IA32_MCG_EDI: 0x%08x\n", rdmsr32(IA32_MCG_EDI));
	kdb_printf(" IA32_MCG_EBP: 0x%08x\n", rdmsr32(IA32_MCG_EBP));
	kdb_printf(" IA32_MCG_ESP: 0x%08x\n", rdmsr32(IA32_MCG_ESP));
	kdb_printf(" IA32_MCG_EFLAGS: 0x%08x\n", rdmsr32(IA32_MCG_EFLAGS));
	kdb_printf(" IA32_MCG_EIP: 0x%08x\n", rdmsr32(IA32_MCG_EIP));
	kdb_printf(" IA32_MCG_MISC: 0x%08x\n", rdmsr32(IA32_MCG_MISC));
}

static void
mca_report_cpu_info(void)
{
	i386_cpu_info_t	*infop = cpuid_info();

	kdb_printf(" family: %d model: %d stepping: %d microcode: %d\n",
		infop->cpuid_family,
		infop->cpuid_model,
		infop->cpuid_stepping,
		infop->cpuid_microcode_version);
	kdb_printf(" %s\n", infop->cpuid_brand_string);
}

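/*
 * Bank 8 reports memory-controller ("MC8") errors on the processors
 * handled below; the table and routine that follow decode its
 * model-specific status and MISC fields.
 */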
static const char *mc8_memory_operation[] = {
	[MC8_MMM_GENERIC] = "generic",
	[MC8_MMM_READ] = "read",
	[MC8_MMM_WRITE] = "write",
	[MC8_MMM_ADDRESS_COMMAND] = "address/command",
	[MC8_MMM_RESERVED] = "reserved"
};

static void
mca_dump_bank_mc8(mca_state_t *state, int i)
{
	mca_mci_bank_t			*bank;
	ia32_mci_status_t		status;
	struct ia32_mc8_specific	mc8;
	int				mmm;

	bank = &state->mca_error_bank[i];
	status = bank->mca_mci_status;
	mc8 = status.bits_mc8;
	mmm = MIN(mc8.memory_operation, MC8_MMM_RESERVED);

	kdb_printf(
		" IA32_MC%d_STATUS(0x%x): 0x%016qx %svalid\n",
		i, IA32_MCi_STATUS(i), status.u64, IF(!status.bits.val, "in"));
	if (!status.bits.val)
		return;

	kdb_printf(
		" Channel number: %d%s\n"
		" Memory Operation: %s\n"
		" Machine-specific error: %s%s%s%s%s%s%s%s%s\n"
		" COR_ERR_CNT: %d\n",
		mc8.channel_number,
		IF(mc8.channel_number == 15, " (unknown)"),
		mc8_memory_operation[mmm],
		IF(mc8.read_ecc, "Read ECC "),
		IF(mc8.ecc_on_a_scrub, "ECC on scrub "),
		IF(mc8.write_parity, "Write parity "),
		IF(mc8.redundant_memory, "Redundant memory "),
		IF(mc8.sparing, "Sparing/Resilvering "),
		IF(mc8.access_out_of_range, "Access out of Range "),
		IF(mc8.rtid_out_of_range, "RTID out of Range "),
		IF(mc8.address_parity, "Address Parity "),
		IF(mc8.byte_enable_parity, "Byte Enable Parity "),
		mc8.cor_err_cnt);
	kdb_printf(
		" Status bits:\n%s%s%s%s%s%s",
		IF(status.bits.pcc, " Processor context corrupt\n"),
		IF(status.bits.addrv, " ADDR register valid\n"),
		IF(status.bits.miscv, " MISC register valid\n"),
		IF(status.bits.en, " Error enabled\n"),
		IF(status.bits.uc, " Uncorrected error\n"),
		IF(status.bits.over, " Error overflow\n"));
	if (status.bits.addrv)
		kdb_printf(
			" IA32_MC%d_ADDR(0x%x): 0x%016qx\n",
			i, IA32_MCi_ADDR(i), bank->mca_mci_addr);
	if (status.bits.miscv) {
		ia32_mc8_misc_t	mc8_misc;

		mc8_misc.u64 = bank->mca_mci_misc;
		kdb_printf(
			" IA32_MC%d_MISC(0x%x): 0x%016qx\n"
			" RTID: %d\n"
			" DIMM: %d\n"
			" Channel: %d\n"
			" Syndrome: 0x%x\n",
			i, IA32_MCi_MISC(i), mc8_misc.u64,
			mc8_misc.bits.rtid,
			mc8_misc.bits.dimm,
			mc8_misc.bits.channel,
			(int) mc8_misc.bits.syndrome);
	}
}

static const char *mca_threshold_status[] = {
	[THRESHOLD_STATUS_NO_TRACKING] = "No tracking",
	[THRESHOLD_STATUS_GREEN] = "Green",
	[THRESHOLD_STATUS_YELLOW] = "Yellow",
	[THRESHOLD_STATUS_RESERVED] = "Reserved"
};

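/*
 * Generic decode of an error-reporting bank: the MCA and model-specific
 * error codes, threshold-based reporting and software error recovery
 * when supported, and the ADDR/MISC registers when valid.
 */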
static void
mca_dump_bank(mca_state_t *state, int i)
{
	mca_mci_bank_t		*bank;
	ia32_mci_status_t	status;

	bank = &state->mca_error_bank[i];
	status = bank->mca_mci_status;
	kdb_printf(
		" IA32_MC%d_STATUS(0x%x): 0x%016qx %svalid\n",
		i, IA32_MCi_STATUS(i), status.u64, IF(!status.bits.val, "in"));
	if (!status.bits.val)
		return;

	kdb_printf(
		" MCA error code: 0x%04x\n",
		status.bits.mca_error);
	kdb_printf(
		" Model specific error code: 0x%04x\n",
		status.bits.model_specific_error);
	if (!mca_threshold_status_present) {
		kdb_printf(
			" Other information: 0x%08x\n",
			status.bits.other_information);
	} else {
		int	threshold = status.bits_tes_p.threshold;
		kdb_printf(
			" Other information: 0x%08x\n"
			" Threshold-based status: %s\n",
			status.bits_tes_p.other_information,
			(status.bits_tes_p.uc == 0) ?
			    mca_threshold_status[threshold] :
			    "Undefined");
	}
	if (mca_threshold_status_present &&
	    mca_sw_error_recovery_present) {
		kdb_printf(
			" Software Error Recovery:\n%s%s",
			IF(status.bits_tes_p.ar, " Recovery action reqd\n"),
			IF(status.bits_tes_p.s, " Signaling UCR error\n"));
	}
	kdb_printf(
		" Status bits:\n%s%s%s%s%s%s",
		IF(status.bits.pcc, " Processor context corrupt\n"),
		IF(status.bits.addrv, " ADDR register valid\n"),
		IF(status.bits.miscv, " MISC register valid\n"),
		IF(status.bits.en, " Error enabled\n"),
		IF(status.bits.uc, " Uncorrected error\n"),
		IF(status.bits.over, " Error overflow\n"));
	if (status.bits.addrv)
		kdb_printf(
			" IA32_MC%d_ADDR(0x%x): 0x%016qx\n",
			i, IA32_MCi_ADDR(i), bank->mca_mci_addr);
	if (status.bits.miscv)
		kdb_printf(
			" IA32_MC%d_MISC(0x%x): 0x%016qx\n",
			i, IA32_MCi_MISC(i), bank->mca_mci_misc);
}

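/*
 * Walk every error-reporting bank on the dumping cpu.  Bank 8 (fatal
 * memory errors) is special-cased: the MC8 state saved by each package
 * is reported, waiting up to LockTimeOut for remote packages to post
 * their state.
 */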
static void
mca_dump_error_banks(mca_state_t *state)
{
	unsigned int	i;

	kdb_printf("MCA error-reporting registers:\n");
	for (i = 0; i < mca_error_bank_count; i++) {
		if (i == 8) {
			/*
			 * Fatal Memory Error
			 */

			/* Dump MC8 for local package */
			kdb_printf(" Package %d logged:\n",
				x86_package()->ppkg_num);
			mca_dump_bank_mc8(state, 8);

			/* If there are other packages, report their MC8s */
			x86_pkg_t	*pkg;
			uint64_t	deadline;
			for (pkg = x86_pkgs; pkg != NULL; pkg = pkg->next) {
				if (pkg == x86_package())
					continue;
				deadline = mach_absolute_time() + LockTimeOut;
				while (pkg->mca_state == NULL &&
				       mach_absolute_time() < deadline)
					cpu_pause();
				if (pkg->mca_state) {
					kdb_printf(" Package %d logged:\n",
						pkg->ppkg_num);
					mca_dump_bank_mc8(pkg->mca_state, 8);
				} else {
					kdb_printf(" Package %d timed out!\n",
						pkg->ppkg_num);
				}
			}
			continue;
		}
		mca_dump_bank(state, i);
	}
}

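/*
 * Report machine-check capabilities, global status and every error
 * bank.  Only the first caller does the printing; any later callers
 * spin until the dump is complete.
 */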
void
mca_dump(void)
{
	ia32_mcg_status_t	status;
	mca_state_t		*mca_state = current_cpu_datap()->cpu_mca_state;

	/*
	 * Capture local MCA registers to per-cpu data.
	 */
	mca_save_state(mca_state);

	/*
	 * Serialize in case of multiple simultaneous machine-checks.
	 * Only the first caller is allowed to dump MCA registers;
	 * other threads spin in the meantime.
	 */
	simple_lock(&mca_lock);
	if (mca_dump_state > CLEAR) {
		simple_unlock(&mca_lock);
		while (mca_dump_state == DUMPING)
			cpu_pause();
		return;
	}
	mca_dump_state = DUMPING;
	simple_unlock(&mca_lock);

	/*
	 * Report machine-check capabilities:
	 */
	kdb_printf(
		"Machine-check capabilities (cpu %d) 0x%016qx:\n",
		cpu_number(), ia32_mcg_cap.u64);

	mca_report_cpu_info();

	kdb_printf(
		" %d error-reporting banks\n%s%s%s", mca_error_bank_count,
		IF(mca_control_MSR_present,
		   " control MSR present\n"),
		IF(mca_threshold_status_present,
		   " threshold-based error status present\n"),
		IF(mca_cmci_present,
		   " extended corrected memory error handling present\n"));

	if (mca_extended_MSRs_present)
		kdb_printf(
			" %d extended MSRs present\n", mca_extended_MSRs_count);

	/*
	 * Report machine-check status:
	 */
	status.u64 = rdmsr64(IA32_MCG_STATUS);
	kdb_printf(
		"Machine-check status 0x%016qx:\n%s%s%s", status.u64,
		IF(status.bits.ripv, " restart IP valid\n"),
		IF(status.bits.eipv, " error IP valid\n"),
		IF(status.bits.mcip, " machine-check in progress\n"));

	/*
	 * Dump error-reporting registers:
	 */
	mca_dump_error_banks(mca_state);

	/*
	 * Dump any extended machine state:
	 */
	if (mca_extended_MSRs_present) {
		if (cpu_mode_is64bit())
			mca_dump_64bit_state();
		else
			mca_dump_32bit_state();
	}

	/* Update state to release any other threads. */
	mca_dump_state = DUMPED;
}