/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <i386/machine_routines.h>
#include <i386/pmCPU.h>
#include <i386/postcode.h>
+#include <i386/trap.h>
+#include <i386/mp.h> /* mp_rendezvous_break_lock */
#include <architecture/i386/pio.h> /* inb() */
#include <pexpert/i386/boot.h>
#if MACH_KDB
extern int default_preemption_rate;
extern int max_unsafe_quanta;
extern int max_poll_quanta;
-extern int idlehalt;
extern unsigned int panic_is_inited;
int db_run_mode;
-static int packAsc (uint8_t *inbuf, unsigned int length);
-extern int kdb_printf(const char *fmt, ...);
-
volatile int pbtcpu = -1;
hw_lock_data_t pbtlock; /* backtrace print lock */
uint32_t pbtcnt = 0;
-extern const char version[];
-
typedef struct _cframe_t {
struct _cframe_t *prev;
unsigned caller;
unsigned args[0];
} cframe_t;
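/* cframe_t mirrors the IA-32 frame layout produced by the C calling
* convention: the saved EBP at (%ebp) links to the previous frame, the
* return address sits at 4(%ebp), and the caller's outgoing arguments start
* at 8(%ebp), which is why args[] is a zero-length array overlaying the
* words just above the return address. panic_i386_backtrace() below simply
* walks this prev chain.
*/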
-void panic_i386_backtrace(void *_frame, int nframes);
-
-static unsigned panic_io_port = 0;
+static unsigned panic_io_port;
+static unsigned commit_paniclog_to_nvram;
void
-machine_startup()
+machine_startup(void)
{
int boot_arg;
if (PE_parse_boot_arg("debug", &boot_arg)) {
if (boot_arg & DB_HALT) halt_in_debugger=1;
- if (boot_arg & DB_PRT) disableDebugOuput=FALSE;
+ if (boot_arg & DB_PRT) disable_debug_output=FALSE;
if (boot_arg & DB_SLOG) systemLogDiags=TRUE;
if (boot_arg & DB_NMI) panicDebugging=TRUE;
if (boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
}
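/* Example, assuming the usual flag values from <kern/debug.h> (DB_HALT 0x1,
* DB_PRT 0x2, DB_NMI 0x4, DB_SLOG 0x20, DB_LOG_PI_SCRN 0x100): booting with
* debug=0x104 takes the DB_NMI and DB_LOG_PI_SCRN branches above, setting
* panicDebugging and logPanicDataToScreen while leaving the other options
* at their defaults.
*/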
+ if (!PE_parse_boot_arg("nvram_paniclog", &commit_paniclog_to_nvram))
+ commit_paniclog_to_nvram = 1;
+
+ /*
+ * Entering the debugger will put the CPUs into a "safe"
+ * power mode.
+ */
+ if (PE_parse_boot_arg("pmsafe_debug", &boot_arg))
+ pmsafe_debug = boot_arg;
+
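+ /* The flag is consumed by the MP/debugger entry code outside this hunk;
+ * when set, each CPU is expected to be dropped into a safe C-state on
+ * debugger entry, presumably via something like
+ * pmSafeMode(..., PM_SAFE_FL_SAFE) from <i386/pmCPU.h>, and restored on
+ * resume. Passing pmsafe_debug=0 in boot-args opts out of that behaviour.
+ */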
#if NOTYET
hw_lock_init(&debugger_lock); /* initialize debugger lock */
#endif
panic_io_port = boot_arg & 0xffff;
}
-/*
- * fn is used to force napping.
- * fn=0 means no napping allowed
- * fn=1 means forces napping on, normal C2 and C4 transitions
- * fn=2 means forces napping on, but C4 is disabled
- * fn=3 means forces napping on, but use halt
- * fn=4 means forces napping on and will always use C4
- *
- * Note that this will take effect only when the system normally starts napping.
- *
- */
-
- if (!PE_parse_boot_arg("fn", &forcenap)) forcenap = 0; /* If force nap not set, make 0 */
- else {
- if(forcenap < 5) forcenap = forcenap + 1; /* See comments above for decode, this is set to fn + 1 */
- else forcenap = 0; /* Clear for error case */
- }
- machine_nap_policy(); /* Make sure the nap policy reflects the user's choice */
-
machine_conf();
#if NOTYET
uint32_t hdr_cksum;
uint32_t cksum;
- kprintf("Processing 64-bit EFI tables at 0x%x\n", (unsigned int)system_table);
+ kprintf("Processing 64-bit EFI tables at %p\n", system_table);
do {
if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
kprintf("Bad EFI system table signature\n");
gPEEFISystemTable = system_table;
kprintf("RuntimeServices table at 0x%qx\n", system_table->RuntimeServices);
- runtime = (EFI_RUNTIME_SERVICES_64 *) (uint32_t)system_table->RuntimeServices; // XXX
- kprintf("Checking runtime services table 0x%x\n", runtime);
+ runtime = (EFI_RUNTIME_SERVICES_64 *) (uintptr_t)system_table->RuntimeServices; // XXX
+ kprintf("Checking runtime services table %p\n", runtime);
if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
kprintf("Bad EFI runtime table signature\n");
break;
uint32_t hdr_cksum;
uint32_t cksum;
- kprintf("Processing 32-bit EFI tables at 0x%x\n", (unsigned int)system_table);
+ kprintf("Processing 32-bit EFI tables at %p\n", system_table);
do {
if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
kprintf("Bad EFI system table signature\n");
gPEEFISystemTable = system_table;
+ kprintf("RuntimeServices table at %p\n", system_table->RuntimeServices);
runtime = (EFI_RUNTIME_SERVICES *) system_table->RuntimeServices;
if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
kprintf("Bad EFI runtime table signature\n");
do
{
- vm_offset_t vm_size, vm_addr;
+ vm_offset_t vm_size, vm_addr;
vm_map_offset_t phys_addr;
EfiMemoryRange *mptr;
unsigned int msize, mcount;
return;
do
{
- vm_offset_t vm_size, vm_addr;
+ vm_offset_t vm_size, vm_addr;
vm_map_offset_t phys_addr;
EfiMemoryRange *mptr;
unsigned int msize, mcount;
system_table_offset += ptoa_32(args->efiRuntimeServicesPageStart);
- kprintf("Old system table %p, new %p\n",
- args->efiSystemTable, (void *) system_table_offset);
+ kprintf("Old system table 0x%x, new 0x%x\n",
+ (uint32_t)args->efiSystemTable, system_table_offset);
- args->efiSystemTable = (uint32_t) system_table_offset;
+ args->efiSystemTable = system_table_offset;
kprintf("Old map:\n");
msize = args->MemoryMapDescriptorSize;
vm_addr = (vm_offset_t) mptr->VirtualStart;
phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
- kprintf("mapping[%d] %qx @ %x, %x\n", mptr->Type, phys_addr, vm_addr, mptr->NumberOfPages);
+ kprintf("mapping[%u] %qx @ %x, %llu\n", mptr->Type, phys_addr, vm_addr, mptr->NumberOfPages);
}
}
vm_addr = (vm_offset_t) mptr->VirtualStart;
phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
- kprintf("mapping[%d] %qx @ %x, %x\n", mptr->Type, phys_addr, vm_addr, mptr->NumberOfPages);
+ kprintf("mapping[%u] %qx @ %x, %llu\n", mptr->Type, phys_addr, vm_addr, mptr->NumberOfPages);
pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size),
(mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE,
while(1);
}
+
/* Issue an I/O port read if one has been requested - this is an event that
* logic analyzers can use as a trigger point.
*/
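/* The routine itself lies outside this hunk; a minimal sketch, assuming it
* only performs the read that serves as the trigger (panic_io_port is set
* in machine_startup() above, inb() comes from <architecture/i386/pio.h>):
*/
static void
panic_io_port_read(void) {
	if (panic_io_port)
		(void)inb(panic_io_port);	/* the read itself is the trigger event */
}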
/* For use with the MP rendezvous mechanism
*/
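/* On non-embedded configurations each CPU parks itself through
* pmCPUHalt(PM_HALT_DEBUG) rather than a bare hlt, presumably so the
* power-management layer declared in <i386/pmCPU.h> can account for the
* halted CPU; the CONFIG_EMBEDDED case further down goes through
* PEHaltRestart(kPEPanicRestartCPU) instead.
*/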
+#if !CONFIG_EMBEDDED
static void
machine_halt_cpu(__unused void *arg) {
panic_io_port_read();
- __asm__ volatile("hlt");
+ pmCPUHalt(PM_HALT_DEBUG);
}
+#endif
void
Debugger(
__asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));
/* Print backtrace - callee is internally synchronized */
- panic_i386_backtrace(stackptr, 16);
+ panic_i386_backtrace(stackptr, 16, NULL, FALSE, NULL);
/* everything should be printed now so copy to NVRAM
*/
if( debug_buf_size > 0) {
- /* Do not compress the panic log
- * or save to NVRAM unless kernel debugging
- * is disabled. The NVRAM shim doesn't
- * sync to the store until haltRestart is called.
- */
- if (!panicDebugging) {
+ /* Optionally sync the panic log, if any, to NVRAM;
+ * this is the default.
+ */
+ if (commit_paniclog_to_nvram) {
unsigned int bufpos;
-
+ uintptr_t cr0;
+
debug_putc(0);
/* Now call the compressor */
/* XXX Consider using the WKdm compressor in the
* future, rather than just packing - would need to
* be co-ordinated with crashreporter, which decodes
- * this post-restart.
+ * this post-restart. The compressor should be
+ * capable of in-place compression.
*/
- bufpos = packAsc ((uint8_t *)debug_buf,
- (unsigned int) (debug_buf_ptr - debug_buf) );
+ bufpos = packA(debug_buf,
+ (unsigned int) (debug_buf_ptr - debug_buf), debug_buf_size);
/* If compression was successful,
* use the compressed length
*/
- if (bufpos) {
- debug_buf_ptr = debug_buf + bufpos;
- }
+ pi_size = bufpos ? bufpos : (unsigned) (debug_buf_ptr - debug_buf);
+
/* Save panic log to non-volatile store
* Panic info handler must truncate data that is
* too long for this platform.
* This call must save data synchronously,
* since we can subsequently halt the system.
*/
- pi_size = debug_buf_ptr - debug_buf;
+ kprintf("Attempting to commit panic log to NVRAM\n");
+/* The following sequence is a workaround for:
+ * <rdar://problem/5915669> SnowLeopard10A67: AppleEFINVRAM should not invoke
+ * any routines that use floating point (MMX in this case) when saving panic
+ * logs to nvram/flash.
+ */
+ cr0 = get_cr0();
+ clear_ts();
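+ /* clear_ts() drops CR0.TS so the first MMX/FP instruction the NVRAM
+ * driver executes does not raise a device-not-available fault in the
+ * panic path; the saved CR0 value is restored immediately after the
+ * save call below.
+ */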
+
pi_size = PESavePanicInfo((unsigned char *)debug_buf,
pi_size );
+ set_cr0(cr0);
+
+ /* Uncompress in-place, to permit examination of
+ * the panic log by debuggers.
+ */
+
+ if (bufpos) {
+ unpackA(debug_buf, bufpos);
+ }
}
}
draw_panic_dialog();
* that a panic occurred while in that codepath.
*/
mp_rendezvous_break_lock();
+#if CONFIG_EMBEDDED
+ PEHaltRestart(kPEPanicRestartCPU);
+#else
/* Force all CPUs to disable interrupts and HLT.
* We've panicked, and shouldn't depend on the
* PEHaltRestart() mechanism, which relies on several
* bits of infrastructure.
*/
mp_rendezvous_no_intrs(machine_halt_cpu, NULL);
+#endif
/* NOT REACHED */
}
}
typedef struct pasc pasc_t;
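/* struct pasc is defined outside this hunk; given how the removed packAsc
* below fills it, it is presumably a packed struct of eight 7-bit
* bit-fields, which is what lets 8 bytes of 7-bit ASCII be stored in 7
* bytes, a fixed 12.5% reduction:
*
*	struct pasc {
*		unsigned a: 7; unsigned b: 7; unsigned c: 7; unsigned d: 7;
*		unsigned e: 7; unsigned f: 7; unsigned g: 7; unsigned h: 7;
*	} __attribute__((packed));
*/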
-static int packAsc (unsigned char *inbuf, unsigned int length)
-{
- unsigned int i, j = 0;
- unsigned int extra;
- pasc_t pack;
-
- for (i = 0; i < length; i+=8)
- {
- pack.a = inbuf[i];
- pack.b = inbuf[i+1];
- pack.c = inbuf[i+2];
- pack.d = inbuf[i+3];
- pack.e = inbuf[i+4];
- pack.f = inbuf[i+5];
- pack.g = inbuf[i+6];
- pack.h = inbuf[i+7];
- bcopy ((char *) &pack, inbuf + j, 7);
- j += 7;
- }
- extra = (i - length);
- if (extra > 0) {
- inbuf[j - extra] &= (0xFF << (8-extra));
- }
- return j-((extra == 7) ? 6 : extra);
-}
-
/* Routines for address - symbol translation. Not called unless the "keepsyms"
* boot-arg is supplied.
*/
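/* With keepsyms=1 the kernel's symbol table (described by LC_SYMTAB and
* resident in SEG_LINKEDIT) is kept in memory, so the frame addresses below
* can be printed as symbol + offset instead of raw values. segname is a
* fixed 16-byte field that need not be NUL-terminated, hence the bounded
* strncmp() comparisons introduced below.
*/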
if (cmd->cmd == LC_SEGMENT) {
struct segment_command *orig_sg = (struct segment_command *) cmd;
- if (strcmp(SEG_TEXT, orig_sg->segname) == 0)
+ if (strncmp(SEG_TEXT, orig_sg->segname,
+ sizeof(orig_sg->segname)) == 0)
orig_ts = orig_sg;
- else if (strcmp(SEG_LINKEDIT, orig_sg->segname) == 0)
+ else if (strncmp(SEG_LINKEDIT, orig_sg->segname,
+ sizeof(orig_sg->segname)) == 0)
orig_le = orig_sg;
- else if (strcmp("", orig_sg->segname) == 0)
+ else if (strncmp("", orig_sg->segname,
+ sizeof(orig_sg->segname)) == 0)
orig_ts = orig_sg; /* kexts have a single unnamed segment */
}
else if (cmd->cmd == LC_SYMTAB)
if (bestsym != NULL) {
if (diff != 0) {
- kdb_printf("%s + 0x%08x ", bestsym, diff);
+ kdb_printf("%s + 0x%08x \n", bestsym, diff);
} else {
- kdb_printf("%s ", bestsym);
+ kdb_printf("%s \n", bestsym);
}
return 1;
}
}
if (current_kmod != NULL) {
/* if kexts had symbol table loaded, we'd call search_symbol_name again; alas, they don't */
- kdb_printf("%s + %d ", current_kmod->name, search - current_kmod->address);
+ kdb_printf("%s + %d \n", current_kmod->name, search - current_kmod->address);
}
}
#define DUMPFRAMES 32
#define PBT_TIMEOUT_CYCLES (5 * 1000 * 1000 * 1000ULL)
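/* 5e9 TSC ticks is a few seconds of wall-clock time on GHz-class parts
* (roughly 2.5s at 2GHz), bounding how long a panicking CPU waits for
* another CPU's backtrace output before continuing.
*/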
void
-panic_i386_backtrace(void *_frame, int nframes)
+panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdump, x86_saved_state_t *regs)
{
cframe_t *frame = (cframe_t *)_frame;
vm_offset_t raddrs[DUMPFRAMES];
+ vm_offset_t PC = 0;
int frame_index;
volatile uint32_t *ppbtcnt = &pbtcnt;
uint64_t bt_tsc_timeout;
/* Spin on print backtrace lock, which serializes output.
* Continue anyway if a timeout occurs.
*/
- hw_lock_to(&pbtlock, LockTimeOut*100);
+ hw_lock_to(&pbtlock, LockTimeOutTSC);
pbtcpu = cpu_number();
}
PE_parse_boot_arg("keepsyms", &keepsyms);
- kdb_printf("Backtrace, "
- "Format - Frame : Return Address (4 potential args on stack) ");
+ if (msg != NULL) {
+ kdb_printf(msg);
+ }
+
+ if ((regdump == TRUE) && (regs != NULL)) {
+ x86_saved_state32_t *ss32p = saved_state32(regs);
+
+ kdb_printf(
+ "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
+ "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
+ "EFL: 0x%08x, EIP: 0x%08x, CS: 0x%08x, DS: 0x%08x\n",
+ ss32p->eax,ss32p->ebx,ss32p->ecx,ss32p->edx,
+ ss32p->cr2,ss32p->ebp,ss32p->esi,ss32p->edi,
+ ss32p->efl,ss32p->eip,ss32p->cs, ss32p->ds);
+ PC = ss32p->eip;
+ }
+
+ kdb_printf("Backtrace (CPU %d), "
+ "Frame : Return Address (4 potential args on stack)\n", cpu_number());
for (frame_index = 0; frame_index < nframes; frame_index++) {
vm_offset_t curframep = (vm_offset_t) frame;
goto invalid;
}
- kdb_printf("\n0x%x : 0x%x ",
- frame, frame->caller);
+ kdb_printf("%p : 0x%x ", frame, frame->caller);
if (frame_index < DUMPFRAMES)
raddrs[frame_index] = frame->caller;
if (kvtophys((vm_offset_t)&(frame->args[3])))
- kdb_printf("(0x%x 0x%x 0x%x 0x%x) ",
+ kdb_printf("(0x%x 0x%x 0x%x 0x%x) \n",
frame->args[0], frame->args[1],
frame->args[2], frame->args[3]);
goto out;
invalid:
- kdb_printf("Backtrace terminated-invalid frame pointer 0x%x\n",frame);
+ kdb_printf("Backtrace terminated-invalid frame pointer %p\n",frame);
out:
/* Identify kernel modules in the backtrace and display their
if (frame_index)
kmod_dump((vm_offset_t *)&raddrs[0], frame_index);
- kdb_printf("\nKernel version:\n%s\n\n",version);
+ if (PC != 0)
+ kmod_dump(&PC, 1);
+ panic_display_system_configuration();
/* Release print backtrace lock, to permit other callers in the
* event of panics on multiple processors.
*/