/*
- * Copyright (c) 2000-2010 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* Basic initialization for I386 - ISA bus machines.
*/
-#include <platforms.h>
#include <mach/i386/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/machine.h>
#include <mach/time_value.h>
+#include <sys/kdebug.h>
#include <kern/spl.h>
#include <kern/assert.h>
#include <kern/debug.h>
#endif
#include <i386/ucode.h>
#include <i386/pmCPU.h>
+#include <i386/panic_hooks.h>
+
#include <architecture/i386/pio.h> /* inb() */
#include <pexpert/i386/boot.h>
+#include <kdp/kdp_dyld.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <libkern/kernel_mach_header.h>
#include <libkern/OSKextLibPrivate.h>
+#include <mach/branch_predicates.h>
+
#if DEBUG
#define DPRINTF(x...) kprintf(x)
#else
#define DPRINTF(x...)
#endif
static void machine_conf(void);
+void panic_print_symbol_name(vm_address_t search);
+extern boolean_t init_task_died;
+extern const char version[];
+extern char osversion[];
extern int max_unsafe_quanta;
extern int max_poll_quanta;
extern unsigned int panic_is_inited;
+extern int proc_pid(void *p);
+
+/* Definitions for frame pointers */
+#define FP_ALIGNMENT_MASK ((uint32_t)(0x3))
+#define FP_LR_OFFSET ((uint32_t)4)
+#define FP_LR_OFFSET64 ((uint32_t)8)
+#define FP_MAX_NUM_TO_EVALUATE (50)
+
int db_run_mode;
volatile int pbtcpu = -1;
volatile int panic_double_fault_cpu = -1;
-#if defined (__i386__)
-#define PRINT_ARGS_FROM_STACK_FRAME 1
-#elif defined (__x86_64__)
#define PRINT_ARGS_FROM_STACK_FRAME 0
-#else
-#error unsupported architecture
-#endif
typedef struct _cframe_t {
struct _cframe_t *prev;
unsigned int debug_boot_arg;
+/*
+ * Print a single backtrace by walking the frame-pointer chain from topfp.
+ */
+void
+print_one_backtrace(pmap_t pmap, vm_offset_t topfp, const char *cur_marker,
+ boolean_t is_64_bit, boolean_t nvram_format)
+{
+ int i = 0;
+ addr64_t lr;
+ addr64_t fp;
+ addr64_t fp_for_ppn;
+ ppnum_t ppn;
+ boolean_t dump_kernel_stack;
+
+ fp = topfp;
+ fp_for_ppn = 0;
+ ppn = (ppnum_t)NULL;
+
+ if (fp >= VM_MIN_KERNEL_ADDRESS)
+ dump_kernel_stack = TRUE;
+ else
+ dump_kernel_stack = FALSE;
+
+ do {
+ if ((fp == 0) || ((fp & FP_ALIGNMENT_MASK) != 0))
+ break;
+ if (dump_kernel_stack && ((fp < VM_MIN_KERNEL_ADDRESS) || (fp > VM_MAX_KERNEL_ADDRESS)))
+ break;
+ if ((!dump_kernel_stack) && (fp >= VM_MIN_KERNEL_ADDRESS))
+ break;
+
+ /* Check to see if the current address will result in a different
+ ppn than previously computed (to avoid recomputation), i.e. whether
+ ((addr ^ fp_for_ppn) >> PAGE_SHIFT) is nonzero */
+
+ if ((((fp + FP_LR_OFFSET) ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
+ ppn = pmap_find_phys(pmap, fp + FP_LR_OFFSET);
+ fp_for_ppn = fp + (is_64_bit ? FP_LR_OFFSET64 : FP_LR_OFFSET);
+ }
+ if (ppn != (ppnum_t)NULL) {
+ if (is_64_bit) {
+ lr = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET64) & PAGE_MASK));
+ } else {
+ lr = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | ((fp + FP_LR_OFFSET) & PAGE_MASK));
+ }
+ } else {
+ if (is_64_bit) {
+ kdb_printf("%s\t Could not read LR from frame at 0x%016llx\n", cur_marker, fp + FP_LR_OFFSET64);
+ } else {
+ kdb_printf("%s\t Could not read LR from frame at 0x%08x\n", cur_marker, (uint32_t)(fp + FP_LR_OFFSET));
+ }
+ break;
+ }
+ if (((fp ^ fp_for_ppn) >> PAGE_SHIFT) != 0x0U) {
+ ppn = pmap_find_phys(pmap, fp);
+ fp_for_ppn = fp;
+ }
+ if (ppn != (ppnum_t)NULL) {
+ if (is_64_bit) {
+ fp = ml_phys_read_double_64(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
+ } else {
+ fp = ml_phys_read_word(((((vm_offset_t)ppn) << PAGE_SHIFT)) | (fp & PAGE_MASK));
+ }
+ } else {
+ if (is_64_bit) {
+ kdb_printf("%s\t Could not read FP from frame at 0x%016llx\n", cur_marker, fp);
+ } else {
+ kdb_printf("%s\t Could not read FP from frame at 0x%08x\n", cur_marker, (uint32_t)fp);
+ }
+ break;
+ }
+
+ if (nvram_format) {
+ if (is_64_bit) {
+ kdb_printf("%s\t0x%016llx\n", cur_marker, lr);
+ } else {
+ kdb_printf("%s\t0x%08x\n", cur_marker, (uint32_t)lr);
+ }
+ } else {
+ if (is_64_bit) {
+ kdb_printf("%s\t lr: 0x%016llx fp: 0x%016llx\n", cur_marker, lr, fp);
+ } else {
+ kdb_printf("%s\t lr: 0x%08x fp: 0x%08x\n", cur_marker, (uint32_t)lr, (uint32_t)fp);
+ }
+ }
+ } while ((++i < FP_MAX_NUM_TO_EVALUATE) && (fp != topfp));
+}
void
machine_startup(void)
{
if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof (debug_boot_arg))) {
panicDebugging = TRUE;
+#if DEVELOPMENT || DEBUG
if (debug_boot_arg & DB_HALT) halt_in_debugger=1;
+#endif
if (debug_boot_arg & DB_PRT) disable_debug_output=FALSE;
if (debug_boot_arg & DB_SLOG) systemLogDiags=TRUE;
if (debug_boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
+#if KDEBUG_MOJO_TRACE
+ if (debug_boot_arg & DB_PRT_KDEBUG) {
+ kdebug_serial = TRUE;
+ disable_debug_output = FALSE;
+ }
+#endif
} else {
debug_boot_arg = 0;
}
machine_conf();
+ panic_hooks_init();
+
/*
* Start the system.
*/
gPEEFISystemTable = system_table;
- if (!cpu_mode_is64bit()) {
- kprintf("Skipping 64-bit EFI runtime services for 32-bit legacy mode\n");
- break;
- }
-
if(system_table->RuntimeServices == 0) {
kprintf("No runtime table present\n");
break;
// 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel.
// For a 64-bit kernel, booter provides a virtual address mod 4G
runtime = (EFI_RUNTIME_SERVICES_32 *)
-#ifdef __x86_64__
(system_table->RuntimeServices | VM_MIN_KERNEL_ADDRESS);
-#else
- system_table->RuntimeServices;
-#endif
DPRINTF("Runtime table addressed at %p\n", runtime);
if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
kprintf("Bad EFI runtime table signature\n");
if (((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) ) {
vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
vm_addr = (vm_offset_t) mptr->VirtualStart;
-#ifdef __x86_64__
/* For K64 on EFI32, shadow-map into high KVA */
if (vm_addr < VM_MIN_KERNEL_ADDRESS)
vm_addr |= VM_MIN_KERNEL_ADDRESS;
-#endif
phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n",
mptr->Type,
vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
vm_addr = (vm_offset_t) mptr->VirtualStart;
-#ifdef __x86_64__
/* K64 on EFI32 */
if (vm_addr < VM_MIN_KERNEL_ADDRESS)
vm_addr |= VM_MIN_KERNEL_ADDRESS;
-#endif
phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
vm_addr = (vm_offset_t) mptr->VirtualStart;
-#ifdef __x86_64__
if (vm_addr < VM_MIN_KERNEL_ADDRESS)
vm_addr |= VM_MIN_KERNEL_ADDRESS;
-#endif
phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
void
machine_init(void)
{
-#if __x86_64__
/* Now with VM up, switch to dynamically allocated cpu data */
cpu_data_realloc();
-#endif
/* Ensure panic buffer is initialized. */
debug_log_init();
pmCPUHalt(PM_HALT_DEBUG);
}
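+/* Return the BSD pid of a task, or -1 if it has no bsd_info attached. */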
+static int pid_from_task(task_t task)
+{
+ int pid = -1;
+
+ if (task->bsd_info)
+ pid = proc_pid(task->bsd_info);
+
+ return pid;
+}
+
void
DebuggerWithContext(
__unused unsigned int reason,
unsigned long pi_size = 0;
void *stackptr;
int cn = cpu_number();
+ task_t task = current_task();
+ int task_pid = pid_from_task(task);
+ boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
hw_atomic_add(&debug_mode, 1);
if (!panic_is_inited) {
asm("hlt");
}
+ doprnt_hide_pointers = FALSE;
+
printf("Debugger called: <%s>\n", message);
kprintf("Debugger called: <%s>\n", message);
panic_io_port_read();
/* Obtain current frame pointer */
-#if defined (__i386__)
- __asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));
-#elif defined (__x86_64__)
__asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));
-#endif
/* Print backtrace - callee is internally synchronized */
- panic_i386_backtrace(stackptr, ((panic_double_fault_cpu == cn) ? 80: 48), NULL, FALSE, NULL);
+ if (task_pid == 1 && (init_task_died)) {
+ /* Special handling of launchd died panics */
+ print_launchd_info();
+ } else {
+ panic_i386_backtrace(stackptr, ((panic_double_fault_cpu == cn) ? 80: 48), NULL, FALSE, NULL);
+ }
/* everything should be printed now so copy to NVRAM
*/
}
}
+ doprnt_hide_pointers = old_doprnt_hide_pointers;
__asm__("int3");
hw_atomic_sub(&debug_mode, 1);
}
}
}
-static void
+void
panic_print_symbol_name(vm_address_t search)
{
/* try searching in the kernel */
uint64_t bt_tsc_timeout;
boolean_t keepsyms = FALSE;
int cn = cpu_number();
+ boolean_t old_doprnt_hide_pointers = doprnt_hide_pointers;
if(pbtcpu != cn) {
hw_atomic_add(&pbtcnt, 1);
/* Spin on print backtrace lock, which serializes output
* Continue anyway if a timeout occurs.
*/
- hw_lock_to(&pbtlock, LockTimeOutTSC*2);
+ hw_lock_to(&pbtlock, ~0U);
pbtcpu = cn;
}
+ if (__improbable(doprnt_hide_pointers == TRUE)) {
+ /* If we're called directly, the Debugger() function will not be called,
+ * so we need to reset the value in here. */
+ doprnt_hide_pointers = FALSE;
+ }
+
+ panic_check_hook();
+
PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));
if (msg != NULL) {
}
if ((regdump == TRUE) && (regs != NULL)) {
-#if defined(__x86_64__)
x86_saved_state64_t *ss64p = saved_state64(regs);
kdb_printf(
"RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
ss64p->isf.rflags, ss64p->isf.rip, ss64p->isf.cs,
ss64p->isf.ss);
PC = ss64p->isf.rip;
-#else
- x86_saved_state32_t *ss32p = saved_state32(regs);
- kdb_printf(
- "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
- "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
- "EFL: 0x%08x, EIP: 0x%08x, CS: 0x%08x, DS: 0x%08x\n",
- ss32p->eax,ss32p->ebx,ss32p->ecx,ss32p->edx,
- ss32p->cr2,ss32p->ebp,ss32p->esi,ss32p->edi,
- ss32p->efl,ss32p->eip,ss32p->cs, ss32p->ds);
- PC = ss32p->eip;
-#endif
}
kdb_printf("Backtrace (CPU %d), "
panic_display_system_configuration();
+ doprnt_hide_pointers = old_doprnt_hide_pointers;
+
/* Release print backtrace lock, to permit other callers in the
* event of panics on multiple processors.
*/
bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
while(*ppbtcnt && (rdtsc64() < bt_tsc_timeout));
}
+
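+/* Panic-time copyin: copy 'size' bytes from user address 'uaddr' in pmap 'p'
+ * into the kernel buffer 'dest', translating page by page to physical
+ * addresses and using bcopy_phys() so no faults are taken.
+ * Returns TRUE only if the entire range was copied.
+ */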
+static boolean_t
+debug_copyin(pmap_t p, uint64_t uaddr, void *dest, size_t size)
+{
+ size_t rem = size;
+ char *kvaddr = dest;
+
+ while (rem) {
+ ppnum_t upn = pmap_find_phys(p, uaddr);
+ uint64_t phys_src = ptoa_64(upn) | (uaddr & PAGE_MASK);
+ uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
+ uint64_t src_rem = PAGE_SIZE - (phys_src & PAGE_MASK);
+ uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
+ size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
+ cur_size = MIN(cur_size, rem);
+
+ if (upn && pmap_valid_page(upn) && phys_dest) {
+ bcopy_phys(phys_src, phys_dest, cur_size);
+ }
+ else
+ break;
+ uaddr += cur_size;
+ kvaddr += cur_size;
+ rem -= cur_size;
+ }
+ return (rem == 0);
+}
+
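+/* Dump the saved 64-bit user register state of a thread. */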
+void
+print_threads_registers(thread_t thread)
+{
+ x86_saved_state_t *savestate;
+
+ savestate = get_user_regs(thread);
+ kdb_printf(
+ "\nRAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
+ "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
+ "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
+ "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
+ "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n\n",
+ savestate->ss_64.rax, savestate->ss_64.rbx, savestate->ss_64.rcx, savestate->ss_64.rdx,
+ savestate->ss_64.isf.rsp, savestate->ss_64.rbp, savestate->ss_64.rsi, savestate->ss_64.rdi,
+ savestate->ss_64.r8, savestate->ss_64.r9, savestate->ss_64.r10, savestate->ss_64.r11,
+ savestate->ss_64.r12, savestate->ss_64.r13, savestate->ss_64.r14, savestate->ss_64.r15,
+ savestate->ss_64.isf.rflags, savestate->ss_64.isf.rip, savestate->ss_64.isf.cs,
+ savestate->ss_64.isf.ss);
+}
+
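+/* Walk every thread in the task and print a user-space backtrace for each,
+ * starting from the thread's saved RBP in the task's pmap.
+ */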
+void
+print_tasks_user_threads(task_t task)
+{
+ thread_t thread = current_thread();
+ x86_saved_state_t *savestate;
+ pmap_t pmap = 0;
+ uint64_t rbp;
+ const char *cur_marker = 0;
+ int j;
+
+ for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
+ ++j, thread = (thread_t) queue_next(&thread->task_threads)) {
+
+ kdb_printf("Thread %d: %p\n", j, thread);
+ pmap = get_task_pmap(task);
+ savestate = get_user_regs(thread);
+ rbp = savestate->ss_64.rbp;
+ print_one_backtrace(pmap, (vm_offset_t)rbp, cur_marker, TRUE, TRUE);
+ kdb_printf("\n");
+ }
+}
+
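+/* Report the index, within its task, of the currently running thread,
+ * i.e. the one that crashed.
+ */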
+void
+print_thread_num_that_crashed(task_t task)
+{
+ thread_t c_thread = current_thread();
+ thread_t thread;
+ int j;
+
+ for (j = 0, thread = (thread_t) queue_first(&task->threads); j < task->thread_count;
+ ++j, thread = (thread_t) queue_next(&thread->task_threads)) {
+
+ if (c_thread == thread) {
+ kdb_printf("\nThread %d crashed\n", j);
+ break;
+ }
+ }
+}
+
+#define PANICLOG_UUID_BUF_SIZE 256
+
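+/* Read dyld's all_image_infos from the task and print the load address and
+ * UUID of each image it has recorded.
+ */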
+void print_uuid_info(task_t task)
+{
+ uint32_t uuid_info_count = 0;
+ mach_vm_address_t uuid_info_addr = 0;
+ boolean_t have_map = (task->map != NULL) && (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
+ boolean_t have_pmap = have_map && (task->map->pmap != NULL) && (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
+ int task_pid = pid_from_task(task);
+ char uuidbuf[PANICLOG_UUID_BUF_SIZE] = {0};
+ char *uuidbufptr = uuidbuf;
+ uint32_t k;
+
+ if (have_pmap && task->active && task_pid > 0) {
+ /* Read dyld_all_image_infos struct from task memory to get UUID array count & location */
+ struct user64_dyld_all_image_infos task_image_infos;
+ if (debug_copyin(task->map->pmap, task->all_image_info_addr,
+ &task_image_infos, sizeof(struct user64_dyld_all_image_infos))) {
+ uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
+ uuid_info_addr = task_image_infos.uuidArray;
+ }
+
+ /* If we get a NULL uuid_info_addr (which can happen when we catch dyld
+ * in the middle of updating this data structure), we zero the
+ * uuid_info_count so that we won't even try to save load info for this task
+ */
+ if (!uuid_info_addr) {
+ uuid_info_count = 0;
+ }
+ }
+
+ if (task_pid > 0 && uuid_info_count > 0) {
+ uint32_t uuid_info_size = sizeof(struct user64_dyld_uuid_info);
+ uint32_t uuid_array_size = uuid_info_count * uuid_info_size;
+ uint32_t uuid_copy_size = 0;
+ uint32_t uuid_image_count = 0;
+ char *current_uuid_buffer = NULL;
+ /* Copy in the UUID info array. It may be nonresident, in which case we just zero the image count and stop. */
+
+ kdb_printf("\nuuid info:\n");
+ while (uuid_array_size) {
+ if (uuid_array_size <= PANICLOG_UUID_BUF_SIZE) {
+ uuid_copy_size = uuid_array_size;
+ uuid_image_count = uuid_array_size/uuid_info_size;
+ } else {
+ uuid_image_count = PANICLOG_UUID_BUF_SIZE/uuid_info_size;
+ uuid_copy_size = uuid_image_count * uuid_info_size;
+ }
+ if (have_pmap && !debug_copyin(task->map->pmap, uuid_info_addr, uuidbufptr,
+ uuid_copy_size)) {
+ kdb_printf("Error!! Failed to copy UUID info for task %p pid %d\n", task, task_pid);
+ uuid_image_count = 0;
+ break;
+ }
+
+ if (uuid_image_count > 0) {
+ current_uuid_buffer = uuidbufptr;
+ for (k = 0; k < uuid_image_count; k++) {
+ kdb_printf(" %#llx", *(uint64_t *)current_uuid_buffer);
+ current_uuid_buffer += sizeof(uint64_t);
+ uint8_t *uuid = (uint8_t *)current_uuid_buffer;
+ kdb_printf("\tuuid = <%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x>\n",
+ uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5], uuid[6], uuid[7], uuid[8],
+ uuid[9], uuid[10], uuid[11], uuid[12], uuid[13], uuid[14], uuid[15]);
+ current_uuid_buffer += 16;
+ }
+ bzero(&uuidbuf, sizeof(uuidbuf));
+ }
+ uuid_info_addr += uuid_copy_size;
+ uuid_array_size -= uuid_copy_size;
+ }
+ }
+}
+
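+/* Panic-time report for a dead launchd (pid 1): prints the task's image
+ * UUIDs, the crashed thread index, its register state and user backtraces,
+ * serialized under the print backtrace lock.
+ */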
+void print_launchd_info(void)
+{
+ task_t task = current_task();
+ thread_t thread = current_thread();
+ volatile uint32_t *ppbtcnt = &pbtcnt;
+ uint64_t bt_tsc_timeout;
+ int cn = cpu_number();
+
+ if(pbtcpu != cn) {
+ hw_atomic_add(&pbtcnt, 1);
+ /* Spin on print backtrace lock, which serializes output
+ * Continue anyway if a timeout occurs.
+ */
+ hw_lock_to(&pbtlock, ~0U);
+ pbtcpu = cn;
+ }
+
+ print_uuid_info(task);
+ print_thread_num_that_crashed(task);
+ print_threads_registers(thread);
+ print_tasks_user_threads(task);
+ kdb_printf("Mac OS version: %s\n", (osversion[0] != 0) ? osversion : "Not yet set");
+ kdb_printf("Kernel version: %s\n", version);
+ panic_display_kernel_uuid();
+ panic_display_model_name();
+
+ /* Release print backtrace lock, to permit other callers in the
+ * event of panics on multiple processors.
+ */
+ hw_lock_unlock(&pbtlock);
+ hw_atomic_sub(&pbtcnt, 1);
+ /* Wait for other processors to complete output
+ * Timeout and continue after PBT_TIMEOUT_CYCLES.
+ */
+ bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
+ while(*ppbtcnt && (rdtsc64() < bt_tsc_timeout));
+
+}