/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
#include <kern/clock.h>
#include <kern/cpu_data.h>
#include <kern/machine.h>
-#include <i386/fpu.h>
-#include <i386/ipl.h>
+#include <i386/postcode.h>
+#include <i386/mp_desc.h>
#include <i386/misc_protos.h>
-#include <i386/mtrr.h>
+#include <i386/thread.h>
+#include <i386/trap.h>
#include <i386/machine_routines.h>
+#include <i386/mp.h> /* mp_rendezvous_break_lock */
+#include <i386/cpuid.h>
+#include <i386/fpu.h>
+#include <i386/machine_cpu.h>
+#include <i386/pmap.h>
+#if CONFIG_MTRR
+#include <i386/mtrr.h>
+#endif
+#include <i386/ucode.h>
#include <i386/pmCPU.h>
-#include <i386/postcode.h>
#include <architecture/i386/pio.h> /* inb() */
#include <pexpert/i386/boot.h>
#if MACH_KDB
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
-#include <i386/mp_desc.h>
-#include <i386/mp.h>
-#include <i386/cpuid.h>
-
#include <IOKit/IOPlatformExpert.h>
#include <IOKit/IOHibernatePrivate.h>
#include <pexpert/i386/efi.h>
#include <kern/thread.h>
-#include <i386/thread.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
-void enable_bluebox(void);
-void disable_bluebox(void);
+#include <libkern/kernel_mach_header.h>
+#include <libkern/OSKextLibPrivate.h>
+
+#define DPRINTF(x...)
+//#define DPRINTF(x...) kprintf(x)
static void machine_conf(void);
extern int default_preemption_rate;
extern int max_unsafe_quanta;
extern int max_poll_quanta;
-extern int idlehalt;
extern unsigned int panic_is_inited;
int db_run_mode;
-static int packAsc (uint8_t *inbuf, unsigned int length);
-
volatile int pbtcpu = -1;
hw_lock_data_t pbtlock; /* backtrace print lock */
uint32_t pbtcnt = 0;
-extern const char version[];
+volatile int panic_double_fault_cpu = -1;
+
+#if defined (__i386__)
+#define PRINT_ARGS_FROM_STACK_FRAME 1
+#elif defined (__x86_64__)
+#define PRINT_ARGS_FROM_STACK_FRAME 0
+#else
+#error unsupported architecture
+#endif
+
+#ifdef __LP64__
+typedef struct nlist_64 kernel_nlist_t;
+#else
+typedef struct nlist kernel_nlist_t;
+#endif
typedef struct _cframe_t {
struct _cframe_t *prev;
- unsigned caller;
+ uintptr_t caller;
+#if PRINT_ARGS_FROM_STACK_FRAME
unsigned args[0];
+#endif
} cframe_t;
-void panic_i386_backtrace(void *_frame, int nframes);
+static unsigned panic_io_port;
+static unsigned commit_paniclog_to_nvram;
-static unsigned panic_io_port = 0;
+unsigned int debug_boot_arg;
void
-machine_startup()
+machine_startup(void)
{
int boot_arg;
halt_in_debugger = halt_in_debugger ? 0 : 1;
#endif
- if (PE_parse_boot_arg("debug", &boot_arg)) {
- if (boot_arg & DB_HALT) halt_in_debugger=1;
- if (boot_arg & DB_PRT) disableDebugOuput=FALSE;
- if (boot_arg & DB_SLOG) systemLogDiags=TRUE;
- if (boot_arg & DB_NMI) panicDebugging=TRUE;
- if (boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
+ if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof (debug_boot_arg))) {
+ panicDebugging = TRUE;
+ if (debug_boot_arg & DB_HALT) halt_in_debugger=1;
+ if (debug_boot_arg & DB_PRT) disable_debug_output=FALSE;
+ if (debug_boot_arg & DB_SLOG) systemLogDiags=TRUE;
+ if (debug_boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
+ } else {
+ debug_boot_arg = 0;
}
+ if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram, sizeof (commit_paniclog_to_nvram)))
+ commit_paniclog_to_nvram = 1;
+
+ /*
+ * Entering the debugger will put the CPUs into a "safe"
+ * power mode.
+ */
+ if (PE_parse_boot_argn("pmsafe_debug", &boot_arg, sizeof (boot_arg)))
+ pmsafe_debug = boot_arg;
+
#if NOTYET
hw_lock_init(&debugger_lock); /* initialize debugger lock */
#endif
}
#endif /* MACH_KDB */
- if (PE_parse_boot_arg("preempt", &boot_arg)) {
+ if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
default_preemption_rate = boot_arg;
}
- if (PE_parse_boot_arg("unsafe", &boot_arg)) {
+ if (PE_parse_boot_argn("unsafe", &boot_arg, sizeof (boot_arg))) {
max_unsafe_quanta = boot_arg;
}
- if (PE_parse_boot_arg("poll", &boot_arg)) {
+ if (PE_parse_boot_argn("poll", &boot_arg, sizeof (boot_arg))) {
max_poll_quanta = boot_arg;
}
- if (PE_parse_boot_arg("yield", &boot_arg)) {
+ if (PE_parse_boot_argn("yield", &boot_arg, sizeof (boot_arg))) {
sched_poll_yield_shift = boot_arg;
}
- if (PE_parse_boot_arg("idlehalt", &boot_arg)) {
- idlehalt = boot_arg;
- }
/* The I/O port to issue a read from, in the event of a panic. Useful for
* triggering logic analyzers.
*/
- if (PE_parse_boot_arg("panic_io_port", &boot_arg)) {
+ if (PE_parse_boot_argn("panic_io_port", &boot_arg, sizeof (boot_arg))) {
/*I/O ports range from 0 through 0xFFFF */
panic_io_port = boot_arg & 0xffff;
}
-/*
- * fn is used to force napping.
- * fn=0 means no napping allowed
- * fn=1 means forces napping on, normal C2 and C4 transitions
- * fn=2 means forces napping on, but C4 is disabled
- * fn=3 means forces napping on, but use halt
- * fn=4 means forces napping on and will always use C4
- *
- * Note that this will take effect only when the system normally starts napping.
- *
- */
-
- if (!PE_parse_boot_arg("fn", &forcenap)) forcenap = 0; /* If force nap not set, make 0 */
- else {
- if(forcenap < 5) forcenap = forcenap + 1; /* See comments above for decode, this is set to fn + 1 */
- else forcenap = 0; /* Clear for error case */
- }
- machine_nap_policy(); /* Make sure the nap policy reflects the user's choice */
-
machine_conf();
#if NOTYET
static void
machine_conf(void)
{
- machine_info.memory_size = mem_size;
+ machine_info.memory_size = (typeof(machine_info.memory_size))mem_size;
}
uint32_t hdr_cksum;
uint32_t cksum;
- kprintf("Processing 64-bit EFI tables at 0x%x\n", (unsigned int)system_table);
+ DPRINTF("Processing 64-bit EFI tables at %p\n", system_table);
do {
+ DPRINTF("Header:\n");
+ DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature);
+ DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision);
+ DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize);
+ DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32);
+ DPRINTF("RuntimeServices: 0x%016llx\n", system_table->RuntimeServices);
if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
kprintf("Bad EFI system table signature\n");
break;
system_table->Hdr.CRC32 = 0;
cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);
- //kprintf("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
+ DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
system_table->Hdr.CRC32 = hdr_cksum;
if (cksum != hdr_cksum) {
kprintf("Bad EFI system table checksum\n");
break;
}
- gPEEFISystemTable = system_table;
+ gPEEFISystemTable = system_table;
- kprintf("RuntimeServices table at 0x%qx\n", system_table->RuntimeServices);
- runtime = (EFI_RUNTIME_SERVICES_64 *) (uint32_t)system_table->RuntimeServices; // XXX
- kprintf("Checking runtime services table 0x%x\n", runtime);
- if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
- kprintf("Bad EFI runtime table signature\n");
- break;
- }
+ if (!cpu_mode_is64bit()) {
+ kprintf("Skipping 64-bit EFI runtime services for 32-bit legacy mode\n");
+ break;
+ }
+
+ if(system_table->RuntimeServices == 0) {
+ kprintf("No runtime table present\n");
+ break;
+ }
+ DPRINTF("RuntimeServices table at 0x%qx\n", system_table->RuntimeServices);
+ // 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel.
+ runtime = (EFI_RUNTIME_SERVICES_64 *) (uintptr_t)system_table->RuntimeServices;
+ DPRINTF("Checking runtime services table %p\n", runtime);
+ if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
+ kprintf("Bad EFI runtime table signature\n");
+ break;
+ }
// Verify signature of runtime services table
hdr_cksum = runtime->Hdr.CRC32;
runtime->Hdr.CRC32 = 0;
cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);
- //kprintf("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
+ DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
runtime->Hdr.CRC32 = hdr_cksum;
if (cksum != hdr_cksum) {
kprintf("Bad EFI runtime table checksum\n");
}
static void
-efi_set_tables_32(EFI_SYSTEM_TABLE * system_table)
+efi_set_tables_32(EFI_SYSTEM_TABLE_32 * system_table)
{
- EFI_RUNTIME_SERVICES *runtime;
+ EFI_RUNTIME_SERVICES_32 *runtime;
uint32_t hdr_cksum;
uint32_t cksum;
- kprintf("Processing 32-bit EFI tables at 0x%x\n", (unsigned int)system_table);
+ DPRINTF("Processing 32-bit EFI tables at %p\n", system_table);
do {
+ DPRINTF("Header:\n");
+ DPRINTF(" Signature: 0x%016llx\n", system_table->Hdr.Signature);
+ DPRINTF(" Revision: 0x%08x\n", system_table->Hdr.Revision);
+ DPRINTF(" HeaderSize: 0x%08x\n", system_table->Hdr.HeaderSize);
+ DPRINTF(" CRC32: 0x%08x\n", system_table->Hdr.CRC32);
+ DPRINTF("RuntimeServices: 0x%08x\n", system_table->RuntimeServices);
if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
- kprintf("Bad EFI system table signature\n");
+ kprintf("Bad EFI system table signature\n");
break;
}
// Verify signature of the system table
hdr_cksum = system_table->Hdr.CRC32;
system_table->Hdr.CRC32 = 0;
+ DPRINTF("System table at %p HeaderSize 0x%x\n", system_table, system_table->Hdr.HeaderSize);
cksum = crc32(0L, system_table, system_table->Hdr.HeaderSize);
- //kprintf("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
+ DPRINTF("System table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
system_table->Hdr.CRC32 = hdr_cksum;
if (cksum != hdr_cksum) {
kprintf("Bad EFI system table checksum\n");
break;
}
- gPEEFISystemTable = system_table;
+ gPEEFISystemTable = system_table;
- runtime = (EFI_RUNTIME_SERVICES *) system_table->RuntimeServices;
- if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
- kprintf("Bad EFI runtime table signature\n");
- break;
- }
+ if(system_table->RuntimeServices == 0) {
+ kprintf("No runtime table present\n");
+ break;
+ }
+ DPRINTF("RuntimeServices table at 0x%x\n", system_table->RuntimeServices);
+ // 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel.
+ // For a 64-bit kernel, booter provides a virtual address mod 4G
+ runtime = (EFI_RUNTIME_SERVICES_32 *)
+#ifdef __x86_64__
+ (system_table->RuntimeServices | VM_MIN_KERNEL_ADDRESS);
+#else
+ system_table->RuntimeServices;
+#endif
+ DPRINTF("Runtime table addressed at %p\n", runtime);
+ if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
+ kprintf("Bad EFI runtime table signature\n");
+ break;
+ }
// Verify signature of runtime services table
hdr_cksum = runtime->Hdr.CRC32;
runtime->Hdr.CRC32 = 0;
cksum = crc32(0L, runtime, runtime->Hdr.HeaderSize);
- //kprintf("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
+ DPRINTF("Runtime table calculated CRC32 = 0x%x, header = 0x%x\n", cksum, hdr_cksum);
runtime->Hdr.CRC32 = hdr_cksum;
if (cksum != hdr_cksum) {
kprintf("Bad EFI runtime table checksum\n");
break;
}
+ DPRINTF("Runtime functions\n");
+ DPRINTF(" GetTime : 0x%x\n", runtime->GetTime);
+ DPRINTF(" SetTime : 0x%x\n", runtime->SetTime);
+ DPRINTF(" GetWakeupTime : 0x%x\n", runtime->GetWakeupTime);
+ DPRINTF(" SetWakeupTime : 0x%x\n", runtime->SetWakeupTime);
+ DPRINTF(" SetVirtualAddressMap : 0x%x\n", runtime->SetVirtualAddressMap);
+ DPRINTF(" ConvertPointer : 0x%x\n", runtime->ConvertPointer);
+ DPRINTF(" GetVariable : 0x%x\n", runtime->GetVariable);
+ DPRINTF(" GetNextVariableName : 0x%x\n", runtime->GetNextVariableName);
+ DPRINTF(" SetVariable : 0x%x\n", runtime->SetVariable);
+ DPRINTF(" GetNextHighMonotonicCount: 0x%x\n", runtime->GetNextHighMonotonicCount);
+ DPRINTF(" ResetSystem : 0x%x\n", runtime->ResetSystem);
+
gPEEFIRuntimeServices = runtime;
}
while (FALSE);
do
{
- vm_offset_t vm_size, vm_addr;
+ vm_offset_t vm_size, vm_addr;
vm_map_offset_t phys_addr;
EfiMemoryRange *mptr;
unsigned int msize, mcount;
msize = args->MemoryMapDescriptorSize;
mcount = args->MemoryMapSize / msize;
- mptr = (EfiMemoryRange *)args->MemoryMap;
+ DPRINTF("efi_init() kernel base: 0x%x size: 0x%x\n",
+ args->kaddr, args->ksize);
+ DPRINTF(" efiSystemTable physical: 0x%x virtual: %p\n",
+ args->efiSystemTable,
+ (void *) ml_static_ptovirt(args->efiSystemTable));
+ DPRINTF(" efiRuntimeServicesPageStart: 0x%x\n",
+ args->efiRuntimeServicesPageStart);
+ DPRINTF(" efiRuntimeServicesPageCount: 0x%x\n",
+ args->efiRuntimeServicesPageCount);
+ DPRINTF(" efiRuntimeServicesVirtualPageStart: 0x%016llx\n",
+ args->efiRuntimeServicesVirtualPageStart);
+ mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
if (((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) ) {
- vm_size = i386_ptob((uint32_t)mptr->NumberOfPages);
+ vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
vm_addr = (vm_offset_t) mptr->VirtualStart;
+#ifdef __x86_64__
+ /* For K64 on EFI32, shadow-map into high KVA */
+ if (vm_addr < VM_MIN_KERNEL_ADDRESS)
+ vm_addr |= VM_MIN_KERNEL_ADDRESS;
+#endif
phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
+ DPRINTF(" Type: %x phys: %p EFIv: %p kv: %p size: %p\n",
+ mptr->Type,
+ (void *) (uintptr_t) phys_addr,
+ (void *) (uintptr_t) mptr->VirtualStart,
+ (void *) vm_addr,
+ (void *) vm_size);
pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size),
(mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE,
(mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT);
}
}
- if (args->Version > 1)
- panic("Incompatible boot args version %d\n", args->Version);
+ if (args->Version != kBootArgsVersion2)
+ panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);
kprintf("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
- if (args->Revision >= 4 && args->efiMode == kBootArgsEfiMode64) {
- efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) args->efiSystemTable);
+ if (args->efiMode == kBootArgsEfiMode64) {
+ efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
} else {
- efi_set_tables_32((EFI_SYSTEM_TABLE *) args->efiSystemTable);
+ efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
}
}
while (FALSE);
kprintf("Reinitializing EFI runtime services\n");
- if (args->Revision < 3)
- return;
do
{
- vm_offset_t vm_size, vm_addr;
+ vm_offset_t vm_size, vm_addr;
vm_map_offset_t phys_addr;
EfiMemoryRange *mptr;
unsigned int msize, mcount;
system_table_offset += ptoa_32(args->efiRuntimeServicesPageStart);
- kprintf("Old system table %p, new %p\n",
- args->efiSystemTable, (void *) system_table_offset);
+ kprintf("Old system table 0x%x, new 0x%x\n",
+ (uint32_t)args->efiSystemTable, system_table_offset);
- args->efiSystemTable = (uint32_t) system_table_offset;
+ args->efiSystemTable = system_table_offset;
kprintf("Old map:\n");
msize = args->MemoryMapDescriptorSize;
mcount = args->MemoryMapSize / msize;
- mptr = (EfiMemoryRange *)args->MemoryMap;
+ mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {
- vm_size = i386_ptob((uint32_t)mptr->NumberOfPages);
+ vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
vm_addr = (vm_offset_t) mptr->VirtualStart;
+#ifdef __x86_64__
+ /* K64 on EFI32 */
+ if (vm_addr < VM_MIN_KERNEL_ADDRESS)
+ vm_addr |= VM_MIN_KERNEL_ADDRESS;
+#endif
phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
- kprintf("mapping[%d] %qx @ %x, %x\n", mptr->Type, phys_addr, vm_addr, mptr->NumberOfPages);
+ kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
}
}
kprintf("New map:\n");
msize = args->MemoryMapDescriptorSize;
- mcount = map_size / msize;
+ mcount = (unsigned int )(map_size / msize);
mptr = map;
for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {
- vm_size = i386_ptob((uint32_t)mptr->NumberOfPages);
+ vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
vm_addr = (vm_offset_t) mptr->VirtualStart;
+#ifdef __x86_64__
+ if (vm_addr < VM_MIN_KERNEL_ADDRESS)
+ vm_addr |= VM_MIN_KERNEL_ADDRESS;
+#endif
phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
- kprintf("mapping[%d] %qx @ %x, %x\n", mptr->Type, phys_addr, vm_addr, mptr->NumberOfPages);
+ kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size),
(mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE,
}
}
- if (args->Version > 1)
- panic("Incompatible boot args version %d\n", args->Version);
+ if (args->Version != kBootArgsVersion2)
+ panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);
kprintf("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
- if (args->Revision >= 4 && args->efiMode == kBootArgsEfiMode64) {
- efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) args->efiSystemTable);
+ if (args->efiMode == kBootArgsEfiMode64) {
+ efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
} else {
- efi_set_tables_32((EFI_SYSTEM_TABLE *) args->efiSystemTable);
+ efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
}
}
while (FALSE);
*/
clock_config();
+#if CONFIG_MTRR
/*
* Initialize MTRR from boot processor.
*/
* Set up PAT for boot processor.
*/
pat_init();
+#endif
/*
- * Free lowmem pages
+ * Free lowmem pages and complete other setup
*/
- x86_lowmem_free();
+ pmap_lowmem_finalize();
}
/*
while(1);
}
+
/* Issue an I/O port read if one has been requested - this is an event logic
* analyzers can use as a trigger point.
*/
/* For use with the MP rendezvous mechanism
*/
+uint64_t panic_restart_timeout = ~(0ULL);
+
static void
-machine_halt_cpu(__unused void *arg) {
+machine_halt_cpu(void) {
panic_io_port_read();
- __asm__ volatile("hlt");
+
+ if (panic_restart_timeout != ~(0ULL)) {
+ uint64_t deadline = mach_absolute_time() + panic_restart_timeout;
+ while (mach_absolute_time() < deadline) {
+ cpu_pause();
+ }
+ kprintf("Invoking PE_halt_restart\n");
+ /* Attempt restart via ACPI RESET_REG; at the time of this
+ * writing, this routine is chained through AppleSMC->
+ * AppleACPIPlatform
+ */
+ if (PE_halt_restart)
+ (*PE_halt_restart)(kPERestartCPU);
+ }
+ pmCPUHalt(PM_HALT_DEBUG);
}
void
{
unsigned long pi_size = 0;
void *stackptr;
+ int cn = cpu_number();
hw_atomic_add(&debug_mode, 1);
if (!panic_is_inited) {
asm("hlt");
}
-
printf("Debugger called: <%s>\n", message);
kprintf("Debugger called: <%s>\n", message);
panic_io_port_read();
/* Obtain current frame pointer */
+#if defined (__i386__)
__asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));
+#elif defined (__x86_64__)
+ __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));
+#endif
/* Print backtrace - callee is internally synchronized */
- panic_i386_backtrace(stackptr, 16);
+ panic_i386_backtrace(stackptr, ((panic_double_fault_cpu == cn) ? 80: 48), NULL, FALSE, NULL);
/* everything should be printed now so copy to NVRAM
*/
if( debug_buf_size > 0) {
- /* Do not compress the panic log
- * or save to NVRAM unless kernel debugging
- * is disabled. The NVRAM shim doesn't
- * sync to the store until haltRestart is called.
- */
- if (!panicDebugging) {
+ /* Optionally sync the panic log, if any, to NVRAM
+ * This is the default.
+ */
+ if (commit_paniclog_to_nvram) {
unsigned int bufpos;
+ uintptr_t cr0;
- debug_putc(0);
+ debug_putc(0);
/* Now call the compressor */
/* XXX Consider using the WKdm compressor in the
* future, rather than just packing - would need to
* be co-ordinated with crashreporter, which decodes
- * this post-restart.
+ * this post-restart. The compressor should be
+ * capable of in-place compression.
*/
- bufpos = packAsc ((uint8_t *)debug_buf,
- (unsigned int) (debug_buf_ptr - debug_buf) );
+ bufpos = packA(debug_buf,
+ (unsigned int) (debug_buf_ptr - debug_buf), debug_buf_size);
/* If compression was successful,
* use the compressed length
*/
- if (bufpos) {
- debug_buf_ptr = debug_buf + bufpos;
- }
+ pi_size = bufpos ? bufpos : (unsigned) (debug_buf_ptr - debug_buf);
+
/* Save panic log to non-volatile store
* Panic info handler must truncate data that is
* too long for this platform.
* This call must save data synchronously,
* since we can subsequently halt the system.
*/
- pi_size = debug_buf_ptr - debug_buf;
- pi_size = PESavePanicInfo((unsigned char *)debug_buf,
- pi_size );
+
+
+/* The following sequence is a workaround for:
+ * <rdar://problem/5915669> SnowLeopard10A67: AppleEFINVRAM should not invoke
+ * any routines that use floating point (MMX in this case) when saving panic
+ * logs to nvram/flash.
+ */
+ cr0 = get_cr0();
+ clear_ts();
+
+ kprintf("Attempting to commit panic log to NVRAM\n");
+ pi_size = PESavePanicInfo((unsigned char *)debug_buf,
+ (uint32_t)pi_size );
+ set_cr0(cr0);
+
+ /* Uncompress in-place, to permit examination of
+ * the panic log by debuggers.
+ */
+
+ if (bufpos) {
+ unpackA(debug_buf, bufpos);
+ }
}
}
- draw_panic_dialog();
+
+ /* If the user won't be able to read the dialog,
+ * don't bother trying to show it
+ */
+ if (!PE_reboot_on_panic())
+ draw_panic_dialog();
if (!panicDebugging) {
+ unsigned cnum;
/* Clear the MP rendezvous function lock, in the event
* that a panic occurred while in that codepath.
*/
mp_rendezvous_break_lock();
- /* Force all CPUs to disable interrupts and HLT.
- * We've panicked, and shouldn't depend on the
- * PEHaltRestart() mechanism, which relies on several
- * bits of infrastructure.
+ if (PE_reboot_on_panic()) {
+ if (PE_halt_restart)
+ (*PE_halt_restart)(kPERestartCPU);
+ }
+
+ /* Non-maskably interrupt all other processors
+ * If a restart timeout is specified, this processor
+ * will attempt a restart.
*/
- mp_rendezvous_no_intrs(machine_halt_cpu, NULL);
+ kprintf("Invoking machine_halt_cpu on CPU %d\n", cn);
+ for (cnum = 0; cnum < real_ncpus; cnum++) {
+ if (cnum != (unsigned) cn) {
+ cpu_NMI_interrupt(cnum);
+ }
+ }
+ machine_halt_cpu();
/* NOT REACHED */
}
}
hw_atomic_sub(&debug_mode, 1);
}
-void
-enable_bluebox(void)
-{
-}
-
-void
-disable_bluebox(void)
-{
-}
-
char *
machine_boot_info(char *buf, __unused vm_size_t size)
{
return buf;
}
-
-struct pasc {
- unsigned a: 7;
- unsigned b: 7;
- unsigned c: 7;
- unsigned d: 7;
- unsigned e: 7;
- unsigned f: 7;
- unsigned g: 7;
- unsigned h: 7;
-} __attribute__((packed));
-
-typedef struct pasc pasc_t;
-
-static int packAsc (unsigned char *inbuf, unsigned int length)
-{
- unsigned int i, j = 0;
- unsigned int extra;
- pasc_t pack;
-
- for (i = 0; i < length; i+=8)
- {
- pack.a = inbuf[i];
- pack.b = inbuf[i+1];
- pack.c = inbuf[i+2];
- pack.d = inbuf[i+3];
- pack.e = inbuf[i+4];
- pack.f = inbuf[i+5];
- pack.g = inbuf[i+6];
- pack.h = inbuf[i+7];
- bcopy ((char *) &pack, inbuf + j, 7);
- j += 7;
- }
- extra = (i - length);
- if (extra > 0) {
- inbuf[j - extra] &= (0xFF << (8-extra));
- }
- return j-((extra == 7) ? 6 : extra);
-}
-
/* Routines for address - symbol translation. Not called unless the "keepsyms"
* boot-arg is supplied.
*/
static int
-panic_print_macho_symbol_name(struct mach_header *mh, vm_address_t search)
+panic_print_macho_symbol_name(kernel_mach_header_t *mh, vm_address_t search, const char *module_name)
{
- struct nlist *sym = NULL;
+ kernel_nlist_t *sym = NULL;
struct load_command *cmd;
- struct segment_command *orig_ts = NULL, *orig_le = NULL;
+ kernel_segment_command_t *orig_ts = NULL, *orig_le = NULL;
struct symtab_command *orig_st = NULL;
unsigned int i;
char *strings, *bestsym = NULL;
vm_address_t bestaddr = 0, diff, curdiff;
-
- if (mh->magic != MH_MAGIC) {
- /* bad magic number */
- return 0;
- }
+
+ /* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */
cmd = (struct load_command *) &mh[1];
for (i = 0; i < mh->ncmds; i++) {
- if (cmd->cmd == LC_SEGMENT) {
- struct segment_command *orig_sg = (struct segment_command *) cmd;
+ if (cmd->cmd == LC_SEGMENT_KERNEL) {
+ kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd;
- if (strcmp(SEG_TEXT, orig_sg->segname) == 0)
+ if (strncmp(SEG_TEXT, orig_sg->segname,
+ sizeof(orig_sg->segname)) == 0)
orig_ts = orig_sg;
- else if (strcmp(SEG_LINKEDIT, orig_sg->segname) == 0)
+ else if (strncmp(SEG_LINKEDIT, orig_sg->segname,
+ sizeof(orig_sg->segname)) == 0)
orig_le = orig_sg;
- else if (strcmp("", orig_sg->segname) == 0)
- orig_ts = orig_sg; /* kexts have a single unnamed segment */
+ else if (strncmp("", orig_sg->segname,
+ sizeof(orig_sg->segname)) == 0)
+ orig_ts = orig_sg; /* pre-Lion i386 kexts have a single unnamed segment */
}
else if (cmd->cmd == LC_SYMTAB)
orig_st = (struct symtab_command *) cmd;
- cmd = (struct load_command *) ((caddr_t) cmd + cmd->cmdsize);
+ cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize);
}
if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL))
return 0;
- /* kexts don't have a LINKEDIT segment for now, so we'll never get this far for kexts */
-
- vm_address_t slide = ((vm_address_t)mh) - orig_ts->vmaddr;
- if (slide != 0)
- search -= slide; /* adjusting search since the binary has slid */
-
if ((search < orig_ts->vmaddr) ||
(search >= orig_ts->vmaddr + orig_ts->vmsize)) {
/* search out of range for this mach header */
return 0;
}
- sym = (struct nlist *)orig_le->vmaddr;
- strings = ((char *)sym) + orig_st->nsyms * sizeof(struct nlist);
+ sym = (kernel_nlist_t *)(uintptr_t)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff);
+ strings = (char *)(uintptr_t)(orig_le->vmaddr + orig_st->stroff - orig_le->fileoff);
diff = search;
for (i = 0; i < orig_st->nsyms; i++) {
+ if (sym[i].n_type & N_STAB) continue;
+
if (sym[i].n_value <= search) {
curdiff = search - (vm_address_t)sym[i].n_value;
if (curdiff < diff) {
if (bestsym != NULL) {
if (diff != 0) {
- kdb_printf("%s + 0x%08x ", bestsym, diff);
+ kdb_printf("%s : %s + 0x%lx", module_name, bestsym, (unsigned long)diff);
} else {
- kdb_printf("%s ", bestsym);
+ kdb_printf("%s : %s", module_name, bestsym);
}
return 1;
}
static void
panic_print_kmod_symbol_name(vm_address_t search)
{
- kmod_info_t * current_kmod = kmod;
-
- while (current_kmod != NULL) {
- if ((current_kmod->address <= search) &&
- (current_kmod->address + current_kmod->size > search))
+ u_int i;
+
+ if (gLoadedKextSummaries == NULL)
+ return;
+ for (i = 0; i < gLoadedKextSummaries->numSummaries; ++i) {
+ OSKextLoadedKextSummary *summary = gLoadedKextSummaries->summaries + i;
+
+ if ((search >= summary->address) &&
+ (search < (summary->address + summary->size)))
+ {
+ kernel_mach_header_t *header = (kernel_mach_header_t *)(uintptr_t) summary->address;
+ if (panic_print_macho_symbol_name(header, search, summary->name) == 0) {
+ kdb_printf("%s + %llu", summary->name, (unsigned long)search - summary->address);
+ }
break;
- current_kmod = current_kmod->next;
- }
- if (current_kmod != NULL) {
- /* if kexts had symbol table loaded, we'd call search_symbol_name again; alas, they don't */
- kdb_printf("%s + %d ", current_kmod->name, search - current_kmod->address);
+ }
}
}
-extern struct mach_header _mh_execute_header; /* the kernel's mach header */
-
static void
panic_print_symbol_name(vm_address_t search)
{
/* try searching in the kernel */
- if (panic_print_macho_symbol_name(&_mh_execute_header, search) == 0) {
+ if (panic_print_macho_symbol_name(&_mh_execute_header, search, "mach_kernel") == 0) {
/* that failed, now try to search for the right kext */
panic_print_kmod_symbol_name(search);
}
#define DUMPFRAMES 32
#define PBT_TIMEOUT_CYCLES (5 * 1000 * 1000 * 1000ULL)
void
-panic_i386_backtrace(void *_frame, int nframes)
+panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdump, x86_saved_state_t *regs)
{
cframe_t *frame = (cframe_t *)_frame;
vm_offset_t raddrs[DUMPFRAMES];
+ vm_offset_t PC = 0;
int frame_index;
volatile uint32_t *ppbtcnt = &pbtcnt;
uint64_t bt_tsc_timeout;
boolean_t keepsyms = FALSE;
+ int cn = cpu_number();
- if(pbtcpu != cpu_number()) {
+ if(pbtcpu != cn) {
hw_atomic_add(&pbtcnt, 1);
/* Spin on print backtrace lock, which serializes output
* Continue anyway if a timeout occurs.
*/
- hw_lock_to(&pbtlock, LockTimeOut*100);
- pbtcpu = cpu_number();
+ hw_lock_to(&pbtlock, LockTimeOutTSC*2);
+ pbtcpu = cn;
}
- PE_parse_boot_arg("keepsyms", &keepsyms);
+ PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));
+
+ if (msg != NULL) {
+ kdb_printf("%s", msg);
+ }
+
+ if ((regdump == TRUE) && (regs != NULL)) {
+#if defined(__x86_64__)
+ x86_saved_state64_t *ss64p = saved_state64(regs);
+ kdb_printf(
+ "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
+ "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
+ "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
+ "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
+ "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n",
+ ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
+ ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
+ ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
+ ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
+ ss64p->isf.rflags, ss64p->isf.rip, ss64p->isf.cs,
+ ss64p->isf.ss);
+ PC = ss64p->isf.rip;
+#else
+ x86_saved_state32_t *ss32p = saved_state32(regs);
+ kdb_printf(
+ "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
+ "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
+ "EFL: 0x%08x, EIP: 0x%08x, CS: 0x%08x, DS: 0x%08x\n",
+ ss32p->eax,ss32p->ebx,ss32p->ecx,ss32p->edx,
+ ss32p->cr2,ss32p->ebp,ss32p->esi,ss32p->edi,
+ ss32p->efl,ss32p->eip,ss32p->cs, ss32p->ds);
+ PC = ss32p->eip;
+#endif
+ }
- kdb_printf("Backtrace, "
- "Format - Frame : Return Address (4 potential args on stack) ");
+ kdb_printf("Backtrace (CPU %d), "
+#if PRINT_ARGS_FROM_STACK_FRAME
+ "Frame : Return Address (4 potential args on stack)\n", cn);
+#else
+ "Frame : Return Address\n", cn);
+#endif
for (frame_index = 0; frame_index < nframes; frame_index++) {
vm_offset_t curframep = (vm_offset_t) frame;
}
if (!kvtophys(curframep) ||
- !kvtophys(curframep + sizeof(cframe_t))) {
+ !kvtophys(curframep + sizeof(cframe_t) - 1)) {
kdb_printf("No mapping exists for frame pointer\n");
goto invalid;
}
- kdb_printf("\n0x%x : 0x%x ",
- frame, frame->caller);
+ kdb_printf("%p : 0x%lx ", frame, frame->caller);
if (frame_index < DUMPFRAMES)
raddrs[frame_index] = frame->caller;
+#if PRINT_ARGS_FROM_STACK_FRAME
if (kvtophys((vm_offset_t)&(frame->args[3])))
kdb_printf("(0x%x 0x%x 0x%x 0x%x) ",
frame->args[0], frame->args[1],
frame->args[2], frame->args[3]);
+#endif
/* Display address-symbol translation only if the "keepsyms"
 * boot-arg is supplied, since we unload LINKEDIT otherwise.
if (keepsyms)
panic_print_symbol_name((vm_address_t)frame->caller);
- /* Stack grows downward */
- if (frame->prev < frame) {
- frame = frame->prev;
- goto invalid;
- }
+ kdb_printf("\n");
+
frame = frame->prev;
}
goto out;
invalid:
- kdb_printf("Backtrace terminated-invalid frame pointer 0x%x\n",frame);
+ kdb_printf("Backtrace terminated-invalid frame pointer %p\n",frame);
out:
/* Identify kernel modules in the backtrace and display their
* the kmod list safely.
*/
if (frame_index)
- kmod_dump((vm_offset_t *)&raddrs[0], frame_index);
+ kmod_panic_dump((vm_offset_t *)&raddrs[0], frame_index);
+
+ if (PC != 0)
+ kmod_panic_dump(&PC, 1);
- kdb_printf("\nKernel version:\n%s\n\n",version);
+ panic_display_system_configuration();
/* Release print backtrace lock, to permit other callers in the
* event of panics on multiple processors.