X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/c0fea4742e91338fffdcf79f86a7c1d5e2b97eb1..b7266188b87f3620ec3f9f717e57194a7dd989fe:/osfmk/i386/AT386/model_dep.c

diff --git a/osfmk/i386/AT386/model_dep.c b/osfmk/i386/AT386/model_dep.c
index 938f59992..c623ba72e 100644
--- a/osfmk/i386/AT386/model_dep.c
+++ b/osfmk/i386/AT386/model_dep.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
@@ -78,13 +84,18 @@
 #include
 #include
 #include
+#include
+#include
+#include
+#include
+#include
+#include
+#include		/* mp_rendezvous_break_lock */
+#include
 #include
 #include
-#include
 #include
-#include
 #include
-#include
 #include		/* inb() */
 #include
 #if MACH_KDB
@@ -95,54 +106,59 @@
 #include
 #include
-#include
-#include
-
 #include
 #include
 #include
 #include
-#include
 #include
 #include
-void enable_bluebox(void);
-void disable_bluebox(void);
+#include

 static void machine_conf(void);

-#include

 extern int	default_preemption_rate;
 extern int	max_unsafe_quanta;
 extern int	max_poll_quanta;
-extern int	idlehalt;
 extern unsigned int panic_is_inited;

 int db_run_mode;

-static int packAsc (uint8_t *inbuf, unsigned int length);
-extern int kdb_printf(const char *fmt, ...);
-
 volatile int pbtcpu = -1;
 hw_lock_data_t pbtlock;		/* backtrace print lock */
 uint32_t pbtcnt = 0;

-extern const char version[];
+#if defined (__i386__)
+#define PRINT_ARGS_FROM_STACK_FRAME 1
+#elif defined (__x86_64__)
+#define PRINT_ARGS_FROM_STACK_FRAME 0
+#else
+#error unsupported architecture
+#endif
+
+#ifdef __LP64__
+typedef struct nlist_64 kernel_nlist_t;
+#else
+typedef struct nlist kernel_nlist_t;
+#endif

 typedef struct _cframe_t {
 	struct _cframe_t	*prev;
-	unsigned		caller;
+	uintptr_t		caller;
+#if PRINT_ARGS_FROM_STACK_FRAME
 	unsigned		args[0];
+#endif
 } cframe_t;

-void panic_i386_backtrace(void *_frame, int nframes);
+static unsigned panic_io_port;
+static unsigned	commit_paniclog_to_nvram;

-unsigned panic_io_port = 0;
+unsigned int debug_boot_arg;

 void
-machine_startup()
+machine_startup(void)
 {
 	int	boot_arg;
@@ -151,14 +167,26 @@ machine_startup()
 	halt_in_debugger = halt_in_debugger ? 0 : 1;
 #endif

-	if (PE_parse_boot_arg("debug", &boot_arg)) {
-		if (boot_arg & DB_HALT) halt_in_debugger=1;
-		if (boot_arg & DB_PRT) disableDebugOuput=FALSE;
-		if (boot_arg & DB_SLOG) systemLogDiags=TRUE;
-		if (boot_arg & DB_NMI) panicDebugging=TRUE;
-		if (boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
+	if (PE_parse_boot_argn("debug", &debug_boot_arg, sizeof (debug_boot_arg))) {
+		if (debug_boot_arg & DB_HALT) halt_in_debugger=1;
+		if (debug_boot_arg & DB_PRT) disable_debug_output=FALSE;
+		if (debug_boot_arg & DB_SLOG) systemLogDiags=TRUE;
+		if (debug_boot_arg & DB_NMI) panicDebugging=TRUE;
+		if (debug_boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
+	} else {
+		debug_boot_arg = 0;
 	}

+	if (!PE_parse_boot_argn("nvram_paniclog", &commit_paniclog_to_nvram, sizeof (commit_paniclog_to_nvram)))
+		commit_paniclog_to_nvram = 1;
+
+	/*
+	 * Entering the debugger will put the CPUs into a "safe"
+	 * power mode.
+	 */
+	if (PE_parse_boot_argn("pmsafe_debug", &boot_arg, sizeof (boot_arg)))
+	    pmsafe_debug = boot_arg;
+
 #if NOTYET
 	hw_lock_init(&debugger_lock);	/* initialize debugger lock */
 #endif
@@ -188,48 +216,29 @@ machine_startup()
 	}
 #endif	/* MACH_KDB */

-	if (PE_parse_boot_arg("preempt", &boot_arg)) {
+	if (PE_parse_boot_argn("preempt", &boot_arg, sizeof (boot_arg))) {
 		default_preemption_rate = boot_arg;
 	}
-	if (PE_parse_boot_arg("unsafe", &boot_arg)) {
+	if (PE_parse_boot_argn("unsafe", &boot_arg, sizeof (boot_arg))) {
 		max_unsafe_quanta = boot_arg;
 	}
-	if (PE_parse_boot_arg("poll", &boot_arg)) {
+	if (PE_parse_boot_argn("poll", &boot_arg, sizeof (boot_arg))) {
 		max_poll_quanta = boot_arg;
 	}
-	if (PE_parse_boot_arg("yield", &boot_arg)) {
+	if (PE_parse_boot_argn("yield", &boot_arg, sizeof (boot_arg))) {
 		sched_poll_yield_shift = boot_arg;
 	}
-	if (PE_parse_boot_arg("idlehalt", &boot_arg)) {
+	if (PE_parse_boot_argn("idlehalt", &boot_arg, sizeof (boot_arg))) {
 		idlehalt = boot_arg;
 	}
 /* The I/O port to issue a read from, in the event of a panic. Useful for
  * triggering logic analyzers.
  */
-	if (PE_parse_boot_arg("panic_io_port", &boot_arg)) {
+	if (PE_parse_boot_argn("panic_io_port", &boot_arg, sizeof (boot_arg))) {
 		/*I/O ports range from 0 through 0xFFFF */
 		panic_io_port = boot_arg & 0xffff;
 	}

-/*
- * fn is used to force napping.
- *	fn=0 means no napping allowed
- *	fn=1 means forces napping on, normal C2 and C4 transitions
- *	fn=2 means forces napping on, but C4 is disabled
- *	fn=3 means forces napping on, but use halt
- *	fn=4 means forces napping on and will always use C4
- *
- * Note that this will take effect only when the system normally starts napping.
- *
- */
-
-	if (!PE_parse_boot_arg("fn", &forcenap)) forcenap = 0;	/* If force nap not set, make 0 */
-	else {
-		if(forcenap < 5) forcenap = forcenap + 1;	/* See comments above for decode, this is set to fn + 1 */
-		else forcenap = 0;				/* Clear for error case */
-	}
-	machine_nap_policy();			/* Make sure the nap policy reflects the user's choice */
-
 	machine_conf();

 #if NOTYET
@@ -247,7 +256,7 @@ machine_startup()
 static void
 machine_conf(void)
 {
-	machine_info.memory_size = mem_size;
+	machine_info.memory_size = (typeof(machine_info.memory_size))mem_size;
 }


@@ -363,7 +372,7 @@ efi_set_tables_64(EFI_SYSTEM_TABLE_64 * system_table)
     uint32_t hdr_cksum;
     uint32_t cksum;

-    kprintf("Processing 64-bit EFI tables at 0x%x\n", (unsigned int)system_table);
+    kprintf("Processing 64-bit EFI tables at %p\n", system_table);
     do {
	if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
	    kprintf("Bad EFI system table signature\n");
	    break;
	}
@@ -381,15 +390,26 @@ efi_set_tables_64(EFI_SYSTEM_TABLE_64 * system_table)
	    break;
	}

-    gPEEFISystemTable = system_table;
+    gPEEFISystemTable = system_table;
+
+
+    if (!cpu_mode_is64bit()) {
+	kprintf("Skipping 64-bit EFI runtime services for 32-bit legacy mode\n");
+	break;
+    }
+    if(system_table->RuntimeServices == 0) {
+	kprintf("No runtime table present\n");
+	break;
+    }
     kprintf("RuntimeServices table at 0x%qx\n", system_table->RuntimeServices);
-    runtime = (EFI_RUNTIME_SERVICES_64 *) (uint32_t)system_table->RuntimeServices;	// XXX
-    kprintf("Checking runtime services table 0x%x\n", runtime);
-    if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
-	kprintf("Bad EFI runtime table signature\n");
-	break;
-    }
+    // 64-bit virtual address is OK for 64-bit EFI and 64/32-bit kernel.
+    runtime = (EFI_RUNTIME_SERVICES_64 *) (uintptr_t)system_table->RuntimeServices;
+    kprintf("Checking runtime services table %p\n", runtime);
+    if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
+	kprintf("Bad EFI runtime table signature\n");
+	break;
+    }

	// Verify signature of runtime services table
	hdr_cksum = runtime->Hdr.CRC32;
@@ -409,16 +429,16 @@ }

 static void
-efi_set_tables_32(EFI_SYSTEM_TABLE * system_table)
+efi_set_tables_32(EFI_SYSTEM_TABLE_32 * system_table)
 {
-    EFI_RUNTIME_SERVICES *runtime;
+    EFI_RUNTIME_SERVICES_32 *runtime;
     uint32_t hdr_cksum;
     uint32_t cksum;

-    kprintf("Processing 32-bit EFI tables at 0x%x\n", (unsigned int)system_table);
+    kprintf("Processing 32-bit EFI tables at %p\n", system_table);
     do {
	if (system_table->Hdr.Signature != EFI_SYSTEM_TABLE_SIGNATURE) {
-	    kprintf("Bad EFI system table signature\n");
+            kprintf("Bad EFI system table signature\n");
	    break;
	}
	// Verify signature of the system table
@@ -433,13 +453,21 @@ efi_set_tables_32(EFI_SYSTEM_TABLE * system_table)
	    break;
	}

-    gPEEFISystemTable = system_table;
+    gPEEFISystemTable = system_table;

-    runtime = (EFI_RUNTIME_SERVICES *) system_table->RuntimeServices;
-    if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
-	kprintf("Bad EFI runtime table signature\n");
-	break;
-    }
+
+    if(system_table->RuntimeServices == 0) {
+	kprintf("No runtime table present\n");
+	break;
+    }
+    kprintf("RuntimeServices table at 0x%x\n", system_table->RuntimeServices);
+    // 32-bit virtual address is OK for 32-bit EFI and 32-bit kernel.
+    // For a 64-bit kernel, booter will ensure pointer is zeroed out
+    runtime = (EFI_RUNTIME_SERVICES_32 *) (intptr_t)system_table->RuntimeServices;
+    if (runtime->Hdr.Signature != EFI_RUNTIME_SERVICES_SIGNATURE) {
+	kprintf("Bad EFI runtime table signature\n");
+	break;
+    }

	// Verify signature of runtime services table
	hdr_cksum = runtime->Hdr.CRC32;
@@ -469,7 +497,7 @@ efi_init(void)

    do {
-	vm_offset_t vm_size, vm_addr;
+	vm_offset_t vm_size, vm_addr;
	vm_map_offset_t phys_addr;
	EfiMemoryRange *mptr;
	unsigned int msize, mcount;
@@ -478,26 +506,31 @@ efi_init(void)
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;

-	mptr = (EfiMemoryRange *)args->MemoryMap;
+	mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
	for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
	    if (((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) ) {
-		vm_size = i386_ptob((uint32_t)mptr->NumberOfPages);
+		vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
		vm_addr = (vm_offset_t) mptr->VirtualStart;
		phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
-		pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size),
+#if defined(__i386__)
+		pmap_map
+#elif defined(__x86_64__)
+		pmap_map_bd /* K64todo resolve pmap layer inconsistency */
+#endif
+			(vm_addr, phys_addr, phys_addr + round_page(vm_size),
			 (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE,
			 (mptr->Type == EfiMemoryMappedIO) ?
			 VM_WIMG_IO : VM_WIMG_USE_DEFAULT);
	    }
	}

-	if (args->Version > 1)
-	    panic("Incompatible boot args version %d\n", args->Version);
+	if ((args->Version != kBootArgsVersion1) || (args->Version == kBootArgsVersion1 && args->Revision < kBootArgsRevision1_5 ))
+	    panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);

	kprintf("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
-	if (args->Revision >= 4 && args->efiMode == kBootArgsEfiMode64) {
-	    efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) args->efiSystemTable);
+	if (args->efiMode == kBootArgsEfiMode64) {
+	    efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
	} else {
-	    efi_set_tables_32((EFI_SYSTEM_TABLE *) args->efiSystemTable);
+	    efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
	}
    } while (FALSE);
@@ -513,11 +546,11 @@ hibernate_newruntime_map(void * map, vm_size_t map_size, uint32_t system_table_o

    kprintf("Reinitializing EFI runtime services\n");

-    if (args->Revision < 3)
+    if (args->Version != kBootArgsVersion1)
	return;

    do {
-	vm_offset_t vm_size, vm_addr;
+	vm_offset_t vm_size, vm_addr;
	vm_map_offset_t phys_addr;
	EfiMemoryRange *mptr;
	unsigned int msize, mcount;
@@ -528,23 +561,23 @@ hibernate_newruntime_map(void * map, vm_size_t map_size, uint32_t system_table_o

	system_table_offset += ptoa_32(args->efiRuntimeServicesPageStart);

-	kprintf("Old system table %p, new %p\n",
-	    args->efiSystemTable, (void *) system_table_offset);
+	kprintf("Old system table 0x%x, new 0x%x\n",
+	    (uint32_t)args->efiSystemTable, system_table_offset);

-	args->efiSystemTable = (uint32_t) system_table_offset;
+	args->efiSystemTable = system_table_offset;

	kprintf("Old map:\n");
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;
-	mptr = (EfiMemoryRange *)args->MemoryMap;
+	mptr = (EfiMemoryRange *)ml_static_ptovirt(args->MemoryMap);
	for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
	    if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {
-		vm_size = i386_ptob((uint32_t)mptr->NumberOfPages);
+		vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
		vm_addr = (vm_offset_t) mptr->VirtualStart;
		phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
-		kprintf("mapping[%d] %qx @ %x, %x\n", mptr->Type, phys_addr, vm_addr, mptr->NumberOfPages);
+		kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);
	    }
	}

@@ -553,31 +586,36 @@ hibernate_newruntime_map(void * map, vm_size_t map_size, uint32_t system_table_o

	kprintf("New map:\n");
	msize = args->MemoryMapDescriptorSize;
-	mcount = map_size / msize;
+	mcount = (unsigned int )(map_size / msize);
	mptr = map;
	for (i=0; i < mcount; i++, mptr = (EfiMemoryRange *)(((vm_offset_t)mptr) + msize)) {
	    if ((mptr->Attribute & EFI_MEMORY_RUNTIME) == EFI_MEMORY_RUNTIME) {
-		vm_size = i386_ptob((uint32_t)mptr->NumberOfPages);
+		vm_size = (vm_offset_t)i386_ptob((uint32_t)mptr->NumberOfPages);
		vm_addr = (vm_offset_t) mptr->VirtualStart;
		phys_addr = (vm_map_offset_t) mptr->PhysicalStart;
-		kprintf("mapping[%d] %qx @ %x, %x\n", mptr->Type, phys_addr, vm_addr, mptr->NumberOfPages);
+		kprintf("mapping[%u] %qx @ %lx, %llu\n", mptr->Type, phys_addr, (unsigned long)vm_addr, mptr->NumberOfPages);

-		pmap_map(vm_addr, phys_addr, phys_addr + round_page(vm_size),
+#if defined(__i386__)
+		pmap_map
+#elif defined(__x86_64__)
+		pmap_map_bd /* K64todo resolve pmap layer inconsistency */
+#endif
+			(vm_addr, phys_addr, phys_addr + round_page(vm_size),
			 (mptr->Type == kEfiRuntimeServicesCode) ? VM_PROT_READ | VM_PROT_EXECUTE : VM_PROT_READ|VM_PROT_WRITE,
			 (mptr->Type == EfiMemoryMappedIO) ? VM_WIMG_IO : VM_WIMG_USE_DEFAULT);
	    }
	}

-	if (args->Version > 1)
-	    panic("Incompatible boot args version %d\n", args->Version);
+	if ((args->Version != kBootArgsVersion1) || (args->Version == kBootArgsVersion1 && args->Revision < kBootArgsRevision1_5 ))
+	    panic("Incompatible boot args version %d revision %d\n", args->Version, args->Revision);

	kprintf("Boot args version %d revision %d mode %d\n", args->Version, args->Revision, args->efiMode);
-	if (args->Revision >= 4 && args->efiMode == kBootArgsEfiMode64) {
-	    efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) args->efiSystemTable);
+	if (args->efiMode == kBootArgsEfiMode64) {
+	    efi_set_tables_64((EFI_SYSTEM_TABLE_64 *) ml_static_ptovirt(args->efiSystemTable));
	} else {
-	    efi_set_tables_32((EFI_SYSTEM_TABLE *) args->efiSystemTable);
+	    efi_set_tables_32((EFI_SYSTEM_TABLE_32 *) ml_static_ptovirt(args->efiSystemTable));
	}
    } while (FALSE);
@@ -631,9 +669,9 @@ machine_init(void)
	pat_init();

	/*
-	 * Free lowmem pages
+	 * Free lowmem pages and complete other setup
	 */
-	x86_lowmem_free();
+	pmap_lowmem_finalize();
 }

 /*
@@ -663,12 +701,24 @@ halt_all_cpus(boolean_t reboot)
	while(1);
 }

+
+/* Issue an I/O port read if one has been requested - this is an event logic
+ * analyzers can use as a trigger point.
+ */
+
+void
+panic_io_port_read(void) {
+	if (panic_io_port)
+		(void)inb(panic_io_port);
+}
+
 /* For use with the MP rendezvous mechanism
 */

 static void
 machine_halt_cpu(__unused void *arg) {
-	__asm__ volatile("hlt");
+	panic_io_port_read();
+	pmCPUHalt(PM_HALT_DEBUG);
 }

 void
@@ -701,61 +751,89 @@ Debugger(
	/* Issue an I/O port read if one has been requested - this is an event logic
	 * analyzers can use as a trigger point.
	 */
-	if (panic_io_port)
-		(void)inb(panic_io_port);
+	panic_io_port_read();

	/* Obtain current frame pointer */
+#if defined (__i386__)
	__asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));
+#elif defined (__x86_64__)
+	__asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));
+#endif

	/* Print backtrace - callee is internally synchronized */
-	panic_i386_backtrace(stackptr, 16);
+	panic_i386_backtrace(stackptr, 32, NULL, FALSE, NULL);

	/* everything should be printed now so copy to NVRAM
	 */
	if( debug_buf_size > 0) {
-	  /* Do not compress the panic log
-	   * or save to NVRAM unless kernel debugging
-	   * is disabled. The NVRAM shim doesn't
-	   * sync to the store until haltRestart is called.
-	   */
-		if (!panicDebugging) {
+	  /* Optionally sync the panic log, if any, to NVRAM
+	   * This is the default.
+	   */
+		if (commit_paniclog_to_nvram) {
			unsigned int bufpos;
+			uintptr_t cr0;

-			debug_putc(0);
+			debug_putc(0);

			/* Now call the compressor */
			/* XXX Consider using the WKdm compressor in the
			 * future, rather than just packing - would need to
			 * be co-ordinated with crashreporter, which decodes
-			 * this post-restart.
+			 * this post-restart. The compressor should be
+			 * capable of in-place compression.
			 */
-			bufpos = packAsc ((uint8_t *)debug_buf,
-			    (unsigned int) (debug_buf_ptr - debug_buf) );
+			bufpos = packA(debug_buf,
+			    (unsigned int) (debug_buf_ptr - debug_buf), debug_buf_size);
			/* If compression was successful,
			 * use the compressed length
			 */
-			if (bufpos) {
-				debug_buf_ptr = debug_buf + bufpos;
-			}
+			pi_size = bufpos ? bufpos : (unsigned) (debug_buf_ptr - debug_buf);
+
			/* Save panic log to non-volatile store
			 * Panic info handler must truncate data that is
			 * too long for this platform.
			 * This call must save data synchronously,
			 * since we can subsequently halt the system.
			 */
-			pi_size = debug_buf_ptr - debug_buf;
-			pi_size = PESavePanicInfo((unsigned char *)debug_buf,
-					pi_size );
+
+			kprintf("Attempting to commit panic log to NVRAM\n");
+/* The following sequence is a workaround for:
+ * SnowLeopard10A67: AppleEFINVRAM should not invoke
+ * any routines that use floating point (MMX in this case) when saving panic
+ * logs to nvram/flash.
+ */
+			cr0 = get_cr0();
+			clear_ts();
+
+			pi_size = PESavePanicInfo((unsigned char *)debug_buf,
+					(uint32_t)pi_size );
+			set_cr0(cr0);
+			/* Uncompress in-place, to permit examination of
+			 * the panic log by debuggers.
+			 */
+
+			if (bufpos) {
+				unpackA(debug_buf, bufpos);
+			}
		}
	}
-	draw_panic_dialog();
+
+	/* If the user won't be able to read the dialog,
+	 * don't bother trying to show it
+	 */
+	if (!PE_reboot_on_panic())
+		draw_panic_dialog();

	if (!panicDebugging) {
		/* Clear the MP rendezvous function lock, in the event
		 * that a panic occurred while in that codepath.
		 */
		mp_rendezvous_break_lock();
+		if (PE_reboot_on_panic()) {
+			PEHaltRestart(kPEPanicRestartCPU);
+		}
+
		/* Force all CPUs to disable interrupts and HLT.
		 * We've panicked, and shouldn't depend on the
		 * PEHaltRestart() mechanism, which relies on several
@@ -770,16 +848,6 @@ Debugger(
	hw_atomic_sub(&debug_mode, 1);
 }

-void
-enable_bluebox(void)
-{
-}
-
-void
-disable_bluebox(void)
-{
-}
-
 char *
 machine_boot_info(char *buf, __unused vm_size_t size)
 {
@@ -801,68 +869,42 @@ struct pasc {

 typedef struct pasc pasc_t;

-static int packAsc (unsigned char *inbuf, unsigned int length)
-{
-  unsigned int i, j = 0;
-  unsigned int extra;
-  pasc_t pack;
-
-  for (i = 0; i < length; i+=8)
-    {
-      pack.a = inbuf[i];
-      pack.b = inbuf[i+1];
-      pack.c = inbuf[i+2];
-      pack.d = inbuf[i+3];
-      pack.e = inbuf[i+4];
-      pack.f = inbuf[i+5];
-      pack.g = inbuf[i+6];
-      pack.h = inbuf[i+7];
-      bcopy ((char *) &pack, inbuf + j, 7);
-      j += 7;
-    }
-  extra = (i - length);
-  if (extra > 0) {
-    inbuf[j - extra] &= (0xFF << (8-extra));
-  }
-  return j-((extra == 7) ? 6 : extra);
-}
-
 /* Routines for address - symbol translation. Not called unless the "keepsyms"
  * boot-arg is supplied.
  */
 static int
-panic_print_macho_symbol_name(struct mach_header *mh, vm_address_t search)
+panic_print_macho_symbol_name(kernel_mach_header_t *mh, vm_address_t search)
 {
-    struct nlist	*sym = NULL;
+    kernel_nlist_t	*sym = NULL;
     struct load_command		*cmd;
-    struct segment_command	*orig_ts = NULL, *orig_le = NULL;
+    kernel_segment_command_t	*orig_ts = NULL, *orig_le = NULL;
     struct symtab_command	*orig_st = NULL;
     unsigned int		i;
     char			*strings, *bestsym = NULL;
     vm_address_t		bestaddr = 0, diff, curdiff;
-
-    if (mh->magic != MH_MAGIC) {
-        /* bad magic number */
-        return 0;
-    }
+
+    /* Assume that if it's loaded and linked into the kernel, it's a valid Mach-O */

     cmd = (struct load_command *) &mh[1];
     for (i = 0; i < mh->ncmds; i++) {
-        if (cmd->cmd == LC_SEGMENT) {
-            struct segment_command *orig_sg = (struct segment_command *) cmd;
+        if (cmd->cmd == LC_SEGMENT_KERNEL) {
+            kernel_segment_command_t *orig_sg = (kernel_segment_command_t *) cmd;

-            if (strcmp(SEG_TEXT, orig_sg->segname) == 0)
+            if (strncmp(SEG_TEXT, orig_sg->segname,
+				    sizeof(orig_sg->segname)) == 0)
                 orig_ts = orig_sg;
-            else if (strcmp(SEG_LINKEDIT, orig_sg->segname) == 0)
+            else if (strncmp(SEG_LINKEDIT, orig_sg->segname,
+				    sizeof(orig_sg->segname)) == 0)
                 orig_le = orig_sg;
-            else if (strcmp("", orig_sg->segname) == 0)
+            else if (strncmp("", orig_sg->segname,
+				    sizeof(orig_sg->segname)) == 0)
                 orig_ts = orig_sg; /* kexts have a single unnamed segment */
        }
        else if (cmd->cmd == LC_SYMTAB)
            orig_st = (struct symtab_command *) cmd;

-        cmd = (struct load_command *) ((caddr_t) cmd + cmd->cmdsize);
+        cmd = (struct load_command *) ((uintptr_t) cmd + cmd->cmdsize);
    }

    if ((orig_ts == NULL) || (orig_st == NULL) || (orig_le == NULL))
@@ -870,7 +912,7 @@ panic_print_macho_symbol_name(struct mach_header *mh, vm_address_t search)
    /* kexts don't have a LINKEDIT segment for now, so we'll never get this far for kexts */

-    vm_address_t slide = ((vm_address_t)mh) - orig_ts->vmaddr;
+    vm_offset_t slide = ((vm_address_t)mh) - orig_ts->vmaddr;
    if (slide != 0)
        search -= slide; /* adjusting search since the binary has slid */

@@ -880,11 +922,13 @@ panic_print_macho_symbol_name(struct mach_header *mh, vm_address_t search)
        return 0;
    }

-    sym = (struct nlist *)orig_le->vmaddr;
-    strings = ((char *)sym) + orig_st->nsyms * sizeof(struct nlist);
+    sym = (kernel_nlist_t *)(uintptr_t)(orig_le->vmaddr + orig_st->symoff - orig_le->fileoff);
+    strings = (char *)(uintptr_t)(orig_le->vmaddr + orig_st->stroff - orig_le->fileoff);
    diff = search;

    for (i = 0; i < orig_st->nsyms; i++) {
+        if (sym[i].n_type & N_STAB) continue;
+
        if (sym[i].n_value <= search) {
            curdiff = search - (vm_address_t)sym[i].n_value;
            if (curdiff < diff) {
@@ -897,9 +941,9 @@ panic_print_macho_symbol_name(struct mach_header *mh, vm_address_t search)

    if (bestsym != NULL) {
        if (diff != 0) {
-            kdb_printf("%s + 0x%08x ", bestsym, diff);
+            kdb_printf("%s + 0x%lx", bestsym, (unsigned long)diff);
        } else {
-            kdb_printf("%s ", bestsym);
+            kdb_printf("%s", bestsym);
        }
        return 1;
    }
@@ -921,12 +965,10 @@ panic_print_kmod_symbol_name(vm_address_t search)
    }
    if (current_kmod != NULL) {
        /* if kexts had symbol table loaded, we'd call search_symbol_name again; alas, they don't */
-        kdb_printf("%s + %d ", current_kmod->name, search - current_kmod->address);
+        kdb_printf("%s + %lu \n", current_kmod->name, (unsigned long)search - current_kmod->address);
    }
 }

-extern struct mach_header _mh_execute_header; /* the kernel's mach header */
-
 static void
 panic_print_symbol_name(vm_address_t search)
 {
@@ -946,10 +988,11 @@ panic_print_symbol_name(vm_address_t search)
 #define DUMPFRAMES 32
 #define PBT_TIMEOUT_CYCLES (5 * 1000 * 1000 * 1000ULL)
 void
-panic_i386_backtrace(void *_frame, int nframes)
+panic_i386_backtrace(void *_frame, int nframes, const char *msg, boolean_t regdump, x86_saved_state_t *regs)
 {
	cframe_t	*frame = (cframe_t *)_frame;
	vm_offset_t raddrs[DUMPFRAMES];
+	vm_offset_t PC = 0;
	int frame_index;
	volatile uint32_t *ppbtcnt = &pbtcnt;
	uint64_t bt_tsc_timeout;
@@ -960,14 +1003,51 @@ panic_i386_backtrace(void *_frame, int nframes)
	/* Spin on print backtrace lock, which serializes output
	 * Continue anyway if a timeout occurs.
	 */
-	hw_lock_to(&pbtlock, LockTimeOut*100);
+	hw_lock_to(&pbtlock, LockTimeOutTSC);
	pbtcpu = cpu_number();
	}

-	PE_parse_boot_arg("keepsyms", &keepsyms);
+	PE_parse_boot_argn("keepsyms", &keepsyms, sizeof (keepsyms));
+
+	if (msg != NULL) {
+		kdb_printf("%s", msg);
+	}

-	kdb_printf("Backtrace, "
-	    "Format - Frame : Return Address (4 potential args on stack) ");
+	if ((regdump == TRUE) && (regs != NULL)) {
+#if defined(__x86_64__)
+		x86_saved_state64_t	*ss64p = saved_state64(regs);
+		kdb_printf(
+		    "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
+		    "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
+		    "R8: 0x%016llx, R9: 0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
+		    "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
+		    "RFL: 0x%016llx, RIP: 0x%016llx, CS: 0x%016llx, SS: 0x%016llx\n",
+		    ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
+		    ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
+		    ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
+		    ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
+		    ss64p->isf.rflags, ss64p->isf.rip, ss64p->isf.cs,
+		    ss64p->isf.ss);
+		PC = ss64p->isf.rip;
+#else
+		x86_saved_state32_t	*ss32p = saved_state32(regs);
+		kdb_printf(
+		    "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
+		    "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
+		    "EFL: 0x%08x, EIP: 0x%08x, CS: 0x%08x, DS: 0x%08x\n",
+		    ss32p->eax,ss32p->ebx,ss32p->ecx,ss32p->edx,
+		    ss32p->cr2,ss32p->ebp,ss32p->esi,ss32p->edi,
+		    ss32p->efl,ss32p->eip,ss32p->cs, ss32p->ds);
+		PC = ss32p->eip;
+#endif
+	}
+
+	kdb_printf("Backtrace (CPU %d), "
+#if PRINT_ARGS_FROM_STACK_FRAME
+	"Frame : Return Address (4 potential args on stack)\n", cpu_number());
+#else
+	"Frame : Return Address\n", cpu_number());
+#endif

	for (frame_index = 0; frame_index < nframes; frame_index++) {
		vm_offset_t curframep = (vm_offset_t) frame;
@@ -986,15 +1066,16 @@ panic_i386_backtrace(void *_frame, int nframes)
			goto invalid;
		}

-		kdb_printf("\n0x%x : 0x%x ",
-			frame, frame->caller);
+		kdb_printf("%p : 0x%lx ", frame, frame->caller);
		if (frame_index < DUMPFRAMES)
			raddrs[frame_index] = frame->caller;

+#if PRINT_ARGS_FROM_STACK_FRAME
		if (kvtophys((vm_offset_t)&(frame->args[3])))
			kdb_printf("(0x%x 0x%x 0x%x 0x%x) ",
			    frame->args[0], frame->args[1],
			    frame->args[2], frame->args[3]);
+#endif

		/* Display address-symbol translation only if the "keepsyms"
		 * boot-arg is suppplied, since we unload LINKEDIT otherwise.
@@ -1004,11 +1085,8 @@ panic_i386_backtrace(void *_frame, int nframes)
		if (keepsyms)
			panic_print_symbol_name((vm_address_t)frame->caller);

-		/* Stack grows downward */
-		if (frame->prev < frame) {
-			frame = frame->prev;
-			goto invalid;
-		}
+		kdb_printf("\n");
+
		frame = frame->prev;
	}

@@ -1018,7 +1096,7 @@ panic_i386_backtrace(void *_frame, int nframes)
	goto out;

 invalid:
-	kdb_printf("Backtrace terminated-invalid frame pointer 0x%x\n",frame);
+	kdb_printf("Backtrace terminated-invalid frame pointer %p\n",frame);
 out:

	/* Identify kernel modules in the backtrace and display their
@@ -1026,9 +1104,12 @@ out:
	 * the kmod list safely.
	 */
	if (frame_index)
-		kmod_dump((vm_offset_t *)&raddrs[0], frame_index);
+		kmod_panic_dump((vm_offset_t *)&raddrs[0], frame_index);
+
+	if (PC != 0)
+		kmod_panic_dump(&PC, 1);

-	kdb_printf("\nKernel version:\n%s\n\n",version);
+	panic_display_system_configuration();

	/* Release print backtrace lock, to permit other callers in the
	 * event of panics on multiple processors.
@@ -1041,3 +1122,5 @@ out:
	bt_tsc_timeout = rdtsc64() + PBT_TIMEOUT_CYCLES;
	while(*ppbtcnt && (rdtsc64() < bt_tsc_timeout));
 }
+
+void *apic_table = NULL;