X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/2d21ac55c334faf3a56e5634905ed6987fc787d4..bb59bff194111743b33cc36712410b5656329d3c:/libkern/gen/OSDebug.cpp

diff --git a/libkern/gen/OSDebug.cpp b/libkern/gen/OSDebug.cpp
index 43544373f..7cb847108 100644
--- a/libkern/gen/OSDebug.cpp
+++ b/libkern/gen/OSDebug.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2005-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -28,18 +28,21 @@
 // NOTE: This file is only c++ so I can get static initialisers going
 #include
+#include
 #include
 #include
 #include
 #include
-#include
+#include
 #include   // From bsd's libkern directory
 #include
 #include
+#include
+
 extern int etext;
 __BEGIN_DECLS
 // From osmfk/kern/thread.h but considered to be private
@@ -47,20 +50,19 @@ extern vm_offset_t min_valid_stack_address(void);
 extern vm_offset_t max_valid_stack_address(void);
 // From osfmk/kmod.c
-extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt);
+extern void kmod_dump_log(vm_offset_t *addr, unsigned int cnt, boolean_t doUnslide);
 extern addr64_t kvtophys(vm_offset_t va);
-#if __arm__
-extern int copyinframe(vm_address_t fp, uint32_t *frame);
-#endif
 __END_DECLS
-static mutex_t *sOSReportLock = mutex_alloc(0);
+extern lck_grp_t *IOLockGroup;
+
+static lck_mtx_t *sOSReportLock = lck_mtx_alloc_init(IOLockGroup, LCK_ATTR_NULL);
 /* Use kernel_debug() to log a backtrace */
 void
-trace_backtrace(unsigned int debugid, unsigned int debugid2, int size, int data) {
+trace_backtrace(uint32_t debugid, uint32_t debugid2, uintptr_t size, uintptr_t data) {
     void *bt[16];
     const unsigned cnt = sizeof(bt) / sizeof(bt[0]);
     unsigned i;
@@ -81,7 +83,7 @@ trace_backtrace(unsigned int debugid, unsigned int debugid2, int size, int data)
      */
     if (!found) i=2;
-#define safe_bt(a) (int)(a
 VM_MIN_KERNEL_AND_KEXT_ADDRESS) &&
+        (raddr < VM_MAX_KERNEL_ADDRESS));
+}
 static unsigned int
-i386_validate_stackptr(vm_offset_t stackptr)
+x86_64_validate_stackptr(vm_offset_t stackptr)
 {
     /* Existence and alignment check
      */
-    if (!stackptr || (stackptr & 0x3))
+    if (!stackptr || (stackptr & 0x7) || !x86_64_validate_raddr(stackptr))
         return 0;
     /* Is a virtual->physical translation present?
@@ -133,66 +144,43 @@ i386_validate_stackptr(vm_offset_t stackptr)
     /* Check if the return address lies on the same page;
      * If not, verify that a translation exists.
      */
-    if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < i386_RETURN_OFFSET) &&
-        !kvtophys(stackptr + i386_RETURN_OFFSET))
+    if (((PAGE_SIZE - (stackptr & PAGE_MASK)) < x86_64_RETURN_OFFSET) &&
+        !kvtophys(stackptr + x86_64_RETURN_OFFSET))
         return 0;
     return 1;
 }
+#endif
-static unsigned int
-i386_validate_raddr(vm_offset_t raddr)
+void
+OSPrintBacktrace(void)
 {
-    return ((raddr > VM_MIN_KERNEL_ADDRESS) &&
-        (raddr < VM_MAX_KERNEL_ADDRESS));
+    void * btbuf[20];
+    int tmp = OSBacktrace(btbuf, 20);
+    int i;
+    for(i=0;i
> 2];
-        if ((stackptr_prev ^ stackptr) > 8 * 1024) // Sanity check
-            break;
-
-        vm_offset_t addr = mem[(stackptr >> 2) + 2];
-        if ((addr & 3) || (addr < 0x8000)) // More sanity checks
-            break;
-        bt[i] = (void *) addr;
-    }
-    frame = i;
-
-    for ( ; i < maxAddrs; i++)
-        bt[i] = (void *) 0;
-#elif __i386__
-#define SANE_i386_FRAME_SIZE 8*1024
+#if __x86_64__
+#define SANE_x86_64_FRAME_SIZE (kernel_stack_size >> 1)
     vm_offset_t stackptr, stackptr_prev, raddr;
     unsigned frame_index = 0;
     /* Obtain current frame pointer */
-    __asm__ volatile("movl %%ebp, %0" : "=m" (stackptr));
-    if (!i386_validate_stackptr(stackptr))
+    __asm__ volatile("movq %%rbp, %0" : "=m" (stackptr));
+
+    if (!x86_64_validate_stackptr(stackptr))
         goto pad;
-    raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));
+    raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));
-    if (!i386_validate_raddr(raddr))
+    if (!x86_64_validate_raddr(raddr))
        goto pad;
     bt[frame_index++] = (void *) raddr;
@@ -201,18 +189,18 @@ unsigned OSBacktrace(void **bt, unsigned maxAddrs)
         stackptr_prev = stackptr;
         stackptr = *((vm_offset_t *) stackptr_prev);
-        if (!i386_validate_stackptr(stackptr))
+        if (!x86_64_validate_stackptr(stackptr))
             break;
         /* Stack grows downwards */
         if (stackptr < stackptr_prev)
             break;
-        if ((stackptr_prev ^ stackptr) > SANE_i386_FRAME_SIZE)
+        if ((stackptr - stackptr_prev) > SANE_x86_64_FRAME_SIZE)
             break;
-        raddr = *((vm_offset_t *) (stackptr + i386_RETURN_OFFSET));
+        raddr = *((vm_offset_t *) (stackptr + x86_64_RETURN_OFFSET));
-        if (!i386_validate_raddr(raddr))
+        if (!x86_64_validate_raddr(raddr))
             break;
         bt[frame_index] = (void *) raddr;
@@ -222,30 +210,6 @@ pad:
     for ( ; frame_index < maxAddrs; frame_index++)
         bt[frame_index] = (void *) 0;
-#elif __arm__
-    uint32_t i= 0;
-    uint32_t frameb[2];
-    uint32_t fp= 0;
-
-    // get the current frame pointer for this thread
-    __asm__ volatile("mov %0,r7" : "=r" (fp));
-
-    // now crawl up the stack recording the link value of each frame
-    do {
-        // check bounds
-        if ((fp == 0) || ((fp & 3) != 0) || (fp > VM_MAX_KERNEL_ADDRESS) || (fp < VM_MIN_KERNEL_ADDRESS)) {
-            break;
-        }
-        // safely read frame
-        if (copyinframe(fp, frameb) != 0) {
-            break;
-        }
-
-        // No need to use copyin as this is always a kernel address, see check above
-        bt[i] = (void*)frameb[1]; // link register
-        fp = frameb[0];
-    } while (++i < maxAddrs);
-    frame= i;
 #else
 #error arch
 #endif
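
The new __x86_64__ OSBacktrace() path above walks the chain of saved %rbp values: it reads the return address 8 bytes above each frame pointer (x86_64_RETURN_OFFSET), then follows the saved frame pointer, requiring that each candidate is non-NULL, 8-byte aligned, translates to a physical page, lies above the previous frame (the stack grows downwards) and is no more than half a kernel stack away. The user-space sketch below is not part of the patch; it only illustrates that frame-pointer walk. demo_backtrace(), the 8 KB frame cap and the simplified validity checks are assumptions standing in for the kernel-only kvtophys()/VM_MIN_KERNEL_AND_KEXT_ADDRESS checks, and it assumes an x86-64 build with frame pointers kept (e.g. -fno-omit-frame-pointer).

/*
 * Illustrative user-space sketch (not from the patch) of the frame-pointer
 * walk performed by the new __x86_64__ OSBacktrace() path.  demo_backtrace()
 * and the constants below are hypothetical; the kernel validation calls are
 * replaced by simple alignment/monotonicity checks.
 */
#include <stdio.h>
#include <stdint.h>

#define RETURN_OFFSET   8           /* return address sits 8 bytes above the saved %rbp */
#define SANE_FRAME_SIZE (8 * 1024)  /* give up if a single frame spans more than 8 KB */

static unsigned
demo_backtrace(void **bt, unsigned maxAddrs)
{
    uintptr_t fp, fp_prev, raddr;
    unsigned frame_index = 0;

    /* Current frame pointer: each frame stores (previous %rbp, return address). */
    __asm__ volatile("movq %%rbp, %0" : "=m" (fp));

    while (frame_index < maxAddrs) {
        if (!fp || (fp & 0x7))                          /* existence and alignment check */
            break;

        raddr = *((uintptr_t *) (fp + RETURN_OFFSET));  /* return address of this frame */
        if (!raddr)
            break;
        bt[frame_index++] = (void *) raddr;

        fp_prev = fp;
        fp = *((uintptr_t *) fp_prev);                  /* follow the saved frame pointer */
        if (fp <= fp_prev)                              /* stack grows downwards */
            break;
        if ((fp - fp_prev) > SANE_FRAME_SIZE)           /* sanity-check the frame size */
            break;
    }

    /* Pad the remainder, as OSBacktrace() does after the "pad:" label. */
    for (unsigned i = frame_index; i < maxAddrs; i++)
        bt[i] = (void *) 0;

    return frame_index;
}

int
main(void)
{
    void *bt[16];
    unsigned n = demo_backtrace(bt, 16);

    for (unsigned i = 0; i < n; i++)
        printf("bt[%.2u] = %p\n", i, bt[i]);
    return 0;
}

Note also why the patch can replace the old (stackptr_prev ^ stackptr) > SANE_i386_FRAME_SIZE test with a plain subtraction: the "stack grows downwards" check just before it already guarantees stackptr >= stackptr_prev, so stackptr - stackptr_prev cannot wrap.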