diff --git a/osfmk/kdp/ml/i386/kdp_machdep.c b/osfmk/kdp/ml/i386/kdp_machdep.c
index 8d91941199b83212a6b4aedafdb7d083e6cc51cb..ca38128a4b643bd8dcdc68d0474727d2e2fd3f04 100644
--- a/osfmk/kdp/ml/i386/kdp_machdep.c
+++ b/osfmk/kdp/ml/i386/kdp_machdep.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 #include <i386/trap.h>
 #include <i386/mp.h>
 #include <kdp/kdp_internal.h>
+#include <kdp/kdp_callout.h>
 #include <mach-o/loader.h>
 #include <mach-o/nlist.h>
 #include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
 #include <kern/machine.h> /* for halt_all_cpus */
+#include <libkern/OSAtomic.h>
 
 #include <kern/thread.h>
 #include <i386/thread.h>
 #include <vm/vm_map.h>
 #include <i386/pmap.h>
+#include <kern/kalloc.h>
 
 #define KDP_TEST_HARNESS 0
 #if KDP_TEST_HARNESS
@@ -63,12 +66,15 @@ void                kdp_setstate(i386_thread_state_t *);
 void           kdp_print_phys(int);
 
 int
-machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p);
+machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
 
 int
-machine_trace_thread64(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p);
+machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
 
-extern unsigned kdp_vm_read(caddr_t src, caddr_t dst, unsigned len);
+unsigned
+machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
+
+static void    kdp_callouts(kdp_event_t event);
 
 void
 kdp_exception(
@@ -146,8 +152,11 @@ kdp_getstate(
     state->esi = saved_state->esi;
     state->ebp = saved_state->ebp;
 
-    if ((saved_state->cs & 0x3) == 0){ /* Kernel State */
-       state->esp = (unsigned int) &saved_state->uesp;
+    if ((saved_state->cs & SEL_PL) == SEL_PL_K) { /* Kernel state? */
+           if (cpu_mode_is64bit())
+                   state->esp = (uint32_t) saved_state->uesp;
+           else
+                   state->esp = ((uint32_t)saved_state) + offsetof(x86_saved_state_t, ss_32) + sizeof(x86_saved_state32_t);
         state->ss = KERNEL_DS;
     } else {
        state->esp = saved_state->uesp;
@@ -186,8 +195,6 @@ kdp_setstate(
     saved_state->frame.eflags |=  ( EFL_IF | EFL_SET );
 #endif
     saved_state->eip = state->eip;
-    saved_state->fs = state->fs;
-    saved_state->gs = state->gs;
 }
 
 
@@ -199,11 +206,10 @@ kdp_machine_read_regs(
     __unused int *size
 )
 {
-    static struct i386_float_state  null_fpstate;
+    static x86_float_state32_t  null_fpstate;
 
     switch (flavor) {
 
-    case OLD_i386_THREAD_STATE:
     case x86_THREAD_STATE32:
        dprintf(("kdp_readregs THREAD_STATE\n"));
        kdp_getstate((x86_thread_state32_t *)data);
@@ -233,7 +239,6 @@ kdp_machine_write_regs(
 {
     switch (flavor) {
 
-    case OLD_i386_THREAD_STATE:
     case x86_THREAD_STATE32:
        dprintf(("kdp_writeregs THREAD_STATE\n"));
        kdp_setstate((x86_thread_state32_t *)data);
@@ -282,9 +287,10 @@ kdp_panic(
 
 
 void
-kdp_reboot(void)
+kdp_machine_reboot(void)
 {
        printf("Attempting system restart...");
+       kprintf("Attempting system restart...");
        /* Call the platform specific restart*/
        if (PE_halt_restart)
                (*PE_halt_restart)(kPERestartCPU);
@@ -305,7 +311,7 @@ kdp_intr_enbl(int s)
 }
 
 int
-kdp_getc()
+kdp_getc(void)
 {
        return  cnmaygetc();
 }
@@ -325,18 +331,18 @@ void print_saved_state(void *state)
        kprintf("pc = 0x%x\n", saved_state->eip);
        kprintf("cr2= 0x%x\n", saved_state->cr2);
        kprintf("rp = TODO FIXME\n");
-       kprintf("sp = 0x%x\n", saved_state);
+       kprintf("sp = %p\n", saved_state);
 
 }
 
 void
-kdp_sync_cache()
+kdp_sync_cache(void)
 {
        return; /* No op here. */
 }
 
 void
-kdp_call()
+kdp_call(void)
 {
        __asm__ volatile ("int  $3");   /* Let the processor do the work */
 }
@@ -373,7 +379,7 @@ kdp_print_phys(int src)
 
 boolean_t
 kdp_i386_trap(
-    unsigned int       trapno,
+    unsigned int               trapno,
     x86_saved_state32_t        *saved_state,
     kern_return_t      result,
     vm_offset_t                va
@@ -382,13 +388,19 @@ kdp_i386_trap(
     unsigned int exception, subcode = 0, code;
 
     if (trapno != T_INT3 && trapno != T_DEBUG) {
-       kprintf("unexpected kernel trap 0x%x eip 0x%x cr2 0x%x \n",
+       kprintf("Debugger: Unexpected kernel trap number: "
+               "0x%x, EIP: 0x%x, CR2: 0x%x\n",
                trapno, saved_state->eip, saved_state->cr2);
        if (!kdp.is_conn)
            return FALSE;
     }  
 
     mp_kdp_enter();
+    kdp_callouts(KDP_EVENT_ENTER);
+
+    if (saved_state->efl & EFL_TF) {
+           enable_preemption_no_check();
+    }
 
     switch (trapno) {
     
@@ -454,7 +466,13 @@ kdp_i386_trap(
     }
 
     kdp_raise_exception(exception, code, subcode, saved_state);
+    /* If the single-step trap flag is set, re-disable kernel preemption
+     * to balance the enable_preemption_no_check() done on entry.
+     */
+    if (saved_state->efl & EFL_TF) {
+           disable_preemption();
+    }
 
+    kdp_callouts(KDP_EVENT_EXIT);
     mp_kdp_exit();
 
     return TRUE;
@@ -467,16 +485,21 @@ kdp_call_kdb(
         return(FALSE);
 }
 
-unsigned int
-kdp_ml_get_breakinsn(void)
+void
+kdp_machine_get_breakinsn(
+                                                 uint8_t *bytes,
+                                                 uint32_t *size
+)
 {
-  return 0xcc;
+       bytes[0] = 0xcc;
+       *size = 1;
 }
+
 extern pmap_t kdp_pmap;
 
 #define RETURN_OFFSET 4
 int
-machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p)
+machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
 {
        uint32_t *tracebuf = (uint32_t *)tracepos;
        uint32_t fence = 0;
@@ -492,8 +515,8 @@ machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, in
                
                iss32 = USER_REGS32(thread);
 
-               init_eip = iss32->eip;
-               stackptr = iss32->ebp;
+                       init_eip = iss32->eip;
+                       stackptr = iss32->ebp;
 
                /* This bound isn't useful, but it doesn't hinder us*/
                stacklimit = 0xffffffff;
@@ -509,7 +532,7 @@ machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, in
 
        for (framecount = 0; framecount < nframes; framecount++) {
 
-               if ((tracebound - ((uint32_t) tracebuf)) < (4 * framesize)) {
+               if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
                        tracebuf--;
                        break;
                }
@@ -519,25 +542,27 @@ machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, in
                if (!stackptr || (stackptr == fence)) {
                        break;
                }
-               /* Stack grows downward */
-               if (stackptr < prevsp) {
-                       break;
-               }
+
                /* Unaligned frame */
                if (stackptr & 0x0000003) {
                        break;
                }
+
                if (stackptr > stacklimit) {
                        break;
                }
+               
+               if (stackptr <= prevsp) {
+                       break;
+               }
 
-               if (kdp_vm_read((caddr_t) (stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
+               if (kdp_machine_vm_read((mach_vm_address_t)(stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
                        break;
                }
                tracebuf++;
                
                prevsp = stackptr;
-               if (kdp_vm_read((caddr_t) stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
+               if (kdp_machine_vm_read((mach_vm_address_t)stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
                        *tracebuf++ = 0;
                        break;
                }
@@ -545,11 +570,130 @@ machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, in
 
        kdp_pmap = 0;
 
-       return ((uint32_t) tracebuf - tracepos);
+       return (uint32_t) (((char *) tracebuf) - tracepos);
+}
+
+#define RETURN_OFFSET64        8
+/* Routine to encapsulate the 64-bit address read hack*/
+unsigned
+machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
+{
+       return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
 }
 
-/* This is a stub until the x86 64-bit model becomes clear */
 int
-machine_trace_thread64(__unused thread_t thread, __unused uint32_t tracepos, __unused uint32_t tracebound, __unused int nframes, __unused boolean_t user_p) {
-       return 0;
+machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
+{
+       uint64_t *tracebuf = (uint64_t *)tracepos;
+       uint32_t fence = 0;
+       addr64_t stackptr = 0;
+       uint64_t stacklimit = 0xfc000000;
+       int framecount = 0;
+       addr64_t init_rip = 0;
+       addr64_t prevsp = 0;
+       unsigned framesize = 2 * sizeof(addr64_t);
+       
+       if (user_p) {
+               x86_saved_state64_t     *iss64;
+               iss64 = USER_REGS64(thread);
+               init_rip = iss64->isf.rip;
+               stackptr = iss64->rbp;
+               stacklimit = 0xffffffffffffffffULL;
+               kdp_pmap = thread->task->map->pmap;
+       }
+
+       *tracebuf++ = init_rip;
+
+       for (framecount = 0; framecount < nframes; framecount++) {
+
+               if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
+                       tracebuf--;
+                       break;
+               }
+
+               *tracebuf++ = stackptr;
+
+               if (!stackptr || (stackptr == fence)){
+                       break;
+               }
+
+               if (stackptr & 0x0000003) {
+                       break;
+               }
+               if (stackptr > stacklimit) {
+                       break;
+               }
+
+               if (stackptr <= prevsp) {
+                       break;
+               }
+
+               if (machine_read64(stackptr + RETURN_OFFSET64, (caddr_t) tracebuf, sizeof(addr64_t)) != sizeof(addr64_t)) {
+                       break;
+               }
+               tracebuf++;
+
+               prevsp = stackptr;
+               if (machine_read64(stackptr, (caddr_t) &stackptr, sizeof(addr64_t)) != sizeof(addr64_t)) {
+                       *tracebuf++ = 0;
+                       break;
+               }
+       }
+
+       kdp_pmap = NULL;
+
+       return (uint32_t) (((char *) tracebuf) - tracepos);
+}
+
+static struct kdp_callout {
+       struct kdp_callout      *callout_next;
+       kdp_callout_fn_t        callout_fn;
+       void                    *callout_arg;
+} *kdp_callout_list = NULL;
+
+
+/*
+ * Called from kernel context to register a kdp event callout.
+ */
+void
+kdp_register_callout(
+       kdp_callout_fn_t        fn,
+       void                    *arg)
+{
+       struct kdp_callout      *kcp;
+       struct kdp_callout      *list_head;
+
+       kcp = kalloc(sizeof(*kcp));
+       if (kcp == NULL)
+               panic("kdp_register_callout() kalloc failed");
+
+       kcp->callout_fn  = fn;
+       kcp->callout_arg = arg;
+
+       /* Lock-less list insertion using compare and exchange. */
+       do {
+               list_head = kdp_callout_list;
+               kcp->callout_next = list_head;
+       } while (!OSCompareAndSwapPtr(list_head, kcp, (void * volatile *)&kdp_callout_list));
+}
+
+/*
+ * Called at exception/panic time when entering or exiting kdp.
+ * We are single-threaded at this time and so we don't use locks.
+ */
+static void
+kdp_callouts(kdp_event_t event)
+{
+       struct kdp_callout      *kcp = kdp_callout_list;
+
+       while (kcp) {
+               kcp->callout_fn(kcp->callout_arg, event); 
+               kcp = kcp->callout_next;
+       }       
+}
+
+void
+kdp_ml_enter_debugger(void)
+{
+       __asm__ __volatile__("int3");
 }
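
For illustration, here is a minimal sketch of how a client might use the kdp_register_callout() interface added in this revision. The callout is invoked with KDP_EVENT_ENTER when the debugger takes control (see the kdp_callouts(KDP_EVENT_ENTER) call in kdp_i386_trap above) and with KDP_EVENT_EXIT when normal execution resumes; because the system is single-threaded at that point, the callout must not block, allocate, or take locks. The device names and quiesce/resume helpers below are hypothetical placeholders; only kdp_register_callout(), kdp_callout_fn_t, and the KDP_EVENT_* constants are assumed to come from <kdp/kdp_callout.h> as used in this diff.

#include <kdp/kdp_callout.h>
#include <mach/boolean.h>

/* Hypothetical per-device state; not part of xnu. */
struct my_device_state {
	boolean_t	quiesced;
};

/* Hypothetical helpers standing in for real hardware handling. */
static void my_device_quiesce(struct my_device_state *dev) { dev->quiesced = TRUE; }
static void my_device_resume(struct my_device_state *dev)  { dev->quiesced = FALSE; }

/*
 * Callout run by kdp_callouts() while the debugger owns the machine,
 * so it only flips in-memory state and returns immediately.
 */
static void
my_device_kdp_callout(void *arg, kdp_event_t event)
{
	struct my_device_state *dev = (struct my_device_state *)arg;

	switch (event) {
	case KDP_EVENT_ENTER:		/* debugger taking over the machine */
		my_device_quiesce(dev);
		break;
	case KDP_EVENT_EXIT:		/* debugger resuming normal execution */
		my_device_resume(dev);
		break;
	default:			/* ignore any other events */
		break;
	}
}

static struct my_device_state my_device;

void
my_device_init(void)
{
	/*
	 * Registration is lock-free (compare-and-swap list insertion) and
	 * permanent: this diff provides no corresponding unregister call,
	 * so the argument must stay valid for the life of the kernel.
	 */
	kdp_register_callout(my_device_kdp_callout, &my_device);
}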