/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <i386/trap.h>
#include <i386/mp.h>
#include <kdp/kdp_internal.h>
+#include <kdp/kdp_callout.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
#include <kern/machine.h> /* for halt_all_cpus */
+#include <libkern/OSAtomic.h>
#include <kern/thread.h>
#include <i386/thread.h>
#include <vm/vm_map.h>
#include <i386/pmap.h>
+#include <kern/kalloc.h>
#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
void kdp_print_phys(int);
int
-machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p);
+machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
int
-machine_trace_thread64(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p);
+machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
-extern unsigned kdp_vm_read(caddr_t src, caddr_t dst, unsigned len);
+unsigned
+machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
+
+static void kdp_callouts(kdp_event_t event);
void
kdp_exception(
state->esi = saved_state->esi;
state->ebp = saved_state->ebp;
- if ((saved_state->cs & 0x3) == 0){ /* Kernel State */
- state->esp = (unsigned int) &saved_state->uesp;
+ if ((saved_state->cs & SEL_PL) == SEL_PL_K) { /* Kernel state? */
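+		/*
+		 * On 64-bit capable CPUs the trap saves a full frame
+		 * including uesp; in legacy mode the kernel stack pointer
+		 * is the address just past the 32-bit saved state.
+		 */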
+ if (cpu_mode_is64bit())
+ state->esp = (uint32_t) saved_state->uesp;
+ else
+ state->esp = ((uint32_t)saved_state) + offsetof(x86_saved_state_t, ss_32) + sizeof(x86_saved_state32_t);
state->ss = KERNEL_DS;
} else {
state->esp = saved_state->uesp;
saved_state->frame.eflags |= ( EFL_IF | EFL_SET );
#endif
saved_state->eip = state->eip;
- saved_state->fs = state->fs;
- saved_state->gs = state->gs;
}
__unused int *size
)
{
- static struct i386_float_state null_fpstate;
+ static x86_float_state32_t null_fpstate;
switch (flavor) {
- case OLD_i386_THREAD_STATE:
case x86_THREAD_STATE32:
dprintf(("kdp_readregs THREAD_STATE\n"));
kdp_getstate((x86_thread_state32_t *)data);
{
switch (flavor) {
- case OLD_i386_THREAD_STATE:
case x86_THREAD_STATE32:
dprintf(("kdp_writeregs THREAD_STATE\n"));
kdp_setstate((x86_thread_state32_t *)data);
void
-kdp_reboot(void)
+kdp_machine_reboot(void)
{
printf("Attempting system restart...");
+ kprintf("Attempting system restart...");
	/* Call the platform specific restart */
if (PE_halt_restart)
(*PE_halt_restart)(kPERestartCPU);
}
int
-kdp_getc()
+kdp_getc(void)
{
return cnmaygetc();
}
kprintf("pc = 0x%x\n", saved_state->eip);
kprintf("cr2= 0x%x\n", saved_state->cr2);
kprintf("rp = TODO FIXME\n");
- kprintf("sp = 0x%x\n", saved_state);
+ kprintf("sp = %p\n", saved_state);
}
void
-kdp_sync_cache()
+kdp_sync_cache(void)
{
return; /* No op here. */
}
void
-kdp_call()
+kdp_call(void)
{
__asm__ volatile ("int $3"); /* Let the processor do the work */
}
boolean_t
kdp_i386_trap(
- unsigned int trapno,
+ unsigned int trapno,
x86_saved_state32_t *saved_state,
kern_return_t result,
vm_offset_t va
unsigned int exception, subcode = 0, code;
if (trapno != T_INT3 && trapno != T_DEBUG) {
- kprintf("unexpected kernel trap 0x%x eip 0x%x cr2 0x%x \n",
+ kprintf("Debugger: Unexpected kernel trap number: "
+ "0x%x, EIP: 0x%x, CR2: 0x%x\n",
trapno, saved_state->eip, saved_state->cr2);
if (!kdp.is_conn)
return FALSE;
}
mp_kdp_enter();
+ kdp_callouts(KDP_EVENT_ENTER);
+
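+	/*
+	 * Single-step (TF) traps arrive with kernel preemption disabled
+	 * for the step; re-enable it here and disable it again on exit
+	 * below.
+	 */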
+ if (saved_state->efl & EFL_TF) {
+ enable_preemption_no_check();
+ }
switch (trapno) {
}
kdp_raise_exception(exception, code, subcode, saved_state);
+	/*
+	 * If the instruction single-step bit (TF) is set, disable kernel
+	 * preemption again before resuming.
+	 */
+ if (saved_state->efl & EFL_TF) {
+ disable_preemption();
+ }
+ kdp_callouts(KDP_EVENT_EXIT);
mp_kdp_exit();
return TRUE;
return(FALSE);
}
-unsigned int
-kdp_ml_get_breakinsn(void)
+void
+kdp_machine_get_breakinsn(
+ uint8_t *bytes,
+ uint32_t *size
+)
{
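+	/* 0xCC is the one-byte INT3 breakpoint opcode on x86. */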
- return 0xcc;
+ bytes[0] = 0xcc;
+ *size = 1;
}
+
extern pmap_t kdp_pmap;
#define RETURN_OFFSET 4
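+/* Offset from a frame pointer to the saved return address (32-bit frames). */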
int
-machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p)
+machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
uint32_t *tracebuf = (uint32_t *)tracepos;
uint32_t fence = 0;
iss32 = USER_REGS32(thread);
- init_eip = iss32->eip;
- stackptr = iss32->ebp;
+ init_eip = iss32->eip;
+ stackptr = iss32->ebp;
	/* This bound isn't useful, but it doesn't hinder us */
stacklimit = 0xffffffff;
for (framecount = 0; framecount < nframes; framecount++) {
- if ((tracebound - ((uint32_t) tracebuf)) < (4 * framesize)) {
+ if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
tracebuf--;
break;
}
if (!stackptr || (stackptr == fence)) {
break;
}
- /* Stack grows downward */
- if (stackptr < prevsp) {
- break;
- }
+
/* Unaligned frame */
if (stackptr & 0x0000003) {
break;
}
+
if (stackptr > stacklimit) {
break;
}
+
+ if (stackptr <= prevsp) {
+ break;
+ }
- if (kdp_vm_read((caddr_t) (stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
+ if (kdp_machine_vm_read((mach_vm_address_t)(stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
break;
}
tracebuf++;
prevsp = stackptr;
- if (kdp_vm_read((caddr_t) stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
+ if (kdp_machine_vm_read((mach_vm_address_t)stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
*tracebuf++ = 0;
break;
}
kdp_pmap = 0;
- return ((uint32_t) tracebuf - tracepos);
+ return (uint32_t) (((char *) tracebuf) - tracepos);
+}
+
+#define RETURN_OFFSET64 8
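+/* Offset from a frame pointer to the saved return address (64-bit frames). */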
+/* Routine to encapsulate the 64-bit address read hack */
+unsigned
+machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
+{
+ return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}
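+
+/*
+ * Record a backtrace for a 64-bit thread: the buffer receives the
+ * initial RIP followed by (frame pointer, return address) pairs,
+ * following saved RBP links until a fence, bound, or misaligned
+ * frame ends the walk.
+ */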
-/* This is a stub until the x86 64-bit model becomes clear */
int
-machine_trace_thread64(__unused thread_t thread, __unused uint32_t tracepos, __unused uint32_t tracebound, __unused int nframes, __unused boolean_t user_p) {
- return 0;
+machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
+{
+ uint64_t *tracebuf = (uint64_t *)tracepos;
+ uint32_t fence = 0;
+ addr64_t stackptr = 0;
+ uint64_t stacklimit = 0xfc000000;
+ int framecount = 0;
+ addr64_t init_rip = 0;
+ addr64_t prevsp = 0;
+ unsigned framesize = 2 * sizeof(addr64_t);
+
+ if (user_p) {
+ x86_saved_state64_t *iss64;
+ iss64 = USER_REGS64(thread);
+ init_rip = iss64->isf.rip;
+ stackptr = iss64->rbp;
+ stacklimit = 0xffffffffffffffffULL;
+ kdp_pmap = thread->task->map->pmap;
+ }
+
+ *tracebuf++ = init_rip;
+
+ for (framecount = 0; framecount < nframes; framecount++) {
+
+ if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
+ tracebuf--;
+ break;
+ }
+
+ *tracebuf++ = stackptr;
+
+		if (!stackptr || (stackptr == fence)) {
+ break;
+ }
+
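+		/* Unaligned frame */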
+ if (stackptr & 0x0000003) {
+ break;
+ }
+ if (stackptr > stacklimit) {
+ break;
+ }
+
+ if (stackptr <= prevsp) {
+ break;
+ }
+
+ if (machine_read64(stackptr + RETURN_OFFSET64, (caddr_t) tracebuf, sizeof(addr64_t)) != sizeof(addr64_t)) {
+ break;
+ }
+ tracebuf++;
+
+ prevsp = stackptr;
+ if (machine_read64(stackptr, (caddr_t) &stackptr, sizeof(addr64_t)) != sizeof(addr64_t)) {
+ *tracebuf++ = 0;
+ break;
+ }
+ }
+
+ kdp_pmap = NULL;
+
+ return (uint32_t) (((char *) tracebuf) - tracepos);
+}
+
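+/* Singly-linked list of callout functions invoked on debugger entry/exit. */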
+static struct kdp_callout {
+ struct kdp_callout *callout_next;
+ kdp_callout_fn_t callout_fn;
+ void *callout_arg;
+} *kdp_callout_list = NULL;
+
+/*
+ * Called from kernel context to register a kdp event callout.
+ */
+void
+kdp_register_callout(
+ kdp_callout_fn_t fn,
+ void *arg)
+{
+ struct kdp_callout *kcp;
+ struct kdp_callout *list_head;
+
+ kcp = kalloc(sizeof(*kcp));
+ if (kcp == NULL)
+ panic("kdp_register_callout() kalloc failed");
+
+ kcp->callout_fn = fn;
+ kcp->callout_arg = arg;
+
+ /* Lock-less list insertion using compare and exchange. */
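+	/* Retry if another thread updated the head meanwhile. */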
+ do {
+ list_head = kdp_callout_list;
+ kcp->callout_next = list_head;
+ } while (!OSCompareAndSwapPtr(list_head, kcp, (void * volatile *)&kdp_callout_list));
+}
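+
+#if KDP_TEST_HARNESS
+/*
+ * Illustrative sketch only (compiled out by default): a hypothetical
+ * client registering for debugger entry/exit notifications. The name
+ * "kdp_test_callout" and its output are examples, not part of this
+ * change.
+ */
+static void
+kdp_test_callout(void *arg, kdp_event_t event)
+{
+	if (event == KDP_EVENT_ENTER)
+		kprintf("kdp_test_callout: entering debugger, arg %p\n", arg);
+	else if (event == KDP_EVENT_EXIT)
+		kprintf("kdp_test_callout: exiting debugger, arg %p\n", arg);
+}
+/* A client would register with: kdp_register_callout(kdp_test_callout, NULL); */
+#endif /* KDP_TEST_HARNESS */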
+
+/*
+ * Called at exception/panic time when entering or exiting kdp.
+ * We are single-threaded at this time and so we don't use locks.
+ */
+static void
+kdp_callouts(kdp_event_t event)
+{
+ struct kdp_callout *kcp = kdp_callout_list;
+
+ while (kcp) {
+ kcp->callout_fn(kcp->callout_arg, event);
+ kcp = kcp->callout_next;
+ }
+}
+
+void
+kdp_ml_enter_debugger(void)
+{
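+	/* Software breakpoint: traps into kdp_i386_trap() as T_INT3. */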
+ __asm__ __volatile__("int3");
}