/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_OSREFERENCE_HEADER_START@
*
#include <i386/setjmp.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
+#include <i386/mp.h>
+#include <i386/machine_cpu.h>
#include <mach/vm_param.h>
#include <vm/vm_map.h>
#include <ddb/db_break.h>
#include <ddb/db_watch.h>
+#include <i386/cpu_data.h>
+
int db_active = 0;
-struct i386_saved_state *i386_last_saved_statep;
-struct i386_saved_state i386_nested_saved_state;
+x86_saved_state32_t *i386_last_saved_statep;
+x86_saved_state32_t i386_nested_saved_state;
unsigned i386_last_kdb_sp;
extern thread_t db_default_act;
int esi;
int ebp;
int ebx;
- struct i386_interrupt_state *is;
+ x86_saved_state32_t *is;
};
extern char * trap_type[];
* in a ktss, we hard-wire that in, rather than indexing the gdt
* with tss_sel to derive a pointer to the desired tss.
*/
+
+/*
+ * Code used to synchronize kdb among all cpus, one active at a time; switch
+ * from one cpu to another using the ddb "cpu" command (cpu #cpu).
+ */
+
+decl_simple_lock_data(, kdb_lock) /* kdb lock */
+
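+/*
+ * The kdb lock is taken with the raw hw_lock primitives on the lock's
+ * interlock, bypassing the regular simple_lock interface.
+ */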
+#define db_simple_lock_init(l, e) hw_lock_init(&((l)->interlock))
+#define db_simple_lock_try(l) hw_lock_try(&((l)->interlock))
+#define db_simple_unlock(l) hw_lock_unlock(&((l)->interlock))
+
+int kdb_cpu = -1; /* current cpu running kdb */
+int kdb_debug = 1;
+volatile unsigned int cpus_holding_bkpts; /* counter for number of cpus
+ * holding breakpoints (i.e. cpus that
+ * have not inserted breakpoints back)
+ */
+extern boolean_t db_breakpoints_inserted;
+
void
db_tss_to_frame(
int tss_sel,
- struct i386_saved_state *regs)
+ x86_saved_state32_t *regs)
{
extern struct i386_tss ktss;
int mycpu = cpu_number();
/*
* ddb will overwrite whatever's in esp, so put esp0 elsewhere, too.
*/
- regs->esp = tss->esp0;
+ regs->cr2 = tss->esp0;
regs->efl = tss->eflags;
regs->eip = tss->eip;
regs->trapno = tss->ss0; /* XXX */
*/
boolean_t
db_trap_from_asm(
- struct i386_saved_state *regs)
+ x86_saved_state32_t *regs)
{
int code;
int type;
kdb_trap(
int type,
int code,
- struct i386_saved_state *regs)
+ x86_saved_state32_t *regs)
{
extern char etext;
boolean_t trap_from_user;
- spl_t s = splhigh();
+ spl_t s;
+ int previous_console_device;
+ s = splhigh();
+
+ previous_console_device = switch_to_serial_console();
+
+ db_printf("kdb_trap(): type %d, code %d, regs->eip 0x%x\n", type, code, regs->eip);
switch (type) {
case T_DEBUG: /* single_step */
{
if (!IS_USER_TRAP(regs, &etext)) {
bzero((char *)&ddb_regs, sizeof (ddb_regs));
- *(struct i386_saved_state_from_kernel *)&ddb_regs =
- *(struct i386_saved_state_from_kernel *)regs;
+ *(struct x86_saved_state32_from_kernel *)&ddb_regs =
+ *(struct x86_saved_state32_from_kernel *)regs;
trap_from_user = FALSE;
}
else {
regs->ecx = ddb_regs.ecx;
regs->edx = ddb_regs.edx;
regs->ebx = ddb_regs.ebx;
+
if (trap_from_user) {
/*
* user mode - saved esp and ss valid
regs->uesp = ddb_regs.uesp; /* user stack pointer */
regs->ss = ddb_regs.ss & 0xffff; /* user stack segment */
}
+
regs->ebp = ddb_regs.ebp;
regs->esi = ddb_regs.esi;
regs->edi = ddb_regs.edi;
trap_from_user))
== BKPT_INST))
regs->eip += BKPT_SIZE;
-
+
+ switch_to_old_console(previous_console_device);
kdb_exit:
kdb_leave();
current_cpu_datap()->cpu_kdb_saved_state = 0;
-#if MACH_MP_DEBUG
- current_cpu_datap()->cpu_masked_state_cnt = 0;
-#endif /* MACH_MP_DEBUG */
-
enable_preemption();
splx(s);
{
extern char etext;
boolean_t trap_from_user;
- struct i386_interrupt_state *is = int_regs->is;
- struct i386_saved_state regs;
+ x86_saved_state32_t *is = int_regs->is;
+ x86_saved_state32_t regs;
spl_t s;
s = splhigh();
int flag)
{
register pt_entry_t *ptp;
+ vm_offset_t src;
+
+ /*
+ * must not be preempted while using the pte pointer passed
+ * back, since it has been mapped through a per-cpu window
+ */
+ mp_disable_preemption();
- ptp = pmap_pte(task->map->pmap, addr);
+ ptp = pmap_pte(task->map->pmap, (vm_map_offset_t)addr);
if (ptp == PT_ENTRY_NULL || (*ptp & INTEL_PTE_VALID) == 0) {
if (flag) {
db_printf("\nno memory is assigned to address %08x\n", addr);
db_error(0);
/* NOTREACHED */
}
+ mp_enable_preemption();
return(-1);
}
-
src = (vm_offset_t)pte_to_pa(*ptp);
+
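+ /*
+ * the physical address has now been extracted from the pte, so the
+ * per-cpu pte mapping is no longer needed and preemption can be
+ * re-enabled
+ */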
+ mp_enable_preemption();
+
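+ /*
+ * point the DMAP1 pte at the target physical page so the data can
+ * be accessed through that mapping
+ */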
*(int *) DMAP1 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
INTEL_PTE_REF | INTEL_PTE_MOD;
#if defined(I386_CPU)
if (addr >= VM_MIN_KERNEL_ADDRESS &&
addr <= (vm_offset_t)&etext)
{
- ptep0 = pmap_pte(kernel_pmap, addr);
+ ptep0 = pmap_pte(kernel_pmap, (vm_map_offset_t)addr);
oldmap0 = *ptep0;
*ptep0 |= INTEL_PTE_WRITE;
if (i386_trunc_page(addr) != addr1) {
/* data crosses a page boundary */
- ptep1 = pmap_pte(kernel_pmap, addr1);
+ ptep1 = pmap_pte(kernel_pmap, (vm_map_offset_t)addr1);
oldmap1 = *ptep1;
*ptep1 |= INTEL_PTE_WRITE;
}
db_printf(" ");
}
-/*
- * Code used to synchronize kdb among all cpus, one active at a time, switch
- * from on to another using kdb_on! #cpu or cpu #cpu
- */
-
-decl_simple_lock_data(, kdb_lock) /* kdb lock */
-
-#define db_simple_lock_init(l, e) hw_lock_init(&((l)->interlock))
-#define db_simple_lock_try(l) hw_lock_try(&((l)->interlock))
-#define db_simple_unlock(l) hw_lock_unlock(&((l)->interlock))
-
-int kdb_cpu = -1; /* current cpu running kdb */
-int kdb_debug = 0;
-volatile unsigned int cpus_holding_bkpts; /* counter for number of cpus holding
- breakpoints (ie: cpus that did not
- insert back breakpoints) */
-extern boolean_t db_breakpoints_inserted;
-
void
db_machdep_init(void)
{
db_simple_lock_init(&kdb_lock, 0);
for (c = 0; c < real_ncpus; ++c) {
- db_stacks[c] = (vm_offset_t) (db_stack_store +
- (INTSTACK_SIZE * (c + 1)) - sizeof (natural_t));
if (c == master_cpu) {
- dbtss.esp0 = (int)(db_task_stack_store +
+ master_dbtss.esp0 = (int)(db_task_stack_store +
(INTSTACK_SIZE * (c + 1)) - sizeof (natural_t));
- dbtss.esp = dbtss.esp0;
- dbtss.eip = (int)&db_task_start;
+ master_dbtss.esp = master_dbtss.esp0;
+ master_dbtss.eip = (int)&db_task_start;
/*
* The TSS for the debugging task on each slave CPU
- * is set up in mp_desc_init().
+ * is set up in cpu_desc_init().
*/
}
}
int
kdb_enter(int pc)
{
- int mycpu;
+ int my_cpu;
int retval;
disable_preemption();
- mycpu = cpu_number();
+ my_cpu = cpu_number();
if (current_cpu_datap()->cpu_db_pass_thru) {
retval = 0;
}
current_cpu_datap()->cpu_kdb_active++;
+
lock_kdb();
- if (kdb_debug)
- db_printf("kdb_enter: cpu %d, is_slave %d, kdb_cpu %d, run mode %d pc %x (%x) holds %d\n",
- my_cpu, current_cpu_datap()->cpu_kdb_is_slave, kdb_cpu,
- db_run_mode, pc, *(int *)pc, cpus_holding_bkpts);
+ db_printf("kdb_enter(): cpu_number %d, kdb_cpu %d\n", my_cpu, kdb_cpu);
+
if (db_breakpoints_inserted)
cpus_holding_bkpts++;
+
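+ /*
+ * no cpu owns kdb yet and we did not enter as a slave:
+ * claim kdb for this cpu and signal the other cpus to stop
+ */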
if (kdb_cpu == -1 && !current_cpu_datap()->cpu_kdb_is_slave) {
kdb_cpu = my_cpu;
+ db_printf("Signaling other processors..\n");
remote_kdb(); /* stop other cpus */
retval = 1;
} else if (kdb_cpu == my_cpu)
unlock_kdb();
current_cpu_datap()->cpu_kdb_active--;
+ mp_kdb_exit();
+
enable_preemption();
if (wait) {
{
int my_cpu;
register i;
- extern void kdb_console(void);
disable_preemption();
my_cpu = cpu_number();
for(;;) {
- kdb_console();
if (kdb_cpu != -1 && kdb_cpu != my_cpu) {
continue;
}
}
}
+/*
+ * system reboot
+ */
+
+extern void kdp_reboot(void);
+
void db_reboot(
db_expr_t addr,
boolean_t have_addr,
db_expr_t count,
char *modif)
{
- boolean_t reboot = TRUE;
- char *cp, c;
-
- cp = modif;
- while ((c = *cp++) != 0) {
- if (c == 'r') /* reboot */
- reboot = TRUE;
- if (c == 'h') /* halt */
- reboot = FALSE;
- }
- halt_all_cpus(reboot);
+ kdp_reboot();
}