apple/xnu (xnu-792.24.17): osfmk/ppc/model_dep.c
diff --git a/osfmk/ppc/model_dep.c b/osfmk/ppc/model_dep.c
index 970e9b1567371fc677a47bf7183b7542312df99f..b88e4af6482211457f0ebd90acecd245ddfd44ec 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  * 
 #include <mach_kdb.h>
 #include <mach_kdp.h>
 #include <db_machine_commands.h>
-#include <cpus.h>
 
 #include <kern/thread.h>
 #include <machine/pmap.h>
-#include <machine/mach_param.h>
 #include <device/device_types.h>
 
 #include <mach/vm_param.h>
 #include <ppc/low_trace.h>
 #include <ppc/mappings.h>
 #include <ppc/FirmwareCalls.h>
-#include <ppc/setjmp.h>
+#include <ppc/cpu_internal.h>
 #include <ppc/exception.h>
+#include <ppc/hw_perfmon.h>
+#include <ppc/lowglobals.h>
 
 #include <kern/clock.h>
 #include <kern/debug.h>
 #include <machine/trap.h>
 #include <kern/spl.h>
 #include <pexpert/pexpert.h>
-#include <ppc/mp.h>
 
 #include <IOKit/IOPlatformExpert.h>
 
@@ -125,25 +124,31 @@ char env_buf[256];
  * from one to another using kdb_on! #cpu or cpu #cpu
  */
 
-decl_simple_lock_data(, debugger_lock) /* debugger lock */
+hw_lock_data_t debugger_lock;  /* debugger lock */
+hw_lock_data_t pbtlock;                /* backtrace print lock */
 
 int                    debugger_cpu = -1;                      /* current cpu running debugger */
 int                    debugger_debug = 0;                     /* Debug debugger */
-int                    debugger_is_slave[NCPUS];       /* Show that we were entered via sigp */
-int                    debugger_active[NCPUS];         /* Debugger active on CPU */
-int                    debugger_pending[NCPUS];        /* Debugger entry pending on CPU (this is a HACK) */
-int                    debugger_holdoff[NCPUS];        /* Holdoff debugger entry on this CPU (this is a HACK) */
 int            db_run_mode;                            /* Debugger run mode */
 unsigned int debugger_sync = 0;                        /* Cross processor debugger entry sync */
 extern                 unsigned int NMIss;                     /* NMI debounce switch */
 
+extern volatile int panicwait;
+volatile unsigned int pbtcnt = 0;
+volatile unsigned int pbtcpu = -1;
+
 unsigned int lastTrace;                                        /* Value of low-level exception trace controls */
 
+
 volatile unsigned int  cpus_holding_bkpts;     /* counter for number of cpus holding
                                                                                           breakpoints (ie: cpus that did not
                                                                                           insert back breakpoints) */
 void unlock_debugger(void);
 void lock_debugger(void);
+void dump_backtrace(savearea *sv, unsigned int stackptr, unsigned int fence);
+void dump_savearea(savearea *sv, unsigned int fence);
+
+int packAsc (unsigned char *inbuf, unsigned int length);
 
 #if !MACH_KDB
 boolean_t      db_breakpoints_inserted = TRUE;
@@ -165,18 +170,38 @@ extern int        kdp_flag;
 
 boolean_t db_im_stepping = 0xFFFFFFFF; /* Remember if we were stepping */
 
+
+char *failNames[] = {  
+
+       "Debugging trap",                       /* failDebug */
+       "Corrupt stack",                        /* failStack */
+       "Corrupt mapping tables",       /* failMapping */
+       "Corrupt context",                      /* failContext */
+       "No saveareas",                         /* failNoSavearea */
+       "Savearea corruption",          /* failSaveareaCorr */
+       "Invalid live context",         /* failBadLiveContext */
+       "Corrupt skip lists",           /* failSkipLists */
+       "Unaligned stack",                      /* failUnalignedStk */
+       "Invalid pmap",                         /* failPmap */
+       "Lock timeout",                         /* failTimeout */
+       "Unknown failure code"          /* Unknown failure code - must always be last */
+};
+
+char *invxcption = "Unknown code";
+
 extern const char version[];
+extern char *trap_type[];
 
 #if !MACH_KDB
-void kdb_trap(int type, struct ppc_saved_state *regs);
-void kdb_trap(int type, struct ppc_saved_state *regs) {
+void kdb_trap(int type, struct savearea *regs);
+void kdb_trap(int type, struct savearea *regs) {
        return;
 }
 #endif
 
 #if !MACH_KDP
-void kdp_trap(int type, struct ppc_saved_state *regs);
-void kdp_trap(int type, struct ppc_saved_state *regs) {
+void kdp_trap(int type, struct savearea *regs);
+void kdp_trap(int type, struct savearea *regs) {
        return;
 }
 #endif
@@ -185,12 +210,13 @@ void
 machine_startup(boot_args *args)
 {
        int     boot_arg;
+       unsigned int wncpu;
+       unsigned int vmm_arg;
 
        if (PE_parse_boot_arg("cpus", &wncpu)) {
-               if (!((wncpu > 0) && (wncpu < NCPUS)))
-                        wncpu = NCPUS;
-       } else 
-               wncpu = NCPUS;
+               if ((wncpu > 0) && (wncpu < MAX_CPUS))
+                        max_ncpus = wncpu;
+       }
 
        if( PE_get_hotkey( kPEControlKey ))
             halt_in_debugger = halt_in_debugger ? 0 : 1;
@@ -199,9 +225,14 @@ machine_startup(boot_args *args)
                if (boot_arg & DB_HALT) halt_in_debugger=1;
                if (boot_arg & DB_PRT) disableDebugOuput=FALSE; 
                if (boot_arg & DB_SLOG) systemLogDiags=TRUE; 
+               if (boot_arg & DB_NMI) panicDebugging=TRUE; 
+               if (boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE; 
        }
+       
+       PE_parse_boot_arg("vmmforce", &lowGlo.lgVMMforcedFeats);
 
-       hw_lock_init(&debugger_lock);                           /* initialized debugger lock */
+       hw_lock_init(&debugger_lock);                           /* initialize debugger lock */
+       hw_lock_init(&pbtlock);                                         /* initialize print backtrace lock */
 
 #if    MACH_KDB
        /*
@@ -231,24 +262,29 @@ machine_startup(boot_args *args)
 
                default_preemption_rate = boot_arg;
        }
-       if (PE_parse_boot_arg("kpreempt", &boot_arg)) {
-               extern int kernel_preemption_mode;
-               extern boolean_t zone_gc_allowed;
+       if (PE_parse_boot_arg("unsafe", &boot_arg)) {
+               extern int max_unsafe_quanta;
 
-               kernel_preemption_mode = boot_arg;
-               zone_gc_allowed = FALSE; /* XXX: TO BE REMOVED  */
+               max_unsafe_quanta = boot_arg;
        }
+       if (PE_parse_boot_arg("poll", &boot_arg)) {
+               extern int max_poll_quanta;
 
-       machine_conf();
+               max_poll_quanta = boot_arg;
+       }
+       if (PE_parse_boot_arg("yield", &boot_arg)) {
+               extern int sched_poll_yield_shift;
 
-       ml_thrm_init();                                                 /* Start thermal monitoring on this processor */
+               sched_poll_yield_shift = boot_arg;
+       }
+
+       machine_conf();
 
        /*
-        * Start the system.
+        * Kick off the kernel bootstrap.
         */
-       setup_main();
-
-       /* Should never return */
+       kernel_bootstrap();
+       /*NOTREACHED*/
 }
 
 char *
@@ -262,22 +298,24 @@ machine_boot_info(
 void
 machine_conf(void)
 {
-       machine_info.max_cpus = NCPUS;
-       machine_info.avail_cpus = 1;
-       machine_info.memory_size = mem_size;
+       machine_info.memory_size = mem_size;    /* Note that this will be 2 GB for >= 2 GB machines */
 }
 
 void
 machine_init(void)
 {
        clock_config();
+/*     Note that we must initialize the stepper tables AFTER the clock is configured!!!!! */
+       if(pmsExperimental & 1) pmsCPUConf();   /* (EXPERIMENTAL) Initialize the stepper tables */
+       perfmon_init();
+       return;
+
 }
 
 void slave_machine_init(void)
 {
-       (void) ml_set_interrupts_enabled(FALSE);        /* Make sure we are disabled */
-       clock_init();                           /* Init the clock */
        cpu_machine_init();                     /* Initialize the processor */
+       clock_init();                           /* Init the clock */
 }                               
 
 void
@@ -316,64 +354,185 @@ void machine_callstack(
 
 
 void
-print_backtrace(struct ppc_saved_state *ssp)
+print_backtrace(struct savearea *ssp)
 {
-       unsigned int *stackptr, *raddr, *rstack, trans;
+       unsigned int stackptr, *raddr, *rstack, trans, fence;
        int i, frames_cnt, skip_top_frames, frames_max;
        unsigned int store[8];                  /* Buffer for real storage reads */
        vm_offset_t backtrace_entries[32];
+       savearea *sv, *svssp;
+       int cpu;
+       savearea *psv;
 
-       printf("backtrace: ");
-       frames_cnt =0;
-
-       /* Get our stackpointer for backtrace */
-       if (ssp==NULL) {
-               __asm__ volatile("mr %0,        r1" : "=r" (stackptr));
-               skip_top_frames = 1;
-       } else {
-               stackptr = (unsigned int *)(ssp->r1);
-               skip_top_frames = 0;
-               backtrace_entries[frames_cnt] = ssp->srr0;
-               frames_cnt++;
-               printf("0x%08x ", ssp->srr0);
-       }
+/*
+ *     We need this lock to make sure we don't hang up when we double panic on an MP.
+ */
 
-       frames_max = 32-frames_cnt;
-       for (i = 0; i < frames_max; i++) {
+       cpu  = cpu_number();                                    /* Just who are we anyways? */
+       if(pbtcpu != cpu) {                                             /* Allow recursion */
+               hw_atomic_add((uint32_t *)&pbtcnt, 1); /* Remember we are trying */
+               while(!hw_lock_try(&pbtlock));          /* Spin here until we can get in. If we never do, well, we're crashing anyhow... */     
+               pbtcpu = cpu;                                           /* Mark it as us */     
+       }       
+
+       svssp = (savearea *)ssp;                                /* Make this easier */
+       sv = 0;
+       if(current_thread()) sv = (savearea *)current_thread()->machine.pcb;    /* Find most current savearea if system has started */
+
+       fence = 0xFFFFFFFF;                                             /* Show we go all the way */
+       if(sv) fence = (unsigned int)sv->save_r1;       /* Stop at previous exception point */
+       
+       if(!svssp) {                                                    /* Should we start from stack? */
+               kdb_printf("Latest stack backtrace for cpu %d:\n", cpu_number());
+               __asm__ volatile("mr %0,r1" : "=r" (stackptr)); /* Get current stack */
+               dump_backtrace((savearea *)0,stackptr, fence);  /* Dump the backtrace */
+               if(!sv) {                                                       /* Leave if no saveareas */
+                       kdb_printf("\nKernel version:\n%s\n",version);  /* Print kernel version */
+                       hw_lock_unlock(&pbtlock);               /* Allow another back trace to happen */
+                       return; 
+               }
+       }
+       else {                                                                  /* Were we passed an exception? */
+               fence = 0xFFFFFFFF;                                     /* Show we go all the way */
+               if(svssp->save_hdr.save_prev) {
+                       if((svssp->save_hdr.save_prev <= vm_last_addr) && ((unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)svssp->save_hdr.save_prev))) {   /* Valid address? */    
+                               psv = (savearea *)((unsigned int)svssp->save_hdr.save_prev);    /* Get the 64-bit back chain converted to a regular pointer */
+                               fence = (unsigned int)psv->save_r1;     /* Stop at previous exception point */
+                       }
+               }
+       
+               kdb_printf("Latest crash info for cpu %d:\n", cpu_number());
+               kdb_printf("   Exception state (sv=0x%08X)\n", sv);
+               dump_savearea(svssp, fence);            /* Dump this savearea */        
+       }
 
-               if(!stackptr) break;    /* No more to get... */
+       if(!sv) {                                                               /* Leave if no saveareas */
+               kdb_printf("\nKernel version:\n%s\n",version);  /* Print kernel version */
+               hw_lock_unlock(&pbtlock);                       /* Allow another back trace to happen */
+               return; 
+       }
+       
+       kdb_printf("Proceeding back via exception chain:\n");
 
-               /* Avoid causing page fault */
-               if (!(raddr = LRA(PPC_SID_KERNEL, (void *)((unsigned int)stackptr+FM_LR_SAVE))))
+       while(sv) {                                                             /* Do them all... */
+               if(!(((addr64_t)((uintptr_t)sv) <= vm_last_addr) && 
+                       (unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)sv)))) {        /* Valid address? */    
+                       kdb_printf("   Exception state (sv=0x%08X) Not mapped or invalid. stopping...\n", sv);
                        break;
-               ReadReal((unsigned int)raddr, &store[0]);
-               if (skip_top_frames)
-                       skip_top_frames--;
+               }
+               
+               kdb_printf("   Exception state (sv=0x%08X)\n", sv);
+               if(sv == svssp) {                                       /* Did we dump it already? */
+                       kdb_printf("      previously dumped as \"Latest\" state. skipping...\n");
+               }
                else {
-                       backtrace_entries[frames_cnt] = store[0];
-                       frames_cnt++;
-                       printf("0x%08x ",store[0]);
+                       fence = 0xFFFFFFFF;                             /* Show we go all the way */
+                       if(sv->save_hdr.save_prev) {
+                               if((sv->save_hdr.save_prev <= vm_last_addr) && ((unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)sv->save_hdr.save_prev))) { /* Valid address? */    
+                                       psv = (savearea *)((unsigned int)sv->save_hdr.save_prev);       /* Get the 64-bit back chain converted to a regular pointer */
+                                       fence = (unsigned int)psv->save_r1;     /* Stop at previous exception point */
+                               }
+                       }
+                       dump_savearea(sv, fence);               /* Dump this savearea */        
+               }       
+               
+               sv = CAST_DOWN(savearea *, sv->save_hdr.save_prev);     /* Back chain */ 
+       }
+       
+       kdb_printf("\nKernel version:\n%s\n",version);  /* Print kernel version */
+
+       pbtcpu = -1;                                                    /* Mark as unowned */
+       hw_lock_unlock(&pbtlock);                               /* Allow another back trace to happen */
+       hw_atomic_sub((uint32_t *) &pbtcnt, 1);  /* Show we are done */
+
+       while(pbtcnt);                                                  /* Wait for completion */
+
+       return;
+}
+
+void dump_savearea(savearea *sv, unsigned int fence) {
+
+       char *xcode;
+       
+       if(sv->save_exception > T_MAX) xcode = invxcption;      /* Too big for table */
+       else xcode = trap_type[sv->save_exception / 4];         /* Point to the type */
+       
+       kdb_printf("      PC=0x%08X; MSR=0x%08X; DAR=0x%08X; DSISR=0x%08X; LR=0x%08X; R1=0x%08X; XCP=0x%08X (%s)\n",
+               (unsigned int)sv->save_srr0, (unsigned int)sv->save_srr1, (unsigned int)sv->save_dar, sv->save_dsisr,
+               (unsigned int)sv->save_lr, (unsigned int)sv->save_r1, sv->save_exception, xcode);
+       
+       if(!(sv->save_srr1 & MASK(MSR_PR))) {           /* Are we in the kernel? */
+               dump_backtrace(sv, (unsigned int)sv->save_r1, fence);   /* Dump the stack back trace from  here if not user state */
+       }
+       
+       return;
+}
+
+
+
+#define DUMPFRAMES 34
+#define LRindex 2
+
+void dump_backtrace(savearea *sv, unsigned int stackptr, unsigned int fence) {
+
+       unsigned int bframes[DUMPFRAMES];
+       unsigned int  sframe[8], raddr, dumbo;
+       int i, index=0;
+       
+       kdb_printf("      Backtrace:\n");
+       if (sv != (savearea *)0) {
+               bframes[0] = (unsigned int)sv->save_srr0;
+               bframes[1] = (unsigned int)sv->save_lr;
+               index = 2;
+       }
+       for(i = index; i < DUMPFRAMES; i++) {                   /* Dump up to max frames */
+       
+               if(!stackptr || (stackptr == fence)) break;             /* Hit stop point or end... */
+               
+               if(stackptr & 0x0000000F) {                             /* Is stack pointer valid? */
+                       kdb_printf("\n         backtrace terminated - unaligned frame address: 0x%08X\n", stackptr);    /* No, tell 'em */
+                       break;
                }
-               if (!(raddr = LRA(PPC_SID_KERNEL, (void *)stackptr))) 
+
+               raddr = (unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)stackptr);  /* Get physical frame address */
+               if(!raddr || (stackptr > vm_last_addr)) {               /* Is it mapped? */
+                       kdb_printf("\n         backtrace terminated - frame not mapped or invalid: 0x%08X\n", stackptr);        /* No, tell 'em */
                        break;
-               ReadReal((unsigned int)raddr, &store[0]);
-               stackptr=(unsigned int *)store[0];
-       }
-       printf("\n");
+               }
+       
+               if(!mapping_phys_lookup(raddr, &dumbo)) {       /* Is it within physical RAM? */
+                       kdb_printf("\n         backtrace terminated - frame outside of RAM: v=0x%08X, p=%08X\n", stackptr, raddr);      /* No, tell 'em */
+                       break;
+               }
+       
+               ReadReal((addr64_t)((raddr << 12) | (stackptr & 4095)), &sframe[0]);    /* Fetch the stack frame */
 
-       if (frames_cnt)
-               kmod_dump((vm_offset_t *)&backtrace_entries[0], frames_cnt);
+               bframes[i] = sframe[LRindex];                           /* Save the link register */
+               
+               if(!i) kdb_printf("         ");                         /* Indent first time */
+               else if(!(i & 7)) kdb_printf("\n         ");    /* Skip to new line every 8 */
+               kdb_printf("0x%08X ", bframes[i]);                      /* Dump the link register */
+               
+               stackptr = sframe[0];                                           /* Chain back */
+       }
+       kdb_printf("\n");
+       if(i >= DUMPFRAMES) kdb_printf("      backtrace continues...\n");       /* Say we terminated early */
+       if(i) kmod_dump((vm_offset_t *)&bframes[0], i); /* Show what kmods are in trace */
+       
 }
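
(For reference, a sketch of what the loop above expects to find at each stackptr: the standard 32-bit PowerPC linkage area, with the back chain in the first word and the saved LR two words in, which is why frame addresses must be 16-byte aligned and LRindex is 2. The struct and field names below are illustrative only, not taken from the kernel headers.)

    /* Illustrative layout of the frame header dump_backtrace() walks.
     * Hypothetical names; offsets match the code above.
     */
    struct ppc_frame_header {
        unsigned int back_chain;    /* sframe[0]: caller's frame address, 0 ends the walk */
        unsigned int saved_cr;      /* sframe[1]: conventionally the CR save slot */
        unsigned int saved_lr;      /* sframe[LRindex]: return address captured in bframes[] */
    };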
+       
+
 
 void 
 Debugger(const char    *message) {
 
        int i;
        unsigned int store[8];
+       unsigned long pi_size = 0;
        spl_t spl;
        
        spl = splhigh();                                                                /* No interruptions from here on */
-
+       
 /*
  *     backtrace for Debugger() call  from panic() if no current debugger
  *     backtrace and return for double panic() call
@@ -383,15 +542,84 @@ Debugger(const char       *message) {
                print_backtrace(NULL);
                if (nestedpanic != 0)  {
                        splx(spl);
-                       return;                                                                         /* Yeah, don't enter again... */
+                       return;                                                                 /* Yeah, don't enter again... */
                }
        }
-
-       if (debug_mode && debugger_active[cpu_number()]) {      /* Are we already on debugger on this processor? */
+       
+       if (debug_mode && getPerProc()->debugger_active) {      /* Are we already on debugger on this processor? */
                splx(spl);
                return;                                                                         /* Yeah, don't do it again... */
        }
 
+
+/*
+ * The above stuff catches the double panic case so we shouldn't have to worry about that here.
+ */
+       if ( panicstr != (char *)0 )
+       {
+               /* disable kernel preemption */
+               disable_preemption();
+       
+               /* everything should be printed now so copy to NVRAM */
+               if( debug_buf_size > 0)
+
+                 {
+                   /* Do not compress the panic log unless kernel debugging 
+                    * is disabled - the panic log isn't synced to NVRAM if 
+                    * debugging is enabled, and the panic log is valuable 
+                    * whilst debugging
+                    */
+                   if (!panicDebugging)
+                     {
+                       unsigned int bufpos;
+                       
+                       /* Now call the compressor */
+                       bufpos = packAsc (debug_buf, (unsigned int) (debug_buf_ptr - debug_buf) );
+                       /* If compression was successful, use the compressed length                */
+                       if (bufpos)
+                         {
+                           debug_buf_ptr = debug_buf + bufpos;
+                         }
+                     }
+                   /* Truncate if the buffer is larger than a certain magic 
+                    * size - this really ought to be some appropriate fraction
+                    * of the NVRAM image buffer, and is best done in the 
+                    * savePanicInfo() or PESavePanicInfo() calls 
+                    */
+                   pi_size = debug_buf_ptr - debug_buf;
+                   pi_size = PESavePanicInfo( debug_buf, ((pi_size > 2040) ? 2040 : pi_size));
+                 }
+                       
+               if( !panicDebugging && (pi_size != 0) ) {
+                       int     my_cpu;
+                       int     tcpu;
+
+                       my_cpu = cpu_number();
+                       debugger_cpu = my_cpu;
+
+                       hw_atomic_add(&debug_mode, 1);
+                       PerProcTable[my_cpu].ppe_vaddr->debugger_active++;
+                       lock_debugger();
+
+                       for(tcpu = 0; tcpu < real_ncpus; tcpu++) {
+                               if(tcpu == my_cpu) continue;
+                               hw_atomic_add(&debugger_sync, 1);
+                               (void)cpu_signal(tcpu, SIGPdebug, 0 ,0);
+                       }
+                       (void)hw_cpu_sync(&debugger_sync, LockTimeOut);
+                       debugger_sync = 0;
+               }
+
+               draw_panic_dialog();
+               
+               if( !panicDebugging && (pi_size != 0))
+                                       PEHaltRestart( kPEHangCPU );
+
+               enable_preemption();
+       }
+
+
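(A sizing note inferred from this file, not stated in the source: on the !panicDebugging path the log has been run through packAsc(), which folds every 8 bytes into 7, so the 2040-byte cap handed to PESavePanicInfo() corresponds to roughly 2331 characters of the original panic log.)

    /* Hypothetical back-of-the-envelope check of the packed panic-log capacity. */
    unsigned int nvram_cap = 2040;                  /* cap applied to pi_size above      */
    unsigned int max_chars = (nvram_cap * 8) / 7;   /* ~2331 uncompressed log characters */
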
        if ((current_debugger != NO_CUR_DB)) {                  /* If there is a debugger configured, enter it */
                printf("Debugger(%s)\n", message);
                TRAP_DEBUGGER;
@@ -400,25 +628,46 @@ Debugger(const char       *message) {
        }
 
        printf("\nNo debugger configured - dumping debug information\n");
-       printf("\nversion string : %s\n",version);
-       mfdbatu(store[0],0);
-       mfdbatl(store[1],0);    
-       mfdbatu(store[2],1);                                    
-       mfdbatl(store[3],1);                                    
-       mfdbatu(store[4],2);                            
-       mfdbatl(store[5],2);                                    
-       mfdbatu(store[6],3);                            
-       mfdbatl(store[7],3);                                    
-       printf("DBAT0: %08X %08X\n", store[0], store[1]);
-       printf("DBAT1: %08X %08X\n", store[2], store[3]);
-       printf("DBAT2: %08X %08X\n", store[4], store[5]);
-       printf("DBAT3: %08X %08X\n", store[6], store[7]);
        printf("MSR=%08X\n",mfmsr());
        print_backtrace(NULL);
        splx(spl);
        return;
 }
 
+/*
+ *             Here's where we attempt to get some diagnostic information dumped out
+ *             when the system is really confused.  We will try to get into the 
+ *             debugger as well.
+ *
+ *             We are here with interrupts disabled and on the debug stack.  The savearea
+ *             that was passed in is NOT chained to the activation.
+ *
+ *             save_r3 contains the failure reason code.
+ */
+
+void SysChoked(int type, savearea *sv) {                       /* The system is bad dead */
+
+       unsigned int failcode;
+       
+       mp_disable_preemption();
+       disableDebugOuput = FALSE;
+       debug_mode = TRUE;
+
+       failcode = (unsigned int)sv->save_r3;                   /* Get the failure code */
+       if(failcode > failUnknown) failcode = failUnknown;      /* Clamp to the unknown failure code */
+       
+       kprintf("System Failure: cpu=%d; code=%08X (%s)\n", cpu_number(), (unsigned int)sv->save_r3, failNames[failcode]);
+       kdb_printf("System Failure: cpu=%d; code=%08X (%s)\n", cpu_number(), (unsigned int)sv->save_r3, failNames[failcode]);
+
+       print_backtrace(sv);                                                    /* Attempt to print backtrace */
+       Call_DebuggerC(type, sv);                                               /* Attempt to get into debugger */
+
+       if ((current_debugger != NO_CUR_DB)) Call_DebuggerC(type, sv);  /* Attempt to get into debugger */
+
+}
+
+
+
 /*
  *     When we get here, interruptions are disabled and we are on the debugger stack
  *     Never, ever, ever, ever enable interruptions from here on
@@ -426,18 +675,21 @@ Debugger(const char       *message) {
 
 int Call_DebuggerC(
         int    type,
-        struct ppc_saved_state *saved_state)
+        struct savearea *saved_state)
 {
        int                             directcall, wait;
-       vm_offset_t             instr_ptr;
+       addr64_t                instr_ptr;
+       ppnum_t                 instr_pp;
        unsigned int    instr;
-       int                     my_cpu, tcpu;
+       int                     my_cpu, tcpu, wasdebugger;
+       struct per_proc_info *pp;
+       uint64_t nowtime, poptime;
 
        my_cpu = cpu_number();                                                          /* Get our CPU */
 
 #if    MACH_KDB
        if((debugger_cpu == my_cpu) &&                                          /* Do we already own debugger? */
-         debugger_active[my_cpu] &&                                            /* and are we really active? */
+         PerProcTable[my_cpu].ppe_vaddr->debugger_active &&                                            /* and are we really active? */
          db_recover &&                                                                         /* and have we set up recovery? */
          (current_debugger == KDB_CUR_DB)) {                           /* and are we in KDB (only it handles recovery) */
                kdb_trap(type, saved_state);                                    /* Then reenter it... */
@@ -445,7 +697,8 @@ int Call_DebuggerC(
 #endif
        
        hw_atomic_add(&debug_mode, 1);                                          /* Indicate we are in debugger */
-       debugger_active[my_cpu]++;                                                      /* Show active on our CPU */
+       PerProcTable[my_cpu].ppe_vaddr->debugger_active++;      /* Show active on our CPU */
+       
        lock_debugger();                                                                        /* Insure that only one CPU is in debugger */
 
        if(db_im_stepping == my_cpu) {                                          /* Are we just back from a step? */
@@ -458,27 +711,32 @@ int Call_DebuggerC(
                kprintf("Call_DebuggerC(%d): %08X %08X, debact = %d\n", my_cpu, type, saved_state, debug_mode); /* (TEST/DEBUG) */
 #endif
                printf("Call_Debugger: enter - cpu %d, is_slave %d, debugger_cpu %d, pc %08X\n",
-                  my_cpu, debugger_is_slave[my_cpu], debugger_cpu, saved_state->srr0);
+                  my_cpu, PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave, debugger_cpu, saved_state->save_srr0);
        }
        
-       if (instr_ptr = (vm_offset_t)LRA(PPC_SID_KERNEL, (void *)(saved_state->srr0))) {
-               instr = ml_phys_read(instr_ptr);                                /* Get the trap that caused entry */
+       instr_pp = (vm_offset_t)pmap_find_phys(kernel_pmap, (addr64_t)(saved_state->save_srr0));
+
+       if (instr_pp) {
+               instr_ptr = (addr64_t)(((addr64_t)instr_pp << 12) | (saved_state->save_srr0 & 0xFFF));  /* Make physical address */
+               instr = ml_phys_read_64(instr_ptr);                             /* Get the trap that caused entry */
        } 
        else instr = 0;
 
 #if 0
-       if (debugger_debug) kprintf("Call_DebuggerC(%d): instr_ptr = %08X, instr = %08X\n", my_cpu, instr_ptr, instr);  /* (TEST/DEBUG) */
+       if (debugger_debug) kprintf("Call_DebuggerC(%d): instr_pp = %08X, instr_ptr = %016llX, instr = %08X\n", my_cpu, instr_pp, instr_ptr, instr);    /* (TEST/DEBUG) */
 #endif
 
        if (db_breakpoints_inserted) cpus_holding_bkpts++;      /* Bump up the holding count */
-       if (debugger_cpu == -1 && !debugger_is_slave[my_cpu]) {
+       if (debugger_cpu == -1 && !PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave) {
 #if 0
                if (debugger_debug) kprintf("Call_DebuggerC(%d): lasttrace = %08X\n", my_cpu, lastTrace);       /* (TEST/DEBUG) */
 #endif
                debugger_cpu = my_cpu;                                                  /* Show that we are debugger */
+
+
                lastTrace = LLTraceSet(0);                                              /* Disable low-level tracing */
 
-               for(tcpu = 0; tcpu < NCPUS; tcpu++) {                   /* Stop all the other guys */
+               for(tcpu = 0; tcpu < real_ncpus; tcpu++) {              /* Stop all the other guys */
                        if(tcpu == my_cpu) continue;                            /* Don't diddle ourselves */
                        hw_atomic_add(&debugger_sync, 1);                       /* Count signal sent */
                        (void)cpu_signal(tcpu, SIGPdebug, 0 ,0);        /* Tell 'em to enter debugger */
@@ -497,7 +755,7 @@ int Call_DebuggerC(
        switch_debugger = 0;                                                            /* Make sure switch request is off */
        directcall = 1;                                                                         /* Assume direct call */
 
-       if (saved_state->srr1 & MASK(SRR1_PRG_TRAP)) {          /* Trap instruction? */
+       if (saved_state->save_srr1 & MASK(SRR1_PRG_TRAP)) {     /* Trap instruction? */
                
                directcall = 0;                                                                 /* We had a trap not a direct call */
 
@@ -579,16 +837,20 @@ debugger_exit:
                instr, my_cpu, debugger_cpu, db_run_mode);      /* (TEST/DEBUG) */
 #endif
        if ((instr == TRAP_DEBUGGER_INST) ||                            /* Did we trap to enter debugger? */
-               (instr == TRAP_DIRECT_INST)) saved_state->srr0 += TRAP_INST_SIZE;       /* Yes, point past trap */
+               (instr == TRAP_DIRECT_INST)) saved_state->save_srr0 += TRAP_INST_SIZE;  /* Yes, point past trap */
 
-       if(debugger_cpu == my_cpu) LLTraceSet(lastTrace);       /* Enable tracing on the way out if we are debugger */
+       wasdebugger = 0;                                                                        /* Assume not debugger */
+       if(debugger_cpu == my_cpu) {                                            /* Are we the debugger processor? */
+               wasdebugger = 1;                                                                /* Remember that we were the debugger */
+               LLTraceSet(lastTrace);                                                  /* Enable tracing on the way out if we are debugger */
+       }
 
        wait = FALSE;                                                                           /* Assume we are not going to wait */
        if (db_run_mode == STEP_CONTINUE) {                                     /* Are we going to run? */
                wait = TRUE;                                                                    /* Yeah, remember to wait for breakpoints to clear */
                debugger_cpu = -1;                                                              /* Release other processor's debuggers */
-               debugger_pending[0] = 0;                                                /* Release request (this is a HACK) */
-               debugger_pending[1] = 0;                                                /* Release request (this is a HACK) */
+               for(tcpu = 0; tcpu < real_ncpus; tcpu++)
+                       PerProcTable[tcpu].ppe_vaddr->debugger_pending = 0;     /* Release request (this is a HACK) */
                NMIss = 0;                                                                              /* Let NMI bounce */
        }
        
@@ -598,17 +860,18 @@ debugger_exit:
        }
 
        if (db_breakpoints_inserted) cpus_holding_bkpts--;      /* If any breakpoints, back off count */
-       if (debugger_is_slave[my_cpu]) debugger_is_slave[my_cpu]--;     /* If we were a slove, uncount us */
+       if (PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave) PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave--;     /* If we were a slave, uncount us */
        if (debugger_debug)
                printf("Call_Debugger: exit - cpu %d, debugger_cpu %d, run_mode %d holds %d\n",
                          my_cpu, debugger_cpu, db_run_mode,
                          cpus_holding_bkpts);
 
        unlock_debugger();                                                                      /* Release the lock */
-       debugger_active[my_cpu]--;                                                      /* Say we aren't active anymore */
+       PerProcTable[my_cpu].ppe_vaddr->debugger_active--;      /* Say we aren't active anymore */
 
        if (wait) while(cpus_holding_bkpts);                            /* Wait for breakpoints to clear */
 
+
        hw_atomic_sub(&debug_mode, 1);                                          /* Set out of debug now */
 
        return(1);                                                                                      /* Exit debugger normally */
@@ -642,4 +905,38 @@ void unlock_debugger(void) {
 
 }
 
+struct pasc {
+  unsigned a: 7;
+  unsigned b: 7;
+  unsigned c: 7;
+  unsigned d: 7;
+  unsigned e: 7;
+  unsigned f: 7;
+  unsigned g: 7;
+  unsigned h: 7;
+}  __attribute__((packed));
 
+typedef struct pasc pasc_t;
+
+int packAsc (unsigned char *inbuf, unsigned int length)
+{
+  unsigned int i, j = 0;
+  pasc_t pack;
+
+  for (i = 0; i < length; i+=8)
+    {
+      pack.a = inbuf[i];
+      pack.b = inbuf[i+1];
+      pack.c = inbuf[i+2];
+      pack.d = inbuf[i+3];
+      pack.e = inbuf[i+4];
+      pack.f = inbuf[i+5];
+      pack.g = inbuf[i+6];
+      pack.h = inbuf[i+7];
+      bcopy ((char *) &pack, inbuf + j, 7);
+      j += 7;
+    }
+  if (0 != (i - length))
+    inbuf[j - (i - length)] &= 0xFF << (8-(i - length));
+  return j-(((i-length) == 7) ? 6 : (i - length));
+}