/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @APPLE_FREE_COPYRIGHT@
 */
/*
 * (c) Copyright 1988 HEWLETT-PACKARD COMPANY
 *
 * To anyone who acknowledges that this file is provided "AS IS"
 * without any express or implied warranty:
 *     permission to use, copy, modify, and distribute this file
 * for any purpose is hereby granted without fee, provided that
 * the above copyright notice and this notice appears in all
 * copies, and that the name of Hewlett-Packard Company not be
 * used in advertising or publicity pertaining to distribution
 * of the software without specific, written prior permission.
 * Hewlett-Packard Company makes no representations about the
 * suitability of this software for any purpose.
 */
/*
 * Copyright (c) 1990,1991,1992,1994 The University of Utah and
 * the Computer Systems Laboratory (CSL). All rights reserved.
 *
 * THE UNIVERSITY OF UTAH AND CSL PROVIDE THIS SOFTWARE IN ITS "AS IS"
 * CONDITION, AND DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM ITS USE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 *	Utah $Hdr: model_dep.c 1.34 94/12/14$
 */
#include <db_machine_commands.h>

#include <kern/thread.h>
#include <machine/pmap.h>
#include <device/device_types.h>

#include <mach/vm_param.h>
#include <mach/clock_types.h>
#include <mach/machine.h>
#include <mach/kmod.h>

#include <kern/misc_protos.h>
#include <kern/startup.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>
#include <ppc/thread.h>

#include <ppc/Firmware.h>
#include <ppc/low_trace.h>
#include <ppc/mappings.h>
#include <ppc/FirmwareCalls.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/hw_perfmon.h>
#include <ppc/lowglobals.h>

#include <kern/clock.h>
#include <kern/debug.h>
#include <machine/trap.h>

#include <pexpert/pexpert.h>

#include <IOKit/IOPlatformExpert.h>

#include <mach/vm_prot.h>

#include <mach/time_value.h>
#include <machine/machparam.h>	/* for btop */

#if	MACH_KDB
#include <ddb/db_aout.h>
#include <ddb/db_output.h>
#include <ddb/db_command.h>
#include <machine/db_machdep.h>

extern struct db_command ppc_db_commands[];
#endif	/* MACH_KDB */
char kernel_args_buf[256] = "/mach_kernel";
char boot_args_buf[256] = "/mach_servers/bootstrap";

#define TRAP_DEBUGGER		__asm__ volatile("tw 4,r3,r3");
#define TRAP_DEBUGGER_INST	0x7c831808
#define TRAP_DIRECT		__asm__ volatile("tw 4,r4,r4");
#define TRAP_DIRECT_INST	0x7c842008
#define TRAP_INST_SIZE		4
#define BREAK_TO_KDP0		0x7fe00008
#define BREAK_TO_KDP1		0x7c800008
#define BREAK_TO_KDB0		0x7c810808
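
/*
 * Added commentary: the *_INST values above appear to be the binary encodings
 * of the corresponding trap instructions, so the debugger entry code can tell
 * which trap it hit by reading the faulting opcode. Decoding 0x7c831808
 * against the PowerPC "tw TO,RA,RB" format (primary opcode 31, TO=4, RA=r3,
 * RB=r3, extended opcode 4) matches TRAP_DEBUGGER, and 0x7c842008 (TO=4,
 * RA=r4, RB=r4) matches TRAP_DIRECT. A TO field of 4 means "trap if equal",
 * so comparing a register with itself traps unconditionally. 0x7fe00008 is
 * "tw 31,r0,r0", the canonical unconditional trap, used here as the KDP
 * breakpoint.
 */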
/*
 * Code used to synchronize debuggers among all cpus, one active at a time, switch
 * from one to another using kdb_on! #cpu or cpu #cpu
 */
hw_lock_data_t debugger_lock;				/* debugger lock */
hw_lock_data_t pbtlock;					/* backtrace print lock */

int debugger_cpu = -1;					/* current cpu running debugger */
int debugger_debug = 0;					/* Debug debugger */
int db_run_mode;					/* Debugger run mode */
unsigned int debugger_sync = 0;				/* Cross processor debugger entry sync */
extern unsigned int NMIss;				/* NMI debounce switch */

extern volatile int panicwait;
volatile unsigned int pbtcnt = 0;
volatile unsigned int pbtcpu = -1;

unsigned int lastTrace;					/* Value of low-level exception trace controls */

volatile unsigned int cpus_holding_bkpts;		/* counter for number of cpus holding
							   breakpoints (ie: cpus that did not
							   insert back breakpoints) */
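
/*
 * Added commentary (a rough summary, not part of the original source): the
 * debugger entry protocol appears to work as follows. debugger_lock
 * serializes election of a debugger CPU, debugger_cpu records which CPU
 * currently owns the debugger (-1 when none), debugger_sync counts the
 * processors that still have to check in after being signalled with
 * SIGPdebug, and cpus_holding_bkpts keeps CPUs from resuming while
 * breakpoints are still pulled out of the text. pbtlock, pbtcpu and pbtcnt
 * play the same role for backtrace printing so that a double panic on an MP
 * system does not deadlock.
 */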
void unlock_debugger(void);
void lock_debugger(void);
void dump_backtrace(savearea *sv, unsigned int stackptr, unsigned int fence);
void dump_savearea(savearea *sv, unsigned int fence);

int packAsc (unsigned char *inbuf, unsigned int length);

boolean_t db_breakpoints_inserted = TRUE;
jmp_buf_t *db_recover = 0;

#include <ddb/db_run.h>
extern boolean_t db_breakpoints_inserted;
extern jmp_buf_t *db_recover;
#define	KDB_READY	0x1

#define	KDP_READY	0x1

boolean_t db_im_stepping = 0xFFFFFFFF;			/* Remember if we were stepping */
char *failNames[] = {
	"Debugging trap",			/* failDebug */
	"Corrupt stack",			/* failStack */
	"Corrupt mapping tables",		/* failMapping */
	"Corrupt context",			/* failContext */
	"No saveareas",				/* failNoSavearea */
	"Savearea corruption",			/* failSaveareaCorr */
	"Invalid live context",			/* failBadLiveContext */
	"Corrupt skip lists",			/* failSkipLists */
	"Unaligned stack",			/* failUnalignedStk */
	"Invalid pmap",				/* failPmap */
	"Unknown failure code"			/* Unknown failure code - must always be last */
};

char *invxcption = "Unknown code";
extern const char version[];
extern char *trap_type[];

void kdb_trap(int type, struct savearea *regs);
void kdb_trap(int type, struct savearea *regs) {
	return;
}

void kdp_trap(int type, struct savearea *regs);
void kdp_trap(int type, struct savearea *regs) {
	return;
}
machine_startup(boot_args *args)
{
	unsigned int vmm_arg;

	if (PE_parse_boot_arg("cpus", &wncpu)) {
		if ((wncpu > 0) && (wncpu < MAX_CPUS))

	if( PE_get_hotkey( kPEControlKey ))
		halt_in_debugger = halt_in_debugger ? 0 : 1;

	if (PE_parse_boot_arg("debug", &boot_arg)) {
		if (boot_arg & DB_HALT) halt_in_debugger=1;
		if (boot_arg & DB_PRT) disableDebugOuput=FALSE;
		if (boot_arg & DB_SLOG) systemLogDiags=TRUE;
		if (boot_arg & DB_NMI) panicDebugging=TRUE;
		if (boot_arg & DB_LOG_PI_SCRN) logPanicDataToScreen=TRUE;
	}
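
	/*
	 * Added commentary: these flags all come from the "debug=" boot argument,
	 * so a single hex value ORs several behaviors together. For example,
	 * assuming the usual xnu bit assignments (DB_HALT = 0x1, DB_PRT = 0x2),
	 * booting with debug=0x3 would both stop in the debugger during startup
	 * and re-enable kernel debug output on the console.
	 */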
	PE_parse_boot_arg("vmmforce", &lowGlo.lgVMMforcedFeats);

	hw_lock_init(&debugger_lock);				/* initialize debugger lock */
	hw_lock_init(&pbtlock);					/* initialize print backtrace lock */

#if DB_MACHINE_COMMANDS
	db_machine_commands_install(ppc_db_commands);
#endif /* DB_MACHINE_COMMANDS */

	if (boot_arg & DB_KDB)
		current_debugger = KDB_CUR_DB;

	/*
	 * Cause a breakpoint trap to the debugger before proceeding
	 * any further if the proper option bit was specified in
	 * the boot flags.
	 */
	if (halt_in_debugger && (current_debugger == KDB_CUR_DB)) {
		Debugger("inline call to debugger(machine_startup)");
		halt_in_debugger = 0;
	}
#endif /* MACH_KDB */
	if (PE_parse_boot_arg("preempt", &boot_arg)) {
		extern int default_preemption_rate;

		default_preemption_rate = boot_arg;
	}
	if (PE_parse_boot_arg("unsafe", &boot_arg)) {
		extern int max_unsafe_quanta;

		max_unsafe_quanta = boot_arg;
	}
	if (PE_parse_boot_arg("poll", &boot_arg)) {
		extern int max_poll_quanta;

		max_poll_quanta = boot_arg;
	}
	if (PE_parse_boot_arg("yield", &boot_arg)) {
		extern int sched_poll_yield_shift;

		sched_poll_yield_shift = boot_arg;
	}

	/*
	 * Kick off the kernel bootstrap.
	 */

	return(PE_boot_args());
	machine_info.memory_size = mem_size;	/* Note that this will be 2 GB for >= 2 GB machines */

void slave_machine_init(void)
{
	cpu_machine_init();			/* Initialize the processor */
	clock_init();				/* Init the clock */
}

halt_all_cpus(boolean_t reboot)
{
		printf("MACH Reboot\n");
		PEHaltRestart(kPERestartCPU);

		printf("CPU halted\n");
		PEHaltRestart(kPEHaltCPU);

	halt_all_cpus(FALSE);

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void machine_callstack(
	vm_size_t callstack_max)
#endif	/* MACH_ASSERT */
print_backtrace(struct savearea *ssp)
{
	unsigned int stackptr, *raddr, *rstack, trans, fence;
	int i, frames_cnt, skip_top_frames, frames_max;
	unsigned int store[8];			/* Buffer for real storage reads */
	vm_offset_t backtrace_entries[32];
	savearea *sv, *svssp;
/*
 *	We need this lock to make sure we don't hang up when we double panic on an MP.
 */

	cpu = cpu_number();					/* Just who are we anyways? */
	if(pbtcpu != cpu) {					/* Allow recursion */
		hw_atomic_add((uint32_t *)&pbtcnt, 1);		/* Remember we are trying */
		while(!hw_lock_try(&pbtlock));			/* Spin here until we can get in. If we never do, well, we're crashing anyhow... */
		pbtcpu = cpu;					/* Mark it as us */
	}

	svssp = (savearea *)ssp;				/* Make this easier */
	if(current_thread()) sv = (savearea *)current_thread()->machine.pcb;	/* Find most current savearea if system has started */

	fence = 0xFFFFFFFF;					/* Show we go all the way */
	if(sv) fence = (unsigned int)sv->save_r1;		/* Stop at previous exception point */

	if(!svssp) {						/* Should we start from stack? */
		kdb_printf("Latest stack backtrace for cpu %d:\n", cpu_number());
		__asm__ volatile("mr %0,r1" : "=r" (stackptr));	/* Get current stack */
		dump_backtrace((savearea *)0, stackptr, fence);	/* Dump the backtrace */
		if(!sv) {					/* Leave if no saveareas */
			kdb_printf("\nKernel version:\n%s\n", version);	/* Print kernel version */
			hw_lock_unlock(&pbtlock);		/* Allow another back trace to happen */
			return;
		}
	}
	else {							/* Were we passed an exception? */
		fence = 0xFFFFFFFF;				/* Show we go all the way */
		if(svssp->save_hdr.save_prev) {
			if((svssp->save_hdr.save_prev <= vm_last_addr) && ((unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)svssp->save_hdr.save_prev))) {	/* Valid address? */
				psv = (savearea *)((unsigned int)svssp->save_hdr.save_prev);	/* Get the 64-bit back chain converted to a regular pointer */
				fence = (unsigned int)psv->save_r1;	/* Stop at previous exception point */
			}
		}

		kdb_printf("Latest crash info for cpu %d:\n", cpu_number());
		kdb_printf("   Exception state (sv=0x%08X)\n", sv);
		dump_savearea(svssp, fence);			/* Dump this savearea */
	}

	if(!sv) {						/* Leave if no saveareas */
		kdb_printf("\nKernel version:\n%s\n", version);	/* Print kernel version */
		hw_lock_unlock(&pbtlock);			/* Allow another back trace to happen */
		return;
	}

	kdb_printf("Proceeding back via exception chain:\n");
	while(sv) {						/* Do them all... */
		if(!(((addr64_t)((uintptr_t)sv) <= vm_last_addr) &&
			(unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)((uintptr_t)sv)))) {	/* Valid address? */
			kdb_printf("   Exception state (sv=0x%08X) Not mapped or invalid. stopping...\n", sv);
			break;
		}

		kdb_printf("   Exception state (sv=0x%08X)\n", sv);
		if(sv == svssp) {				/* Did we dump it already? */
			kdb_printf("      previously dumped as \"Latest\" state. skipping...\n");
		}
		else {
			fence = 0xFFFFFFFF;			/* Show we go all the way */
			if(sv->save_hdr.save_prev) {
				if((sv->save_hdr.save_prev <= vm_last_addr) && ((unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)sv->save_hdr.save_prev))) {	/* Valid address? */
					psv = (savearea *)((unsigned int)sv->save_hdr.save_prev);	/* Get the 64-bit back chain converted to a regular pointer */
					fence = (unsigned int)psv->save_r1;	/* Stop at previous exception point */
				}
			}
			dump_savearea(sv, fence);		/* Dump this savearea */
		}

		sv = CAST_DOWN(savearea *, sv->save_hdr.save_prev);	/* Back chain */
	}

	kdb_printf("\nKernel version:\n%s\n", version);		/* Print kernel version */

	pbtcpu = -1;						/* Mark as unowned */
	hw_lock_unlock(&pbtlock);				/* Allow another back trace to happen */
	hw_atomic_sub((uint32_t *) &pbtcnt, 1);			/* Show we are done */

	while(pbtcnt);						/* Wait for completion */
}
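
/*
 * Added commentary: pbtcnt and pbtcpu seem to exist so that another CPU (or
 * a recursive panic on the same CPU) can still get its backtrace out. A CPU
 * that does not already own pbtlock bumps pbtcnt and spins for the lock; the
 * owner, once finished, releases the lock and then spins on pbtcnt at the
 * end of print_backtrace() so that concurrent backtraces finish printing
 * before the panic path continues.
 */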
void dump_savearea(savearea *sv, unsigned int fence) {

	if(sv->save_exception > T_MAX) xcode = invxcption;	/* Too big for table */
	else xcode = trap_type[sv->save_exception / 4];		/* Point to the type */

	kdb_printf("      PC=0x%08X; MSR=0x%08X; DAR=0x%08X; DSISR=0x%08X; LR=0x%08X; R1=0x%08X; XCP=0x%08X (%s)\n",
		(unsigned int)sv->save_srr0, (unsigned int)sv->save_srr1, (unsigned int)sv->save_dar, sv->save_dsisr,
		(unsigned int)sv->save_lr, (unsigned int)sv->save_r1, sv->save_exception, xcode);

	if(!(sv->save_srr1 & MASK(MSR_PR))) {			/* Are we in the kernel? */
		dump_backtrace(sv, (unsigned int)sv->save_r1, fence);	/* Dump the stack back trace from here if not user state */
	}
}

#define DUMPFRAMES 34
void dump_backtrace(savearea *sv, unsigned int stackptr, unsigned int fence) {

	unsigned int bframes[DUMPFRAMES];
	unsigned int sframe[8], raddr, dumbo;

	kdb_printf("      Backtrace:\n");
	if (sv != (savearea *)0) {
		bframes[0] = (unsigned int)sv->save_srr0;
		bframes[1] = (unsigned int)sv->save_lr;
	}

	for(i = index; i < DUMPFRAMES; i++) {			/* Dump up to max frames */

		if(!stackptr || (stackptr == fence)) break;	/* Hit stop point or end... */

		if(stackptr & 0x0000000F) {			/* Is stack pointer valid? */
			kdb_printf("\n         backtrace terminated - unaligned frame address: 0x%08X\n", stackptr);	/* No, tell 'em */
			break;
		}

		raddr = (unsigned int)pmap_find_phys(kernel_pmap, (addr64_t)stackptr);	/* Get physical frame address */
		if(!raddr || (stackptr > vm_last_addr)) {	/* Is it mapped? */
			kdb_printf("\n         backtrace terminated - frame not mapped or invalid: 0x%08X\n", stackptr);	/* No, tell 'em */
			break;
		}

		if(!mapping_phys_lookup(raddr, &dumbo)) {	/* Is it within physical RAM? */
			kdb_printf("\n         backtrace terminated - frame outside of RAM: v=0x%08X, p=%08X\n", stackptr, raddr);	/* No, tell 'em */
			break;
		}

		ReadReal((addr64_t)((raddr << 12) | (stackptr & 4095)), &sframe[0]);	/* Fetch the stack frame */

		bframes[i] = sframe[LRindex];			/* Save the link register */

		if(!i) kdb_printf("         ");			/* Indent first time */
		else if(!(i & 7)) kdb_printf("\n         ");	/* Skip to new line every 8 */
		kdb_printf("0x%08X ", bframes[i]);		/* Dump the link register */

		stackptr = sframe[0];				/* Chain back */
	}

	if(i >= DUMPFRAMES) kdb_printf("      backtrace continues...\n");	/* Say we terminated early */
	if(i) kmod_dump((vm_offset_t *)&bframes[0], i);		/* Show what kmods are in trace */
}
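
/*
 * Added commentary (assumed, from the 32-bit PowerPC calling convention):
 * every stack frame starts with a back-chain word at offset 0 pointing to
 * the caller's frame, and the saved link register sits a couple of words
 * into the frame, which is why the loop above chains through sframe[0] and
 * records sframe[LRindex]. The frame itself is fetched with ReadReal()
 * through its physical address so that a corrupt virtual mapping cannot
 * fault the debugger while it is printing.
 */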
Debugger(const char *message) {

	unsigned int store[8];
	unsigned long pi_size = 0;

	spl = splhigh();					/* No interruptions from here on */

/*
 *	backtrace for Debugger() call from panic() if no current debugger
 *	backtrace and return for double panic() call
 */
	if ((panicstr != (char *)0) &&
	  (((nestedpanic != 0) && (current_debugger == 1)) || (active_debugger == 0))) {
		print_backtrace(NULL);
		if (nestedpanic != 0) {
			return;					/* Yeah, don't enter again... */
		}
	}

	if (debug_mode && getPerProc()->debugger_active) {	/* Are we already on debugger on this processor? */
		return;						/* Yeah, don't do it again... */
	}

	/*
	 * The above stuff catches the double panic case so we shouldn't have to worry about that here.
	 */
	if ( panicstr != (char *)0 )
	{
		/* disable kernel preemptions */
		disable_preemption();

		/* everything should be printed now so copy to NVRAM
		 */
		if( debug_buf_size > 0)
		{
			/* Do not compress the panic log unless kernel debugging
			 * is disabled - the panic log isn't synced to NVRAM if
			 * debugging is enabled, and the panic log is valuable
			 * whilst debugging
			 */

			/* Now call the compressor */
			bufpos = packAsc (debug_buf, (unsigned int) (debug_buf_ptr - debug_buf) );
			/* If compression was successful, use the compressed length */
			debug_buf_ptr = debug_buf + bufpos;

			/* Truncate if the buffer is larger than a certain magic
			 * size - this really ought to be some appropriate fraction
			 * of the NVRAM image buffer, and is best done in the
			 * savePanicInfo() or PESavePanicInfo() calls
			 */
			pi_size = debug_buf_ptr - debug_buf;
			pi_size = PESavePanicInfo( debug_buf, ((pi_size > 2040) ? 2040 : pi_size));
		}

		if( !panicDebugging && (pi_size != 0) ) {
		my_cpu = cpu_number();
		debugger_cpu = my_cpu;

		hw_atomic_add(&debug_mode, 1);
		PerProcTable[my_cpu].ppe_vaddr->debugger_active++;

		for(tcpu = 0; tcpu < real_ncpus; tcpu++) {
			if(tcpu == my_cpu) continue;
			hw_atomic_add(&debugger_sync, 1);
			(void)cpu_signal(tcpu, SIGPdebug, 0 ,0);
		}
		(void)hw_cpu_sync(&debugger_sync, LockTimeOut);

		if( !panicDebugging && (pi_size != 0))
			PEHaltRestart( kPEHangCPU );
	}

	if ((current_debugger != NO_CUR_DB)) {			/* If there is a debugger configured, enter it */
		printf("Debugger(%s)\n", message);
		return;						/* Done debugging for a while */
	}

	printf("\nNo debugger configured - dumping debug information\n");
	printf("MSR=%08X\n",mfmsr());
	print_backtrace(NULL);
/*
 *	Here's where we attempt to get some diagnostic information dumped out
 *	when the system is really confused. We will try to get into the
 *	debugger as well.
 *
 *	We are here with interrupts disabled and on the debug stack. The savearea
 *	that was passed in is NOT chained to the activation.
 *
 *	save_r3 contains the failure reason code.
 */

void SysChoked(int type, savearea *sv) {		/* The system is bad dead */

	unsigned int failcode;

	mp_disable_preemption();
	disableDebugOuput = FALSE;

	failcode = (unsigned int)sv->save_r3;		/* Get the failure code */
	if(failcode > failUnknown) failcode = failUnknown;	/* Set unknown code */

	kprintf("System Failure: cpu=%d; code=%08X (%s)\n", cpu_number(), (unsigned int)sv->save_r3, failNames[failcode]);
	kdb_printf("System Failure: cpu=%d; code=%08X (%s)\n", cpu_number(), (unsigned int)sv->save_r3, failNames[failcode]);

	print_backtrace(sv);				/* Attempt to print backtrace */
	Call_DebuggerC(type, sv);			/* Attempt to get into debugger */

	if ((current_debugger != NO_CUR_DB)) Call_DebuggerC(type, sv);	/* Attempt to get into debugger */
}
/*
 *	When we get here, interruptions are disabled and we are on the debugger stack
 *	Never, ever, ever, ever enable interruptions from here on
 */

int Call_DebuggerC(int type, struct savearea *saved_state)
{
	int directcall, wait;
	int my_cpu, tcpu, wasdebugger;
	struct per_proc_info *pp;
	uint64_t nowtime, poptime;

	my_cpu = cpu_number();						/* Get our CPU */

	if((debugger_cpu == my_cpu) &&					/* Do we already own debugger? */
	  PerProcTable[my_cpu].ppe_vaddr->debugger_active &&		/* and are we really active? */
	  db_recover &&							/* and have we set up recovery? */
	  (current_debugger == KDB_CUR_DB)) {				/* and are we in KDB (only it handles recovery) */
		kdb_trap(type, saved_state);				/* Then reenter it... */
	}

	hw_atomic_add(&debug_mode, 1);					/* Indicate we are in debugger */
	PerProcTable[my_cpu].ppe_vaddr->debugger_active++;		/* Show active on our CPU */

	lock_debugger();						/* Ensure that only one CPU is in debugger */

	if(db_im_stepping == my_cpu) {					/* Are we just back from a step? */
		enable_preemption_no_check();				/* Enable preemption now */
		db_im_stepping = 0xFFFFFFFF;				/* Nobody stepping right now */
	}

	if (debugger_debug) {
		kprintf("Call_DebuggerC(%d): %08X %08X, debact = %d\n", my_cpu, type, saved_state, debug_mode);	/* (TEST/DEBUG) */
		printf("Call_Debugger: enter - cpu %d, is_slave %d, debugger_cpu %d, pc %08X\n",
		  my_cpu, PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave, debugger_cpu, saved_state->save_srr0);
	}

	instr_pp = (vm_offset_t)pmap_find_phys(kernel_pmap, (addr64_t)(saved_state->save_srr0));

	instr_ptr = (addr64_t)(((addr64_t)instr_pp << 12) | (saved_state->save_srr0 & 0xFFF));	/* Make physical address */
	instr = ml_phys_read_64(instr_ptr);				/* Get the trap that caused entry */

	if (debugger_debug) kprintf("Call_DebuggerC(%d): instr_pp = %08X, instr_ptr = %016llX, instr = %08X\n", my_cpu, instr_pp, instr_ptr, instr);	/* (TEST/DEBUG) */
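
	/*
	 * Added commentary: the faulting instruction is read through its physical
	 * address (pmap_find_phys plus ml_phys_read_64) rather than dereferenced
	 * virtually; presumably this keeps the debugger entry path from taking a
	 * recursive fault if the mapping for srr0 is damaged, and makes it work
	 * regardless of which address space was live when the trap occurred. The
	 * opcode fetched here is what gets compared against the BREAK_TO_* and
	 * TRAP_*_INST encodings below.
	 */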
	if (db_breakpoints_inserted) cpus_holding_bkpts++;		/* Bump up the holding count */
	if (debugger_cpu == -1 && !PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave) {

		if (debugger_debug) kprintf("Call_DebuggerC(%d): lasttrace = %08X\n", my_cpu, lastTrace);	/* (TEST/DEBUG) */

		debugger_cpu = my_cpu;					/* Show that we are debugger */

		lastTrace = LLTraceSet(0);				/* Disable low-level tracing */

		for(tcpu = 0; tcpu < real_ncpus; tcpu++) {		/* Stop all the other guys */
			if(tcpu == my_cpu) continue;			/* Don't diddle ourselves */
			hw_atomic_add(&debugger_sync, 1);		/* Count signal sent */
			(void)cpu_signal(tcpu, SIGPdebug, 0 ,0);	/* Tell 'em to enter debugger */
		}
		(void)hw_cpu_sync(&debugger_sync, LockTimeOut);		/* Wait for the other processors to enter debug */
		debugger_sync = 0;					/* We're done with it */
	}
	else if (debugger_cpu != my_cpu) goto debugger_exit;		/* We are not debugger, don't continue... */

	if (instr == TRAP_DIRECT_INST) {
		disableDebugOuput = FALSE;
		print_backtrace(saved_state);
	}

	switch_debugger = 0;						/* Make sure switch request is off */
	directcall = 1;							/* Assume direct call */

	if (saved_state->save_srr1 & MASK(SRR1_PRG_TRAP)) {		/* Trap instruction? */

		directcall = 0;						/* We had a trap not a direct call */

		switch (instr) {					/* Select trap type */

			case BREAK_TO_KDP0:				/* Breakpoint into KDP? */
			case BREAK_TO_KDP1:				/* Breakpoint into KDP? */
				current_debugger = KDP_CUR_DB;		/* Yes, set KDP */
				kdp_trap(type, saved_state);		/* Enter it */
				break;

			case BREAK_TO_KDB0:				/* Breakpoint to KDB (the "good" debugger)? */
				current_debugger = KDB_CUR_DB;		/* Yes, set it */
				kdb_trap(type, saved_state);		/* Enter it */
				break;

			case TRAP_DEBUGGER_INST:			/* Should we enter the current debugger? */
			case TRAP_DIRECT_INST:				/* Should we enter the current debugger? */
				if (current_debugger == KDP_CUR_DB)	/* Is current KDP? */
					kdp_trap(type, saved_state);	/* Yes, enter it */
				else if (current_debugger == KDB_CUR_DB)	/* Is this KDB? */
					kdb_trap(type, saved_state);	/* Yes, go ahead and enter */
				else goto debugger_error;		/* No debugger active */
				break;

			default:					/* Unknown/bogus trap type */
	while(1) {							/* We are here to handle debugger switches */

		if(!directcall) {					/* Was this a direct call? */
			if(!switch_debugger) break;			/* No, then leave if no switch requested... */

/*
 *			Note: we can only switch to a debugger we have. Ignore bogus switch requests.
 */
			if (debugger_debug) kprintf("Call_DebuggerC(%d): switching debuggers\n", my_cpu);	/* (TEST/DEBUG) */

			if(current_debugger == KDP_CUR_DB) current_debugger = KDB_CUR_DB;	/* Switch to KDB */
			else if(current_debugger == KDB_CUR_DB) current_debugger = KDP_CUR_DB;	/* Switch to KDP */
		}

		switch_debugger = 0;					/* Clear request */
		directcall = 0;						/* Clear first-time direct call indication */

		switch (current_debugger) {				/* Enter correct debugger */

			case KDP_CUR_DB:				/* Enter KDP */
				kdp_trap(type, saved_state);
				break;

			case KDB_CUR_DB:				/* Enter KDB */
				kdb_trap(type, saved_state);
				break;

			default:					/* No debugger installed */
		}
	}

debugger_exit:
	if (debugger_debug) kprintf("Call_DebuggerC(%d): exit - inst = %08X, cpu=%d(%d), run=%d\n", my_cpu,
		instr, my_cpu, debugger_cpu, db_run_mode);		/* (TEST/DEBUG) */
	if ((instr == TRAP_DEBUGGER_INST) ||				/* Did we trap to enter debugger? */
		(instr == TRAP_DIRECT_INST)) saved_state->save_srr0 += TRAP_INST_SIZE;	/* Yes, point past trap */

	wasdebugger = 0;						/* Assume not debugger */
	if(debugger_cpu == my_cpu) {					/* Are we the debugger processor? */
		wasdebugger = 1;					/* Remember that we were the debugger */
		LLTraceSet(lastTrace);					/* Enable tracing on the way out if we are debugger */
	}

	wait = FALSE;							/* Assume we are not going to wait */
	if (db_run_mode == STEP_CONTINUE) {				/* Are we going to run? */
		wait = TRUE;						/* Yeah, remember to wait for breakpoints to clear */
		debugger_cpu = -1;					/* Release other processor's debuggers */
		for(tcpu = 0; tcpu < real_ncpus; tcpu++)
			PerProcTable[tcpu].ppe_vaddr->debugger_pending = 0;	/* Release request (this is a HACK) */
		NMIss = 0;						/* Let NMI bounce */
	}

	if(db_run_mode == STEP_ONCE) {					/* Are we about to step? */
		disable_preemption();					/* Disable preemption for the step */
		db_im_stepping = my_cpu;				/* Remember that I am about to step */
	}

	if (db_breakpoints_inserted) cpus_holding_bkpts--;		/* If any breakpoints, back off count */
	if (PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave) PerProcTable[my_cpu].ppe_vaddr->debugger_is_slave--;	/* If we were a slave, uncount us */

	printf("Call_Debugger: exit - cpu %d, debugger_cpu %d, run_mode %d holds %d\n",
		my_cpu, debugger_cpu, db_run_mode,
		cpus_holding_bkpts);

	unlock_debugger();						/* Release the lock */
	PerProcTable[my_cpu].ppe_vaddr->debugger_active--;		/* Say we aren't active anymore */

	if (wait) while(cpus_holding_bkpts);				/* Wait for breakpoints to clear */

	hw_atomic_sub(&debug_mode, 1);					/* Set out of debug now */

	return(1);							/* Exit debugger normally */

debugger_error:
	if(db_run_mode != STEP_ONCE) enable_preemption_no_check();	/* Enable preemption, but don't preempt here */
	hw_atomic_sub(&debug_mode, 1);					/* Set out of debug now */
	return(0);							/* Return in shame... */
}
void lock_debugger(void) {

	my_cpu = cpu_number();					/* Get our CPU number */

	while(1) {						/* Check until we get it */

		if (debugger_cpu != -1 && debugger_cpu != my_cpu) continue;	/* Someone, not us, is debugger... */
		if (hw_lock_try(&debugger_lock)) {		/* Get the debug lock */
			if (debugger_cpu == -1 || debugger_cpu == my_cpu) break;	/* Is it us? */
			hw_lock_unlock(&debugger_lock);		/* Not us, release lock */
		}
	}
}

void unlock_debugger(void) {

	hw_lock_unlock(&debugger_lock);
}
} __attribute__((packed));

typedef struct pasc pasc_t;

int packAsc (unsigned char *inbuf, unsigned int length)
{
	unsigned int i, j = 0;

	for (i = 0; i < length; i += 8)
	{
		bcopy ((char *) &pack, inbuf + j, 7);
	}
	if (0 != (i - length))
		inbuf[j - (i - length)] &= 0xFF << (8 - (i - length));
	return j - (((i - length) == 7) ? 6 : (i - length));
}
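
/*
 * Added commentary: packAsc() shrinks the panic log before it is written to
 * NVRAM by storing eight 7-bit ASCII characters in seven bytes (the elided
 * fields of struct pasc are presumably eight 7-bit bitfields). The sketch
 * below only illustrates the idea with explicit shifts; it is not guaranteed
 * to match the bit order produced by the packed bitfield struct above.
 *
 *	static void pack8to7(const unsigned char in[8], unsigned char out[7])
 *	{
 *		unsigned long long acc = 0;
 *		int k;
 *
 *		for (k = 0; k < 8; k++)         // accumulate 8 x 7 bits, MSB first
 *			acc = (acc << 7) | (in[k] & 0x7F);
 *		for (k = 6; k >= 0; k--) {      // emit the 56 bits as 7 bytes
 *			out[k] = (unsigned char)(acc & 0xFF);
 *			acc >>= 8;
 *		}
 *	}
 *
 * The reverse transform reads seven bytes back into the 56-bit accumulator
 * and peels off 7 bits at a time to recover the original characters.
 */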