/* apple/xnu (xnu-792.6.61) - osfmk/i386/db_interface.c */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * Interface to new debugger.
 */
#include <platforms.h>
#include <time_stamp.h>
#include <mach_mp_debug.h>
#include <mach_ldebug.h>
#include <kern/spl.h>
#include <kern/cpu_number.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <vm/pmap.h>

#include <i386/thread.h>
#include <i386/db_machdep.h>
#include <i386/seg.h>
#include <i386/trap.h>
#include <i386/setjmp.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>

#include <mach/vm_param.h>
#include <vm/vm_map.h>
#include <kern/thread.h>
#include <kern/task.h>

#include <ddb/db_command.h>
#include <ddb/db_task_thread.h>
#include <ddb/db_run.h>
#include <ddb/db_trap.h>
#include <ddb/db_output.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_break.h>
#include <ddb/db_watch.h>

int db_active = 0;
struct i386_saved_state *i386_last_saved_statep;
struct i386_saved_state i386_nested_saved_state;
unsigned i386_last_kdb_sp;

extern thread_t db_default_act;
extern pt_entry_t *DMAP1;
extern caddr_t DADDR1;
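
/*
 * DMAP1/DADDR1 are assumed here to be a reserved scratch PTE and the
 * kernel virtual page it maps, set up elsewhere in the pmap layer.
 * db_user_to_kernel_address() below rewrites *DMAP1 to point at a user
 * page's physical frame, so DADDR1 serves as a one-page window through
 * which the debugger reads and writes user memory.
 */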

#if MACH_MP_DEBUG
extern int masked_state_cnt[];
#endif  /* MACH_MP_DEBUG */

/*
 * Enter KDB through a keyboard trap.
 * We show the registers as of the keyboard interrupt
 * instead of those at its call to KDB.
 */
struct int_regs {
    int gs;
    int fs;
    int edi;
    int esi;
    int ebp;
    int ebx;
    struct i386_interrupt_state *is;
};
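
/*
 * A sketch of the frame this is assumed to describe (the authoritative
 * layout is the keyboard-interrupt entry stub in the i386 locore
 * assembly, which may differ in detail): gs sits at the lowest address,
 * so the stub would push the i386_interrupt_state pointer, then ebx,
 * ebp, esi, edi, fs and finally gs before handing its stack pointer to
 * kdb_kentry().  kdb_kentry() below also reaches past the interrupt
 * state (is + 1) for the trapped esp and ss.
 */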

extern char *trap_type[];
extern int TRAP_TYPES;

/* Forward */

extern void kdbprinttrap(
    int type,
    int code,
    int *pc,
    int sp);
extern void kdb_kentry(
    struct int_regs *int_regs);
extern int db_user_to_kernel_address(
    task_t task,
    vm_offset_t addr,
    unsigned *kaddr,
    int flag);
extern void db_write_bytes_user_space(
    vm_offset_t addr,
    int size,
    char *data,
    task_t task);
extern int db_search_null(
    task_t task,
    unsigned *svaddr,
    unsigned evaddr,
    unsigned *skaddr,
    int flag);
extern int kdb_enter(int);
extern void kdb_leave(void);
extern void lock_kdb(void);
extern void unlock_kdb(void);

/*
 * kdb_trap - field a TRACE or BPT trap
 */

extern jmp_buf_t *db_recover;

/*
 * Translate the state saved in a task state segment into an
 * exception frame.  Since we "know" we always want the state
 * in a ktss, we hard-wire that in, rather than indexing the gdt
 * with tss_sel to derive a pointer to the desired tss.
 */
void
db_tss_to_frame(
    int tss_sel,
    struct i386_saved_state *regs)
{
    extern struct i386_tss ktss;
    int mycpu = cpu_number();
    struct i386_tss *tss;

    tss = cpu_datap(mycpu)->cpu_desc_index.cdi_ktss;    /* XXX */

    /*
     * ddb will overwrite whatever's in esp, so put esp0 elsewhere, too.
     */
    regs->esp = tss->esp0;
    regs->efl = tss->eflags;
    regs->eip = tss->eip;
    regs->trapno = tss->ss0;    /* XXX */
    regs->err = tss->esp0;      /* XXX */
    regs->eax = tss->eax;
    regs->ecx = tss->ecx;
    regs->edx = tss->edx;
    regs->ebx = tss->ebx;
    regs->uesp = tss->esp;
    regs->ebp = tss->ebp;
    regs->esi = tss->esi;
    regs->edi = tss->edi;
    regs->es = tss->es;
    regs->ss = tss->ss;
    regs->cs = tss->cs;
    regs->ds = tss->ds;
    regs->fs = tss->fs;
    regs->gs = tss->gs;
}

/*
 * Compose a call to the debugger from the saved state in regs.
 * (No reason not to do this in C.)
 */
boolean_t
db_trap_from_asm(
    struct i386_saved_state *regs)
{
    int code;
    int type;

    type = regs->trapno;
    code = regs->err;
    return (kdb_trap(type, code, regs));
}

int
kdb_trap(
    int type,
    int code,
    struct i386_saved_state *regs)
{
    extern char etext;
    boolean_t trap_from_user;
    spl_t s = splhigh();

    switch (type) {
    case T_DEBUG:   /* single_step */
    {
        extern int dr_addr[];
        int addr;
        int status = dr6();

        if (status & 0xf) {     /* hmm hdw break */
            addr = status & 0x8 ? dr_addr[3] :
                   status & 0x4 ? dr_addr[2] :
                   status & 0x2 ? dr_addr[1] :
                                  dr_addr[0];
            regs->efl |= EFL_RF;
            db_single_step_cmd(addr, 0, 1, "p");
        }
    }
    /* FALLTHROUGH */
    case T_INT3:        /* breakpoint */
    case T_WATCHPOINT:  /* watchpoint */
    case -1:            /* keyboard interrupt */
        break;

    default:
        if (db_recover) {
            i386_nested_saved_state = *regs;
            db_printf("Caught ");
            if (type < 0 || type > TRAP_TYPES)
                db_printf("type %d", type);
            else
                db_printf("%s", trap_type[type]);
            db_printf(" trap, code = %x, pc = %x\n",
                      code, regs->eip);
            splx(s);
            db_error("");
            /*NOTREACHED*/
        }
        kdbprinttrap(type, code, (int *)&regs->eip, regs->uesp);
    }

    disable_preemption();

    current_cpu_datap()->cpu_kdb_saved_ipl = s;
    current_cpu_datap()->cpu_kdb_saved_state = regs;

    i386_last_saved_statep = regs;
    i386_last_kdb_sp = (unsigned) &type;

    if (!kdb_enter(regs->eip))
        goto kdb_exit;

    /* Should switch to kdb's own stack here. */

    if (!IS_USER_TRAP(regs, &etext)) {
        bzero((char *)&ddb_regs, sizeof (ddb_regs));
        *(struct i386_saved_state_from_kernel *)&ddb_regs =
            *(struct i386_saved_state_from_kernel *)regs;
        trap_from_user = FALSE;
    }
    else {
        ddb_regs = *regs;
        trap_from_user = TRUE;
    }
    if (!trap_from_user) {
        /*
         * Kernel mode - esp and ss not saved
         */
        ddb_regs.uesp = (int)&regs->uesp;   /* kernel stack pointer */
        ddb_regs.ss = KERNEL_DS;
    }

    db_active++;
    db_task_trap(type, code, trap_from_user);
    db_active--;

    regs->eip = ddb_regs.eip;
    regs->efl = ddb_regs.efl;
    regs->eax = ddb_regs.eax;
    regs->ecx = ddb_regs.ecx;
    regs->edx = ddb_regs.edx;
    regs->ebx = ddb_regs.ebx;
    if (trap_from_user) {
        /*
         * user mode - saved esp and ss valid
         */
        regs->uesp = ddb_regs.uesp;         /* user stack pointer */
        regs->ss = ddb_regs.ss & 0xffff;    /* user stack segment */
    }
    regs->ebp = ddb_regs.ebp;
    regs->esi = ddb_regs.esi;
    regs->edi = ddb_regs.edi;
    regs->es = ddb_regs.es & 0xffff;
    regs->cs = ddb_regs.cs & 0xffff;
    regs->ds = ddb_regs.ds & 0xffff;
    regs->fs = ddb_regs.fs & 0xffff;
    regs->gs = ddb_regs.gs & 0xffff;

    if ((type == T_INT3) &&
        (db_get_task_value(regs->eip,
                           BKPT_SIZE,
                           FALSE,
                           db_target_space(current_thread(),
                                           trap_from_user))
         == BKPT_INST))
        regs->eip += BKPT_SIZE;

kdb_exit:
    kdb_leave();

    current_cpu_datap()->cpu_kdb_saved_state = 0;

#if MACH_MP_DEBUG
    current_cpu_datap()->cpu_masked_state_cnt = 0;
#endif  /* MACH_MP_DEBUG */

    enable_preemption();

    splx(s);

    /*
     * Allow continue to upper layers of exception handling if
     * trap was not a debugging trap.
     */
    if (trap_from_user && type != T_DEBUG && type != T_INT3
        && type != T_WATCHPOINT)
        return 0;
    else
        return (1);
}
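
/*
 * Illustrative caller contract (a sketch; the real dispatch lives in the
 * i386 trap handlers and may differ in detail): a nonzero return means
 * the debugger consumed the trap and the saved state has been updated,
 * so the handler can simply resume; a zero return means this was a
 * genuine user-mode exception and normal exception handling continues.
 *
 *	if (kdb_trap(type, code, regs))
 *		return;
 *	... fall through to the usual exception path ...
 */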

/*
 * Enter KDB through a keyboard trap.
 * We show the registers as of the keyboard interrupt
 * instead of those at its call to KDB.
 */

spl_t kdb_oldspl;

void
kdb_kentry(
    struct int_regs *int_regs)
{
    extern char etext;
    boolean_t trap_from_user;
    struct i386_interrupt_state *is = int_regs->is;
    struct i386_saved_state regs;
    spl_t s;

    s = splhigh();
    kdb_oldspl = s;

    if (IS_USER_TRAP(is, &etext))
    {
        regs.uesp = ((int *)(is+1))[0];
        regs.ss = ((int *)(is+1))[1];
    }
    else {
        regs.ss = KERNEL_DS;
        regs.uesp = (int)(is+1);
    }
    regs.efl = is->efl;
    regs.cs = is->cs;
    regs.eip = is->eip;
    regs.eax = is->eax;
    regs.ecx = is->ecx;
    regs.edx = is->edx;
    regs.ebx = int_regs->ebx;
    regs.ebp = int_regs->ebp;
    regs.esi = int_regs->esi;
    regs.edi = int_regs->edi;
    regs.ds = is->ds;
    regs.es = is->es;
    regs.fs = int_regs->fs;
    regs.gs = int_regs->gs;

    disable_preemption();

    current_cpu_datap()->cpu_kdb_saved_state = &regs;

    if (!kdb_enter(regs.eip))
        goto kdb_exit;

    bcopy((char *)&regs, (char *)&ddb_regs, sizeof (ddb_regs));
    trap_from_user = IS_USER_TRAP(&ddb_regs, &etext);

    db_active++;
    db_task_trap(-1, 0, trap_from_user);
    db_active--;

    if (trap_from_user) {
        ((int *)(is+1))[0] = ddb_regs.uesp;
        ((int *)(is+1))[1] = ddb_regs.ss & 0xffff;
    }
    is->efl = ddb_regs.efl;
    is->cs = ddb_regs.cs & 0xffff;
    is->eip = ddb_regs.eip;
    is->eax = ddb_regs.eax;
    is->ecx = ddb_regs.ecx;
    is->edx = ddb_regs.edx;
    int_regs->ebx = ddb_regs.ebx;
    int_regs->ebp = ddb_regs.ebp;
    int_regs->esi = ddb_regs.esi;
    int_regs->edi = ddb_regs.edi;
    is->ds = ddb_regs.ds & 0xffff;
    is->es = ddb_regs.es & 0xffff;
    int_regs->fs = ddb_regs.fs & 0xffff;
    int_regs->gs = ddb_regs.gs & 0xffff;

kdb_exit:
    kdb_leave();
    current_cpu_datap()->cpu_kdb_saved_state = 0;

    enable_preemption();

    splx(s);
}

/*
 * Print trap reason.
 */

void
kdbprinttrap(
    int type,
    int code,
    int *pc,
    int sp)
{
    printf("kernel: ");
    if (type < 0 || type > TRAP_TYPES)
        db_printf("type %d", type);
    else
        db_printf("%s", trap_type[type]);
    db_printf(" trap, code=%x eip@%x = %x esp=%x\n",
              code, pc, *(int *)pc, sp);
    db_run_mode = STEP_CONTINUE;
}

int
db_user_to_kernel_address(
    task_t task,
    vm_offset_t addr,
    unsigned *kaddr,
    int flag)
{
    register pt_entry_t *ptp;
    vm_offset_t src;

    ptp = pmap_pte(task->map->pmap, addr);
    if (ptp == PT_ENTRY_NULL || (*ptp & INTEL_PTE_VALID) == 0) {
        if (flag) {
            db_printf("\nno memory is assigned to address %08x\n", addr);
            db_error(0);
            /* NOTREACHED */
        }
        return(-1);
    }

    src = (vm_offset_t)pte_to_pa(*ptp);
    *(int *) DMAP1 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
                     INTEL_PTE_REF | INTEL_PTE_MOD;
#if defined(I386_CPU)
    if (cpu_class == CPUCLASS_386) {
        invltlb();
    } else
#endif
    {
        invlpg((u_int)DADDR1);
    }

    *kaddr = (unsigned)DADDR1 + (addr & PAGE_MASK);

    return(0);
}

/*
 * Read bytes from kernel address space for debugger.
 */

void
db_read_bytes(
    vm_offset_t addr,
    int size,
    char *data,
    task_t task)
{
    register char *src;
    register int n;
    unsigned kern_addr;

    src = (char *)addr;
    if (task == kernel_task || task == TASK_NULL) {
        while (--size >= 0) {
            if (addr++ > VM_MAX_KERNEL_ADDRESS) {
                db_printf("\nbad address %x\n", addr);
                db_error(0);
                /* NOTREACHED */
            }
            *data++ = *src++;
        }
        return;
    }
    while (size > 0) {
        if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
            return;
        src = (char *)kern_addr;
        n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
        if (n > size)
            n = size;
        size -= n;
        addr += n;
        while (--n >= 0)
            *data++ = *src++;
    }
}

/*
 * Write bytes to kernel address space for debugger.
 */

void
db_write_bytes(
    vm_offset_t addr,
    int size,
    char *data,
    task_t task)
{
    register char *dst;

    register pt_entry_t *ptep0 = 0;
    pt_entry_t oldmap0 = 0;
    vm_offset_t addr1;
    register pt_entry_t *ptep1 = 0;
    pt_entry_t oldmap1 = 0;
    extern char etext;

    if (task && task != kernel_task) {
        db_write_bytes_user_space(addr, size, data, task);
        return;
    }

    if (addr >= VM_MIN_KERNEL_LOADED_ADDRESS) {
        db_write_bytes_user_space(addr, size, data, kernel_task);
        return;
    }

    if (addr >= VM_MIN_KERNEL_ADDRESS &&
        addr <= (vm_offset_t)&etext)
    {
        ptep0 = pmap_pte(kernel_pmap, addr);
        oldmap0 = *ptep0;
        *ptep0 |= INTEL_PTE_WRITE;

        addr1 = i386_trunc_page(addr + size - 1);
        if (i386_trunc_page(addr) != addr1) {
            /* data crosses a page boundary */

            ptep1 = pmap_pte(kernel_pmap, addr1);
            oldmap1 = *ptep1;
            *ptep1 |= INTEL_PTE_WRITE;
        }
        flush_tlb();
    }

    dst = (char *)addr;

    while (--size >= 0) {
        if (addr++ > VM_MAX_KERNEL_ADDRESS) {
            db_printf("\nbad address %x\n", addr);
            db_error(0);
            /* NOTREACHED */
        }
        *dst++ = *data++;
    }

    if (ptep0) {
        *ptep0 = oldmap0;
        if (ptep1) {
            *ptep1 = oldmap1;
        }
        flush_tlb();
    }
}

void
db_write_bytes_user_space(
    vm_offset_t addr,
    int size,
    char *data,
    task_t task)
{
    register char *dst;
    register int n;
    unsigned kern_addr;

    while (size > 0) {
        if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
            return;
        dst = (char *)kern_addr;
        n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
        if (n > size)
            n = size;
        size -= n;
        addr += n;
        while (--n >= 0)
            *dst++ = *data++;
    }
}

boolean_t
db_check_access(
    vm_offset_t addr,
    int size,
    task_t task)
{
    register int n;
    unsigned kern_addr;

    if (task == kernel_task || task == TASK_NULL) {
        if (kernel_task == TASK_NULL)
            return(TRUE);
        task = kernel_task;
    } else if (task == TASK_NULL) {
        if (current_thread() == THREAD_NULL)
            return(FALSE);
        task = current_thread()->task;
    }
    while (size > 0) {
        if (db_user_to_kernel_address(task, addr, &kern_addr, 0) < 0)
            return(FALSE);
        n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
        if (n > size)
            n = size;
        size -= n;
        addr += n;
    }
    return(TRUE);
}

boolean_t
db_phys_eq(
    task_t task1,
    vm_offset_t addr1,
    task_t task2,
    vm_offset_t addr2)
{
    unsigned kern_addr1, kern_addr2;

    if ((addr1 & (INTEL_PGBYTES-1)) != (addr2 & (INTEL_PGBYTES-1)))
        return(FALSE);
    if (task1 == TASK_NULL) {
        if (current_thread() == THREAD_NULL)
            return(FALSE);
        task1 = current_thread()->task;
    }
    if (db_user_to_kernel_address(task1, addr1, &kern_addr1, 0) < 0 ||
        db_user_to_kernel_address(task2, addr2, &kern_addr2, 0) < 0)
        return(FALSE);
    return(kern_addr1 == kern_addr2);
}

#define DB_USER_STACK_ADDR      (VM_MIN_KERNEL_ADDRESS)
#define DB_NAME_SEARCH_LIMIT    (DB_USER_STACK_ADDR-(INTEL_PGBYTES*3))
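
/*
 * With these definitions the task-name scan below is confined to the
 * three pages immediately under the top of the user address space: for
 * example, with 4K pages the window is
 * [VM_MIN_KERNEL_ADDRESS - 12KB, VM_MIN_KERNEL_ADDRESS).  db_task_name()
 * walks it backwards, skipping trailing nulls and then printing what it
 * assumes are the argument strings left at the top of the user stack.
 */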

int
db_search_null(
    task_t task,
    unsigned *svaddr,
    unsigned evaddr,
    unsigned *skaddr,
    int flag)
{
    register unsigned vaddr;
    register unsigned *kaddr;

    kaddr = (unsigned *)*skaddr;
    for (vaddr = *svaddr; vaddr > evaddr; vaddr -= sizeof(unsigned)) {
        if (vaddr % INTEL_PGBYTES == 0) {
            vaddr -= sizeof(unsigned);
            if (db_user_to_kernel_address(task, vaddr, skaddr, 0) < 0)
                return(-1);
            kaddr = (unsigned *)*skaddr;
        } else {
            vaddr -= sizeof(unsigned);
            kaddr--;
        }
        if ((*kaddr == 0) ^ (flag == 0)) {
            *svaddr = vaddr;
            *skaddr = (unsigned)kaddr;
            return(0);
        }
    }
    return(-1);
}

void
db_task_name(
    task_t task)
{
    register char *p;
    register int n;
    unsigned vaddr, kaddr;

    vaddr = DB_USER_STACK_ADDR;
    kaddr = 0;

    /*
     * skip nulls at the end
     */
    if (db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 0) < 0) {
        db_printf(DB_NULL_TASK_NAME);
        return;
    }
    /*
     * search start of args
     */
    if (db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 1) < 0) {
        db_printf(DB_NULL_TASK_NAME);
        return;
    }

    n = DB_TASK_NAME_LEN-1;
    p = (char *)kaddr + sizeof(unsigned);
    for (vaddr += sizeof(int); vaddr < DB_USER_STACK_ADDR && n > 0;
         vaddr++, p++, n--) {
        if (vaddr % INTEL_PGBYTES == 0) {
            (void)db_user_to_kernel_address(task, vaddr, &kaddr, 0);
            p = (char *)kaddr;
        }
        db_printf("%c", (*p < ' ' || *p > '~') ? ' ' : *p);
    }
    while (n-- >= 0)    /* compare with >= 0 for one more space */
        db_printf(" ");
}

/*
 * Code used to synchronize kdb among all cpus, one active at a time, switch
 * from one to another using kdb_on! #cpu or cpu #cpu
 */
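
/*
 * Typical use from the ddb prompt (a sketch of the intended flow; the
 * exact command syntax is defined by the ddb command tables): break into
 * the debugger on any cpu, then "kdb_on! 2" hands the single active
 * debugger context over to cpu 2 via kdb_on() below, which re-arms
 * breakpoints, releases kdb_lock, and spins in lock_kdb() until control
 * comes back.
 */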

decl_simple_lock_data(, kdb_lock)   /* kdb lock */

#define db_simple_lock_init(l, e)   hw_lock_init(&((l)->interlock))
#define db_simple_lock_try(l)       hw_lock_try(&((l)->interlock))
#define db_simple_unlock(l)         hw_lock_unlock(&((l)->interlock))

int kdb_cpu = -1;       /* current cpu running kdb */
int kdb_debug = 0;
volatile unsigned int cpus_holding_bkpts;   /* counter for number of cpus holding
                                             * breakpoints (ie: cpus that did not
                                             * insert back breakpoints) */
extern boolean_t db_breakpoints_inserted;

void
db_machdep_init(void)
{
    int c;

    db_simple_lock_init(&kdb_lock, 0);
    for (c = 0; c < real_ncpus; ++c) {
        db_stacks[c] = (vm_offset_t) (db_stack_store +
            (INTSTACK_SIZE * (c + 1)) - sizeof (natural_t));
        if (c == master_cpu) {
            dbtss.esp0 = (int)(db_task_stack_store +
                (INTSTACK_SIZE * (c + 1)) - sizeof (natural_t));
            dbtss.esp = dbtss.esp0;
            dbtss.eip = (int)&db_task_start;
            /*
             * The TSS for the debugging task on each slave CPU
             * is set up in mp_desc_init().
             */
        }
    }
}

/*
 * Called when entering kdb:
 * Takes kdb lock.  If we were called remotely (slave state) we just
 * wait for kdb_cpu to be equal to cpu_number().  Otherwise enter kdb if
 * not active on another cpu.
 * If db_pass_thru[cpu_number()] > 0, then kdb can't stop now.
 */

int
kdb_enter(int pc)
{
    int my_cpu;
    int retval;

    disable_preemption();

    my_cpu = cpu_number();

    if (current_cpu_datap()->cpu_db_pass_thru) {
        retval = 0;
        goto kdb_exit;
    }

    current_cpu_datap()->cpu_kdb_active++;
    lock_kdb();

    if (kdb_debug)
        db_printf("kdb_enter: cpu %d, is_slave %d, kdb_cpu %d, run mode %d pc %x (%x) holds %d\n",
                  my_cpu, current_cpu_datap()->cpu_kdb_is_slave, kdb_cpu,
                  db_run_mode, pc, *(int *)pc, cpus_holding_bkpts);
    if (db_breakpoints_inserted)
        cpus_holding_bkpts++;
    if (kdb_cpu == -1 && !current_cpu_datap()->cpu_kdb_is_slave) {
        kdb_cpu = my_cpu;
        remote_kdb();   /* stop other cpus */
        retval = 1;
    } else if (kdb_cpu == my_cpu)
        retval = 1;
    else
        retval = 0;

kdb_exit:
    enable_preemption();

    return (retval);
}

void
kdb_leave(void)
{
    int my_cpu;
    boolean_t wait = FALSE;

    disable_preemption();

    my_cpu = cpu_number();

    if (db_run_mode == STEP_CONTINUE) {
        wait = TRUE;
        kdb_cpu = -1;
    }
    if (db_breakpoints_inserted)
        cpus_holding_bkpts--;
    if (current_cpu_datap()->cpu_kdb_is_slave)
        current_cpu_datap()->cpu_kdb_is_slave--;
    if (kdb_debug)
        db_printf("kdb_leave: cpu %d, kdb_cpu %d, run_mode %d pc %x (%x) holds %d\n",
                  my_cpu, kdb_cpu, db_run_mode,
                  ddb_regs.eip, *(int *)ddb_regs.eip,
                  cpus_holding_bkpts);
    clear_kdb_intr();
    unlock_kdb();
    current_cpu_datap()->cpu_kdb_active--;

    enable_preemption();

    if (wait) {
        while (cpus_holding_bkpts)
            ;
    }
}

void
lock_kdb(void)
{
    int my_cpu;
    register int i;
    extern void kdb_console(void);

    disable_preemption();

    my_cpu = cpu_number();

    for (;;) {
        kdb_console();
        if (kdb_cpu != -1 && kdb_cpu != my_cpu) {
            continue;
        }
        if (db_simple_lock_try(&kdb_lock)) {
            if (kdb_cpu == -1 || kdb_cpu == my_cpu)
                break;
            db_simple_unlock(&kdb_lock);
        }
    }

    enable_preemption();
}

#if TIME_STAMP
extern unsigned old_time_stamp;
#endif  /* TIME_STAMP */

void
unlock_kdb(void)
{
    db_simple_unlock(&kdb_lock);
#if TIME_STAMP
    old_time_stamp = 0;
#endif  /* TIME_STAMP */
}


#ifdef __STDC__
#define KDB_SAVE(type, name)    extern type name; type name##_save = name
#define KDB_RESTORE(name)       name = name##_save
#else   /* __STDC__ */
#define KDB_SAVE(type, name)    extern type name; type name/**/_save = name
#define KDB_RESTORE(name)       name = name/**/_save
#endif  /* __STDC__ */
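
/*
 * For reference, in the __STDC__ case a single save/restore pair expands
 * roughly as follows (sketch of the preprocessor output):
 *
 *	KDB_SAVE(int, db_run_mode);   =>  extern int db_run_mode;
 *	                                  int db_run_mode_save = db_run_mode;
 *	KDB_RESTORE(db_run_mode);     =>  db_run_mode = db_run_mode_save;
 *
 * so KDB_SAVE_CTXT() below declares a block of *_save locals inside
 * kdb_on(), and KDB_RESTORE_CTXT() copies them back after control
 * returns.
 */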

#define KDB_SAVE_CTXT() \
    KDB_SAVE(int, db_run_mode); \
    KDB_SAVE(boolean_t, db_sstep_print); \
    KDB_SAVE(int, db_loop_count); \
    KDB_SAVE(int, db_call_depth); \
    KDB_SAVE(int, db_inst_count); \
    KDB_SAVE(int, db_last_inst_count); \
    KDB_SAVE(int, db_load_count); \
    KDB_SAVE(int, db_store_count); \
    KDB_SAVE(boolean_t, db_cmd_loop_done); \
    KDB_SAVE(jmp_buf_t *, db_recover); \
    KDB_SAVE(db_addr_t, db_dot); \
    KDB_SAVE(db_addr_t, db_last_addr); \
    KDB_SAVE(db_addr_t, db_prev); \
    KDB_SAVE(db_addr_t, db_next); \
    KDB_SAVE(db_regs_t, ddb_regs);

#define KDB_RESTORE_CTXT() \
    KDB_RESTORE(db_run_mode); \
    KDB_RESTORE(db_sstep_print); \
    KDB_RESTORE(db_loop_count); \
    KDB_RESTORE(db_call_depth); \
    KDB_RESTORE(db_inst_count); \
    KDB_RESTORE(db_last_inst_count); \
    KDB_RESTORE(db_load_count); \
    KDB_RESTORE(db_store_count); \
    KDB_RESTORE(db_cmd_loop_done); \
    KDB_RESTORE(db_recover); \
    KDB_RESTORE(db_dot); \
    KDB_RESTORE(db_last_addr); \
    KDB_RESTORE(db_prev); \
    KDB_RESTORE(db_next); \
    KDB_RESTORE(ddb_regs);

/*
 * switch to another cpu
 */

void
kdb_on(
    int cpu)
{
    KDB_SAVE_CTXT();
    if (cpu < 0 || cpu >= real_ncpus || !cpu_datap(cpu)->cpu_kdb_active)
        return;
    db_set_breakpoints();
    db_set_watchpoints();
    kdb_cpu = cpu;
    unlock_kdb();
    lock_kdb();
    db_clear_breakpoints();
    db_clear_watchpoints();
    KDB_RESTORE_CTXT();
    if (kdb_cpu == -1) {    /* someone continued */
        kdb_cpu = cpu_number();
        db_continue_cmd(0, 0, 0, "");
    }
}

void
db_reboot(
    db_expr_t addr,
    boolean_t have_addr,
    db_expr_t count,
    char *modif)
{
    boolean_t reboot = TRUE;
    char *cp, c;

    cp = modif;
    while ((c = *cp++) != 0) {
        if (c == 'r')   /* reboot */
            reboot = TRUE;
        if (c == 'h')   /* halt */
            reboot = FALSE;
    }
    halt_all_cpus(reboot);
}