[apple/xnu.git] / osfmk / i386 / db_interface.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * @OSF_COPYRIGHT@
25 */
26 /*
27 * Mach Operating System
28 * Copyright (c) 1991,1990 Carnegie Mellon University
29 * All Rights Reserved.
30 *
31 * Permission to use, copy, modify and distribute this software and its
32 * documentation is hereby granted, provided that both the copyright
33 * notice and this permission notice appear in all copies of the
34 * software, derivative works or modified versions, and any portions
35 * thereof, and that both notices appear in supporting documentation.
36 *
37 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
38 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
39 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 *
41 * Carnegie Mellon requests users of this software to return to
42 *
43 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
44 * School of Computer Science
45 * Carnegie Mellon University
46 * Pittsburgh PA 15213-3890
47 *
48 * any improvements or extensions that they make and grant Carnegie Mellon
49 * the rights to redistribute these changes.
50 */
51 /*
52 */
53
54 /*
55 * Interface to new debugger.
56 */
57 #include <platforms.h>
58 #include <time_stamp.h>
59 #include <mach_mp_debug.h>
60 #include <mach_ldebug.h>
61 #include <kern/spl.h>
62 #include <kern/cpu_number.h>
63 #include <kern/kern_types.h>
64 #include <kern/misc_protos.h>
65 #include <vm/pmap.h>
66
67 #include <i386/thread.h>
68 #include <i386/db_machdep.h>
69 #include <i386/seg.h>
70 #include <i386/trap.h>
71 #include <i386/setjmp.h>
72 #include <i386/pmap.h>
73 #include <i386/misc_protos.h>
74
75 #include <mach/vm_param.h>
76 #include <vm/vm_map.h>
77 #include <kern/thread.h>
78 #include <kern/task.h>
79
80 #include <ddb/db_command.h>
81 #include <ddb/db_task_thread.h>
82 #include <ddb/db_run.h>
83 #include <ddb/db_trap.h>
84 #include <ddb/db_output.h>
85 #include <ddb/db_access.h>
86 #include <ddb/db_sym.h>
87 #include <ddb/db_break.h>
88 #include <ddb/db_watch.h>
89
90 int db_active = 0;
91 struct i386_saved_state *i386_last_saved_statep;
92 struct i386_saved_state i386_nested_saved_state;
93 unsigned i386_last_kdb_sp;
94
95 extern thread_t db_default_act;
96 extern pt_entry_t *DMAP1;
97 extern caddr_t DADDR1;
98
99 #if MACH_MP_DEBUG
100 extern int masked_state_cnt[];
101 #endif /* MACH_MP_DEBUG */
102
103 /*
104 * Enter KDB through a keyboard trap.
105 * We show the registers as of the keyboard interrupt
106 * instead of those at its call to KDB.
107 */
108 struct int_regs {
109 int gs;
110 int fs;
111 int edi;
112 int esi;
113 int ebp;
114 int ebx;
115 struct i386_interrupt_state *is;
116 };
117
118 extern char * trap_type[];
119 extern int TRAP_TYPES;
120
121 /* Forward */
122
123 extern void kdbprinttrap(
124 int type,
125 int code,
126 int *pc,
127 int sp);
128 extern void kdb_kentry(
129 struct int_regs *int_regs);
130 extern int db_user_to_kernel_address(
131 task_t task,
132 vm_offset_t addr,
133 unsigned *kaddr,
134 int flag);
135 extern void db_write_bytes_user_space(
136 vm_offset_t addr,
137 int size,
138 char *data,
139 task_t task);
140 extern int db_search_null(
141 task_t task,
142 unsigned *svaddr,
143 unsigned evaddr,
144 unsigned *skaddr,
145 int flag);
146 extern int kdb_enter(int);
147 extern void kdb_leave(void);
148 extern void lock_kdb(void);
149 extern void unlock_kdb(void);
150
151 /*
152 * kdb_trap - field a TRACE or BPT trap
153 */
154
155
156 extern jmp_buf_t *db_recover;
157
158 /*
159 * Translate the state saved in a task state segment into an
160 * exception frame. Since we "know" we always want the state
161 * in the current cpu's ktss, we take it from the per-cpu descriptor
162 * data rather than indexing the gdt with tss_sel to locate the tss.
163 */
164 void
165 db_tss_to_frame(
166 int tss_sel,
167 struct i386_saved_state *regs)
168 {
169 extern struct i386_tss ktss;
170 int mycpu = cpu_number();
171 struct i386_tss *tss;
172
173 tss = cpu_datap(mycpu)->cpu_desc_index.cdi_ktss; /* XXX */
174
175 /*
176 * ddb will overwrite whatever's in esp, so put esp0 elsewhere, too.
177 */
178 regs->esp = tss->esp0;
179 regs->efl = tss->eflags;
180 regs->eip = tss->eip;
181 regs->trapno = tss->ss0; /* XXX */
182 regs->err = tss->esp0; /* XXX */
183 regs->eax = tss->eax;
184 regs->ecx = tss->ecx;
185 regs->edx = tss->edx;
186 regs->ebx = tss->ebx;
187 regs->uesp = tss->esp;
188 regs->ebp = tss->ebp;
189 regs->esi = tss->esi;
190 regs->edi = tss->edi;
191 regs->es = tss->es;
192 regs->ss = tss->ss;
193 regs->cs = tss->cs;
194 regs->ds = tss->ds;
195 regs->fs = tss->fs;
196 regs->gs = tss->gs;
197 }
198
199 /*
200 * Compose a call to the debugger from the saved state in regs. (No
201 * reason not to do this in C.)
202 */
203 boolean_t
204 db_trap_from_asm(
205 struct i386_saved_state *regs)
206 {
207 int code;
208 int type;
209
210 type = regs->trapno;
211 code = regs->err;
212 return (kdb_trap(type, code, regs));
213 }
214
215 int
216 kdb_trap(
217 int type,
218 int code,
219 struct i386_saved_state *regs)
220 {
221 extern char etext;
222 boolean_t trap_from_user;
223 spl_t s = splhigh();
224
225 switch (type) {
226 case T_DEBUG: /* single_step */
227 {
228 extern int dr_addr[];
229 int addr;
230 int status = dr6();
231
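		/*
		 * The low bits of DR6 (B0-B3) identify which hardware debug
		 * register fired; recover its linear address from dr_addr[],
		 * set the resume flag so the breakpoint is not retaken, and
		 * run the single-step command.  Control then falls through
		 * to the break below, as for an ordinary breakpoint trap.
		 */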
232 if (status & 0xf) { /* hardware breakpoint */
233 addr = status & 0x8 ? dr_addr[3] :
234 status & 0x4 ? dr_addr[2] :
235 status & 0x2 ? dr_addr[1] :
236 dr_addr[0];
237 regs->efl |= EFL_RF;
238 db_single_step_cmd(addr, 0, 1, "p");
239 }
240 }
241 case T_INT3: /* breakpoint */
242 case T_WATCHPOINT: /* watchpoint */
243 case -1: /* keyboard interrupt */
244 break;
245
246 default:
247 if (db_recover) {
248 i386_nested_saved_state = *regs;
249 db_printf("Caught ");
250 if (type < 0 || type >= TRAP_TYPES)
251 db_printf("type %d", type);
252 else
253 db_printf("%s", trap_type[type]);
254 db_printf(" trap, code = %x, pc = %x\n",
255 code, regs->eip);
256 splx(s);
257 db_error("");
258 /*NOTREACHED*/
259 }
260 kdbprinttrap(type, code, (int *)&regs->eip, regs->uesp);
261 }
262
263 disable_preemption();
264
265 current_cpu_datap()->cpu_kdb_saved_ipl = s;
266 current_cpu_datap()->cpu_kdb_saved_state = regs;
267
268 i386_last_saved_statep = regs;
269 i386_last_kdb_sp = (unsigned) &type;
270
271 if (!kdb_enter(regs->eip))
272 goto kdb_exit;
273
274 /* Should switch to kdb's own stack here. */
275
276 if (!IS_USER_TRAP(regs, &etext)) {
277 bzero((char *)&ddb_regs, sizeof (ddb_regs));
278 *(struct i386_saved_state_from_kernel *)&ddb_regs =
279 *(struct i386_saved_state_from_kernel *)regs;
280 trap_from_user = FALSE;
281 }
282 else {
283 ddb_regs = *regs;
284 trap_from_user = TRUE;
285 }
286 if (!trap_from_user) {
287 /*
288 * Kernel mode - esp and ss not saved
289 */
290 ddb_regs.uesp = (int)&regs->uesp; /* kernel stack pointer */
291 ddb_regs.ss = KERNEL_DS;
292 }
293
294 db_active++;
295 db_task_trap(type, code, trap_from_user);
296 db_active--;
297
298 regs->eip = ddb_regs.eip;
299 regs->efl = ddb_regs.efl;
300 regs->eax = ddb_regs.eax;
301 regs->ecx = ddb_regs.ecx;
302 regs->edx = ddb_regs.edx;
303 regs->ebx = ddb_regs.ebx;
304 if (trap_from_user) {
305 /*
306 * user mode - saved esp and ss valid
307 */
308 regs->uesp = ddb_regs.uesp; /* user stack pointer */
309 regs->ss = ddb_regs.ss & 0xffff; /* user stack segment */
310 }
311 regs->ebp = ddb_regs.ebp;
312 regs->esi = ddb_regs.esi;
313 regs->edi = ddb_regs.edi;
314 regs->es = ddb_regs.es & 0xffff;
315 regs->cs = ddb_regs.cs & 0xffff;
316 regs->ds = ddb_regs.ds & 0xffff;
317 regs->fs = ddb_regs.fs & 0xffff;
318 regs->gs = ddb_regs.gs & 0xffff;
319
320 if ((type == T_INT3) &&
321 (db_get_task_value(regs->eip,
322 BKPT_SIZE,
323 FALSE,
324 db_target_space(current_thread(),
325 trap_from_user))
326 == BKPT_INST))
327 regs->eip += BKPT_SIZE;
328
329 kdb_exit:
330 kdb_leave();
331
332 current_cpu_datap()->cpu_kdb_saved_state = 0;
333
334 #if MACH_MP_DEBUG
335 current_cpu_datap()->cpu_masked_state_cnt = 0;
336 #endif /* MACH_MP_DEBUG */
337
338 enable_preemption();
339
340 splx(s);
341
342 /* Allow the trap to continue on to the upper layers of exception
343 * handling if it was not a debugging trap.
344 */
345
346 if (trap_from_user && type != T_DEBUG && type != T_INT3
347 && type != T_WATCHPOINT)
348 return 0;
349 else
350 return (1);
351 }
352
353 /*
354 * Enter KDB through a keyboard trap.
355 * We show the registers as of the keyboard interrupt
356 * instead of those at its call to KDB.
357 */
358
359 spl_t kdb_oldspl;
360
361 void
362 kdb_kentry(
363 struct int_regs *int_regs)
364 {
365 extern char etext;
366 boolean_t trap_from_user;
367 struct i386_interrupt_state *is = int_regs->is;
368 struct i386_saved_state regs;
369 spl_t s;
370
371 s = splhigh();
372 kdb_oldspl = s;
373
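	/*
	 * Build a full saved-state frame from the keyboard interrupt frame:
	 * a trap from user mode pushed esp/ss just past the
	 * i386_interrupt_state, while for a kernel-mode trap the stack
	 * pointer is simply the end of that frame.
	 */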
374 if (IS_USER_TRAP(is, &etext))
375 {
376 regs.uesp = ((int *)(is+1))[0];
377 regs.ss = ((int *)(is+1))[1];
378 }
379 else {
380 regs.ss = KERNEL_DS;
381 regs.uesp= (int)(is+1);
382 }
383 regs.efl = is->efl;
384 regs.cs = is->cs;
385 regs.eip = is->eip;
386 regs.eax = is->eax;
387 regs.ecx = is->ecx;
388 regs.edx = is->edx;
389 regs.ebx = int_regs->ebx;
390 regs.ebp = int_regs->ebp;
391 regs.esi = int_regs->esi;
392 regs.edi = int_regs->edi;
393 regs.ds = is->ds;
394 regs.es = is->es;
395 regs.fs = int_regs->fs;
396 regs.gs = int_regs->gs;
397
398 disable_preemption();
399
400 current_cpu_datap()->cpu_kdb_saved_state = &regs;
401
402 if (!kdb_enter(regs.eip))
403 goto kdb_exit;
404
405 bcopy((char *)&regs, (char *)&ddb_regs, sizeof (ddb_regs));
406 trap_from_user = IS_USER_TRAP(&ddb_regs, &etext);
407
408 db_active++;
409 db_task_trap(-1, 0, trap_from_user);
410 db_active--;
411
412 if (trap_from_user) {
413 ((int *)(is+1))[0] = ddb_regs.uesp;
414 ((int *)(is+1))[1] = ddb_regs.ss & 0xffff;
415 }
416 is->efl = ddb_regs.efl;
417 is->cs = ddb_regs.cs & 0xffff;
418 is->eip = ddb_regs.eip;
419 is->eax = ddb_regs.eax;
420 is->ecx = ddb_regs.ecx;
421 is->edx = ddb_regs.edx;
422 int_regs->ebx = ddb_regs.ebx;
423 int_regs->ebp = ddb_regs.ebp;
424 int_regs->esi = ddb_regs.esi;
425 int_regs->edi = ddb_regs.edi;
426 is->ds = ddb_regs.ds & 0xffff;
427 is->es = ddb_regs.es & 0xffff;
428 int_regs->fs = ddb_regs.fs & 0xffff;
429 int_regs->gs = ddb_regs.gs & 0xffff;
430
431 kdb_exit:
432 kdb_leave();
433 current_cpu_datap()->cpu_kdb_saved_state = 0;
434
435 enable_preemption();
436
437 splx(s);
438 }
439
440 /*
441 * Print trap reason.
442 */
443
444 void
445 kdbprinttrap(
446 int type,
447 int code,
448 int *pc,
449 int sp)
450 {
451 printf("kernel: ");
452 if (type < 0 || type >= TRAP_TYPES)
453 db_printf("type %d", type);
454 else
455 db_printf("%s", trap_type[type]);
456 db_printf(" trap, code=%x eip@%x = %x esp=%x\n",
457 code, pc, *(int *)pc, sp);
458 db_run_mode = STEP_CONTINUE;
459 }
460
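/*
 * Map a page of the given task's address space into the kernel through
 * the debugger's private DMAP1/DADDR1 window and return, via *kaddr, a
 * kernel address for addr.  Returns 0 on success; if the page is not
 * resident, returns -1, or raises a ddb error when flag is set.
 */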
461 int
462 db_user_to_kernel_address(
463 task_t task,
464 vm_offset_t addr,
465 unsigned *kaddr,
466 int flag)
467 {
468 register pt_entry_t *ptp;
	vm_offset_t src;	/* physical address backing the user page */
469
470 ptp = pmap_pte(task->map->pmap, addr);
471 if (ptp == PT_ENTRY_NULL || (*ptp & INTEL_PTE_VALID) == 0) {
472 if (flag) {
473 db_printf("\nno memory is assigned to address %08x\n", addr);
474 db_error(0);
475 /* NOTREACHED */
476 }
477 return(-1);
478 }
479
480 src = (vm_offset_t)pte_to_pa(*ptp);
481 *(int *) DMAP1 = INTEL_PTE_VALID | INTEL_PTE_RW | (src & PG_FRAME) |
482 INTEL_PTE_REF | INTEL_PTE_MOD;
483 #if defined(I386_CPU)
484 if (cpu_class == CPUCLASS_386) {
485 invltlb();
486 } else
487 #endif
488 {
489 invlpg((u_int)DADDR1);
490 }
491
492 *kaddr = (unsigned)DADDR1 + (addr & PAGE_MASK);
493
494 return(0);
495 }
496
497 /*
498 * Read bytes from kernel address space for debugger.
499 */
500
501 void
502 db_read_bytes(
503 vm_offset_t addr,
504 int size,
505 char *data,
506 task_t task)
507 {
508 register char *src;
509 register int n;
510 unsigned kern_addr;
511
512 src = (char *)addr;
513 if (task == kernel_task || task == TASK_NULL) {
514 while (--size >= 0) {
515 if (addr++ > VM_MAX_KERNEL_ADDRESS) {
516 db_printf("\nbad address %x\n", addr);
517 db_error(0);
518 /* NOTREACHED */
519 }
520 *data++ = *src++;
521 }
522 return;
523 }
524 while (size > 0) {
525 if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
526 return;
527 src = (char *)kern_addr;
528 n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
529 if (n > size)
530 n = size;
531 size -= n;
532 addr += n;
533 while (--n >= 0)
534 *data++ = *src++;
535 }
536 }
537
538 /*
539 * Write bytes to kernel address space for debugger.
540 */
541
542 void
543 db_write_bytes(
544 vm_offset_t addr,
545 int size,
546 char *data,
547 task_t task)
548 {
549 register char *dst;
550
551 register pt_entry_t *ptep0 = 0;
552 pt_entry_t oldmap0 = 0;
553 vm_offset_t addr1;
554 register pt_entry_t *ptep1 = 0;
555 pt_entry_t oldmap1 = 0;
556 extern char etext;
557
558 if (task && task != kernel_task) {
559 db_write_bytes_user_space(addr, size, data, task);
560 return;
561 }
562
563
564 if (addr >= VM_MIN_KERNEL_LOADED_ADDRESS) {
565 db_write_bytes_user_space(addr, size, data, kernel_task);
566 return;
567 }
568
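	/*
	 * Writes that land in kernel text (up to &etext) go through
	 * read-only mappings: temporarily set INTEL_PTE_WRITE on the
	 * page(s) covering the range, flush the TLB, and restore the
	 * original mappings once the bytes have been stored.
	 */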
569 if (addr >= VM_MIN_KERNEL_ADDRESS &&
570 addr <= (vm_offset_t)&etext)
571 {
572 ptep0 = pmap_pte(kernel_pmap, addr);
573 oldmap0 = *ptep0;
574 *ptep0 |= INTEL_PTE_WRITE;
575
576 addr1 = i386_trunc_page(addr + size - 1);
577 if (i386_trunc_page(addr) != addr1) {
578 /* data crosses a page boundary */
579
580 ptep1 = pmap_pte(kernel_pmap, addr1);
581 oldmap1 = *ptep1;
582 *ptep1 |= INTEL_PTE_WRITE;
583 }
584 flush_tlb();
585 }
586
587 dst = (char *)addr;
588
589 while (--size >= 0) {
590 if (addr++ > VM_MAX_KERNEL_ADDRESS) {
591 db_printf("\nbad address %x\n", addr);
592 db_error(0);
593 /* NOTREACHED */
594 }
595 *dst++ = *data++;
596 }
597
598 if (ptep0) {
599 *ptep0 = oldmap0;
600 if (ptep1) {
601 *ptep1 = oldmap1;
602 }
603 flush_tlb();
604 }
605 }
606
607 void
608 db_write_bytes_user_space(
609 vm_offset_t addr,
610 int size,
611 char *data,
612 task_t task)
613 {
614 register char *dst;
615 register int n;
616 unsigned kern_addr;
617
618 while (size > 0) {
619 if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
620 return;
621 dst = (char *)kern_addr;
622 n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
623 if (n > size)
624 n = size;
625 size -= n;
626 addr += n;
627 while (--n >= 0)
628 *dst++ = *data++;
629 }
630 }
631
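/*
 * Return TRUE if the range [addr, addr + size) is mapped in the given
 * task (defaulting to the kernel task), so the debugger can touch it
 * without faulting.
 */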
632 boolean_t
633 db_check_access(
634 vm_offset_t addr,
635 int size,
636 task_t task)
637 {
638 register int n;
639 unsigned kern_addr;
640
641 if (task == kernel_task || task == TASK_NULL) {
642 if (kernel_task == TASK_NULL)
643 return(TRUE);
644 task = kernel_task;
645 } else if (task == TASK_NULL) {
646 if (current_thread() == THREAD_NULL)
647 return(FALSE);
648 task = current_thread()->task;
649 }
650 while (size > 0) {
651 if (db_user_to_kernel_address(task, addr, &kern_addr, 0) < 0)
652 return(FALSE);
653 n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
654 if (n > size)
655 n = size;
656 size -= n;
657 addr += n;
658 }
659 return(TRUE);
660 }
661
662 boolean_t
663 db_phys_eq(
664 task_t task1,
665 vm_offset_t addr1,
666 task_t task2,
667 vm_offset_t addr2)
668 {
669 unsigned kern_addr1, kern_addr2;
670
671 if ((addr1 & (INTEL_PGBYTES-1)) != (addr2 & (INTEL_PGBYTES-1)))
672 return(FALSE);
673 if (task1 == TASK_NULL) {
674 if (current_thread() == THREAD_NULL)
675 return(FALSE);
676 task1 = current_thread()->task;
677 }
678 if (db_user_to_kernel_address(task1, addr1, &kern_addr1, 0) < 0 ||
679 db_user_to_kernel_address(task2, addr2, &kern_addr2, 0) < 0)
680 return(FALSE);
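	/*
	 * XXX Both lookups return an address in the single DADDR1 window
	 * (DADDR1 + page offset), so with equal page offsets the comparison
	 * below succeeds whenever both pages are resident; it does not
	 * compare the underlying physical pages.
	 */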
681 return(kern_addr1 == kern_addr2);
682 }
683
684 #define DB_USER_STACK_ADDR (VM_MIN_KERNEL_ADDRESS)
685 #define DB_NAME_SEARCH_LIMIT (DB_USER_STACK_ADDR-(INTEL_PGBYTES*3))
686
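/*
 * Scan task memory downward one word at a time, from *svaddr toward
 * evaddr.  With flag == 0 stop at the first non-zero word, otherwise at
 * the first zero word.  On a match, update *svaddr/*skaddr to the word
 * found and return 0; return -1 if the limit is reached or a page
 * cannot be mapped.
 */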
687 int
688 db_search_null(
689 task_t task,
690 unsigned *svaddr,
691 unsigned evaddr,
692 unsigned *skaddr,
693 int flag)
694 {
695 register unsigned vaddr;
696 register unsigned *kaddr;
697
698 kaddr = (unsigned *)*skaddr;
699 for (vaddr = *svaddr; vaddr > evaddr; vaddr -= sizeof(unsigned)) {
700 if (vaddr % INTEL_PGBYTES == 0) {
701 vaddr -= sizeof(unsigned);
702 if (db_user_to_kernel_address(task, vaddr, skaddr, 0) < 0)
703 return(-1);
704 kaddr = (unsigned *)*skaddr;
705 } else {
706 vaddr -= sizeof(unsigned);
707 kaddr--;
708 }
709 if ((*kaddr == 0) ^ (flag == 0)) {
710 *svaddr = vaddr;
711 *skaddr = (unsigned)kaddr;
712 return(0);
713 }
714 }
715 return(-1);
716 }
717
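/*
 * Print a name for the task by walking down from the top of its user
 * stack (DB_USER_STACK_ADDR) to the saved argument strings, emitting up
 * to DB_TASK_NAME_LEN printable characters padded with spaces.
 */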
718 void
719 db_task_name(
720 task_t task)
721 {
722 register char *p;
723 register int n;
724 unsigned vaddr, kaddr;
725
726 vaddr = DB_USER_STACK_ADDR;
727 kaddr = 0;
728
729 /*
730 * skip nulls at the end
731 */
732 if (db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 0) < 0) {
733 db_printf(DB_NULL_TASK_NAME);
734 return;
735 }
736 /*
737 * search start of args
738 */
739 if (db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 1) < 0) {
740 db_printf(DB_NULL_TASK_NAME);
741 return;
742 }
743
744 n = DB_TASK_NAME_LEN-1;
745 p = (char *)kaddr + sizeof(unsigned);
746 for (vaddr += sizeof(int); vaddr < DB_USER_STACK_ADDR && n > 0;
747 vaddr++, p++, n--) {
748 if (vaddr % INTEL_PGBYTES == 0) {
749 (void)db_user_to_kernel_address(task, vaddr, &kaddr, 0);
750 p = (char*)kaddr;
751 }
752 db_printf("%c", (*p < ' ' || *p > '~')? ' ': *p);
753 }
754 while (n-- >= 0) /* compare with >= 0 for one more space */
755 db_printf(" ");
756 }
757
758 /*
759 * Code used to synchronize kdb among all cpus, one active at a time, switch
760 * from one to another using kdb_on! #cpu or cpu #cpu
761 */
762
763 decl_simple_lock_data(, kdb_lock) /* kdb lock */
764
765 #define db_simple_lock_init(l, e) hw_lock_init(&((l)->interlock))
766 #define db_simple_lock_try(l) hw_lock_try(&((l)->interlock))
767 #define db_simple_unlock(l) hw_lock_unlock(&((l)->interlock))
768
769 int kdb_cpu = -1; /* current cpu running kdb */
770 int kdb_debug = 0;
771 volatile unsigned int cpus_holding_bkpts; /* number of cpus still holding
772 breakpoints (i.e. cpus that have not yet
773 re-inserted them into memory) */
774 extern boolean_t db_breakpoints_inserted;
775
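/*
 * Carve a per-cpu ddb stack out of db_stack_store and, on the master
 * cpu, point the debugger task's TSS at its own stack and at
 * db_task_start; the slave cpus' debugger TSSes are set up later in
 * mp_desc_init().
 */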
776 void
777 db_machdep_init(void)
778 {
779 int c;
780
781 db_simple_lock_init(&kdb_lock, 0);
782 for (c = 0; c < real_ncpus; ++c) {
783 db_stacks[c] = (vm_offset_t) (db_stack_store +
784 (INTSTACK_SIZE * (c + 1)) - sizeof (natural_t));
785 if (c == master_cpu) {
786 dbtss.esp0 = (int)(db_task_stack_store +
787 (INTSTACK_SIZE * (c + 1)) - sizeof (natural_t));
788 dbtss.esp = dbtss.esp0;
789 dbtss.eip = (int)&db_task_start;
790 /*
791 * The TSS for the debugging task on each slave CPU
792 * is set up in mp_desc_init().
793 */
794 }
795 }
796 }
797
798 /*
799 * Called when entering kdb:
800 * Takes the kdb lock. If we were called remotely (slave state) we just
801 * wait for kdb_cpu to become equal to cpu_number(). Otherwise enter kdb
802 * if it is not active on another cpu.
803 * If this cpu's cpu_db_pass_thru flag is set, kdb can't stop now.
804 */
805
806 int
807 kdb_enter(int pc)
808 {
809 int my_cpu;
810 int retval;
811
812 disable_preemption();
813
814 my_cpu = cpu_number();
815
816 if (current_cpu_datap()->cpu_db_pass_thru) {
817 retval = 0;
818 goto kdb_exit;
819 }
820
821 current_cpu_datap()->cpu_kdb_active++;
822 lock_kdb();
823
824 if (kdb_debug)
825 db_printf("kdb_enter: cpu %d, is_slave %d, kdb_cpu %d, run mode %d pc %x (%x) holds %d\n",
826 my_cpu, current_cpu_datap()->cpu_kdb_is_slave, kdb_cpu,
827 db_run_mode, pc, *(int *)pc, cpus_holding_bkpts);
828 if (db_breakpoints_inserted)
829 cpus_holding_bkpts++;
830 if (kdb_cpu == -1 && !current_cpu_datap()->cpu_kdb_is_slave) {
831 kdb_cpu = my_cpu;
832 remote_kdb(); /* stop other cpus */
833 retval = 1;
834 } else if (kdb_cpu == my_cpu)
835 retval = 1;
836 else
837 retval = 0;
838
839 kdb_exit:
840 enable_preemption();
841
842 return (retval);
843 }
844
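/*
 * Called when leaving kdb: drop this cpu's claim on the debugger and
 * release the kdb lock.  On a full continue (STEP_CONTINUE), give up
 * kdb_cpu and spin until every cpu has re-inserted the breakpoints
 * (cpus_holding_bkpts drains to zero) before resuming.
 */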
845 void
846 kdb_leave(void)
847 {
848 int my_cpu;
849 boolean_t wait = FALSE;
850
851 disable_preemption();
852
853 my_cpu = cpu_number();
854
855 if (db_run_mode == STEP_CONTINUE) {
856 wait = TRUE;
857 kdb_cpu = -1;
858 }
859 if (db_breakpoints_inserted)
860 cpus_holding_bkpts--;
861 if (current_cpu_datap()->cpu_kdb_is_slave)
862 current_cpu_datap()->cpu_kdb_is_slave--;
863 if (kdb_debug)
864 db_printf("kdb_leave: cpu %d, kdb_cpu %d, run_mode %d pc %x (%x) holds %d\n",
865 my_cpu, kdb_cpu, db_run_mode,
866 ddb_regs.eip, *(int *)ddb_regs.eip,
867 cpus_holding_bkpts);
868 clear_kdb_intr();
869 unlock_kdb();
870 current_cpu_datap()->cpu_kdb_active--;
871
872 enable_preemption();
873
874 if (wait) {
875 while(cpus_holding_bkpts);
876 }
877 }
878
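/*
 * Spin for the kdb lock, polling the console while waiting.  A cpu that
 * is not the designated kdb_cpu keeps spinning until kdb_cpu is released
 * (-1) or handed to it.
 */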
879 void
880 lock_kdb(void)
881 {
882 int my_cpu;
883 register int i;
884 extern void kdb_console(void);
885
886 disable_preemption();
887
888 my_cpu = cpu_number();
889
890 for(;;) {
891 kdb_console();
892 if (kdb_cpu != -1 && kdb_cpu != my_cpu) {
893 continue;
894 }
895 if (db_simple_lock_try(&kdb_lock)) {
896 if (kdb_cpu == -1 || kdb_cpu == my_cpu)
897 break;
898 db_simple_unlock(&kdb_lock);
899 }
900 }
901
902 enable_preemption();
903 }
904
905 #if TIME_STAMP
906 extern unsigned old_time_stamp;
907 #endif /* TIME_STAMP */
908
909 void
910 unlock_kdb(void)
911 {
912 db_simple_unlock(&kdb_lock);
913 #if TIME_STAMP
914 old_time_stamp = 0;
915 #endif /* TIME_STAMP */
916 }
917
918
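/*
 * KDB_SAVE_CTXT/KDB_RESTORE_CTXT snapshot and restore the global ddb
 * command state (run mode, counters, dot, registers, ...) so that
 * kdb_on() can hand the debugger to another cpu and later pick up where
 * this cpu left off.
 */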
919 #ifdef __STDC__
920 #define KDB_SAVE(type, name) extern type name; type name##_save = name
921 #define KDB_RESTORE(name) name = name##_save
922 #else /* __STDC__ */
923 #define KDB_SAVE(type, name) extern type name; type name/**/_save = name
924 #define KDB_RESTORE(name) name = name/**/_save
925 #endif /* __STDC__ */
926
927 #define KDB_SAVE_CTXT() \
928 KDB_SAVE(int, db_run_mode); \
929 KDB_SAVE(boolean_t, db_sstep_print); \
930 KDB_SAVE(int, db_loop_count); \
931 KDB_SAVE(int, db_call_depth); \
932 KDB_SAVE(int, db_inst_count); \
933 KDB_SAVE(int, db_last_inst_count); \
934 KDB_SAVE(int, db_load_count); \
935 KDB_SAVE(int, db_store_count); \
936 KDB_SAVE(boolean_t, db_cmd_loop_done); \
937 KDB_SAVE(jmp_buf_t *, db_recover); \
938 KDB_SAVE(db_addr_t, db_dot); \
939 KDB_SAVE(db_addr_t, db_last_addr); \
940 KDB_SAVE(db_addr_t, db_prev); \
941 KDB_SAVE(db_addr_t, db_next); \
942 KDB_SAVE(db_regs_t, ddb_regs);
943
944 #define KDB_RESTORE_CTXT() \
945 KDB_RESTORE(db_run_mode); \
946 KDB_RESTORE(db_sstep_print); \
947 KDB_RESTORE(db_loop_count); \
948 KDB_RESTORE(db_call_depth); \
949 KDB_RESTORE(db_inst_count); \
950 KDB_RESTORE(db_last_inst_count); \
951 KDB_RESTORE(db_load_count); \
952 KDB_RESTORE(db_store_count); \
953 KDB_RESTORE(db_cmd_loop_done); \
954 KDB_RESTORE(db_recover); \
955 KDB_RESTORE(db_dot); \
956 KDB_RESTORE(db_last_addr); \
957 KDB_RESTORE(db_prev); \
958 KDB_RESTORE(db_next); \
959 KDB_RESTORE(ddb_regs);
960
961 /*
962 * switch kdb to another cpu: hand kdb_cpu over, wait in lock_kdb() until
963 * control returns, then restore this cpu's saved ddb context. */
964
965 void
966 kdb_on(
967 int cpu)
968 {
969 KDB_SAVE_CTXT();
970 if (cpu < 0 || cpu >= real_ncpus || !cpu_datap(cpu)->cpu_kdb_active)
971 return;
972 db_set_breakpoints();
973 db_set_watchpoints();
974 kdb_cpu = cpu;
975 unlock_kdb();
976 lock_kdb();
977 db_clear_breakpoints();
978 db_clear_watchpoints();
979 KDB_RESTORE_CTXT();
980 if (kdb_cpu == -1) {/* someone continued */
981 kdb_cpu = cpu_number();
982 db_continue_cmd(0, 0, 0, "");
983 }
984 }
985
986 void db_reboot(
987 db_expr_t addr,
988 boolean_t have_addr,
989 db_expr_t count,
990 char *modif)
991 {
992 boolean_t reboot = TRUE;
993 char *cp, c;
994
995 cp = modif;
996 while ((c = *cp++) != 0) {
997 if (c == 'r') /* reboot */
998 reboot = TRUE;
999 if (c == 'h') /* halt */
1000 reboot = FALSE;
1001 }
1002 halt_all_cpus(reboot);
1003 }