/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
30 #include <mach/mach_types.h>
31 #include <mach/machine.h>
32 #include <mach/exception_types.h>
33 #include <kern/cpu_data.h>
34 #include <i386/trap.h>
36 #include <kdp/kdp_internal.h>
37 #include <kdp/kdp_callout.h>
38 #include <mach-o/loader.h>
39 #include <mach-o/nlist.h>
40 #include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
41 #include <kern/machine.h> /* for halt_all_cpus */
43 #include <kern/thread.h>
44 #include <i386/thread.h>
45 #include <vm/vm_map.h>
46 #include <i386/pmap.h>
47 #include <kern/kalloc.h>
49 #define KDP_TEST_HARNESS 0
51 #define dprintf(x) printf x
56 extern cpu_type_t
cpuid_cputype(void);
57 extern cpu_subtype_t
cpuid_cpusubtype(void);
59 void print_saved_state(void *);
62 boolean_t
kdp_call_kdb(void);
63 void kdp_getstate(i386_thread_state_t
*);
64 void kdp_setstate(i386_thread_state_t
*);
65 void kdp_print_phys(int);
68 machine_trace_thread(thread_t thread
, char *tracepos
, char *tracebound
, int nframes
, boolean_t user_p
);
71 machine_trace_thread64(thread_t thread
, char *tracepos
, char *tracebound
, int nframes
, boolean_t user_p
);
74 machine_read64(addr64_t srcaddr
, caddr_t dstaddr
, uint32_t len
);
76 extern unsigned kdp_vm_read(caddr_t src
, caddr_t dst
, unsigned len
);
78 static void kdp_callouts(kdp_event_t event
);
84 unsigned short *remote_port
,
85 unsigned int exception
,
90 kdp_exception_t
*rq
= (kdp_exception_t
*)pkt
;
92 rq
->hdr
.request
= KDP_EXCEPTION
;
94 rq
->hdr
.seq
= kdp
.exception_seq
;
96 rq
->hdr
.len
= sizeof (*rq
);
99 rq
->exc_info
[0].cpu
= 0;
100 rq
->exc_info
[0].exception
= exception
;
101 rq
->exc_info
[0].code
= code
;
102 rq
->exc_info
[0].subcode
= subcode
;
104 rq
->hdr
.len
+= rq
->n_exc_info
* sizeof (kdp_exc_info_t
);
106 bcopy((char *)rq
, (char *)pkt
, rq
->hdr
.len
);
108 kdp
.exception_ack_needed
= TRUE
;
110 *remote_port
= kdp
.exception_port
;
120 kdp_exception_ack_t
*rq
= (kdp_exception_ack_t
*)pkt
;
122 if (((unsigned int) len
) < sizeof (*rq
))
125 if (!rq
->hdr
.is_reply
|| rq
->hdr
.request
!= KDP_EXCEPTION
)
128 dprintf(("kdp_exception_ack seq %x %x\n", rq
->hdr
.seq
, kdp
.exception_seq
));
130 if (rq
->hdr
.seq
== kdp
.exception_seq
) {
131 kdp
.exception_ack_needed
= FALSE
;
139 x86_thread_state32_t
*state
142 static x86_thread_state32_t null_state
;
143 x86_saved_state32_t
*saved_state
;
145 saved_state
= (x86_saved_state32_t
*)kdp
.saved_state
;
148 state
->eax
= saved_state
->eax
;
149 state
->ebx
= saved_state
->ebx
;
150 state
->ecx
= saved_state
->ecx
;
151 state
->edx
= saved_state
->edx
;
152 state
->edi
= saved_state
->edi
;
153 state
->esi
= saved_state
->esi
;
154 state
->ebp
= saved_state
->ebp
;
156 if ((saved_state
->cs
& SEL_PL
) == SEL_PL_K
) { /* Kernel state? */
157 if (cpu_mode_is64bit())
158 state
->esp
= (uint32_t) saved_state
->uesp
;
160 state
->esp
= ((uint32_t)saved_state
) + offsetof(x86_saved_state_t
, ss_32
) + sizeof(x86_saved_state32_t
);
161 state
->ss
= KERNEL_DS
;
163 state
->esp
= saved_state
->uesp
;
164 state
->ss
= saved_state
->ss
;
167 state
->eflags
= saved_state
->efl
;
168 state
->eip
= saved_state
->eip
;
169 state
->cs
= saved_state
->cs
;
170 state
->ds
= saved_state
->ds
;
171 state
->es
= saved_state
->es
;
172 state
->fs
= saved_state
->fs
;
173 state
->gs
= saved_state
->gs
;
179 x86_thread_state32_t
*state
182 x86_saved_state32_t
*saved_state
;
184 saved_state
= (x86_saved_state32_t
*)kdp
.saved_state
;
186 saved_state
->eax
= state
->eax
;
187 saved_state
->ebx
= state
->ebx
;
188 saved_state
->ecx
= state
->ecx
;
189 saved_state
->edx
= state
->edx
;
190 saved_state
->edi
= state
->edi
;
191 saved_state
->esi
= state
->esi
;
192 saved_state
->ebp
= state
->ebp
;
193 saved_state
->efl
= state
->eflags
;
195 saved_state
->frame
.eflags
&= ~( EFL_VM
| EFL_NT
| EFL_IOPL
| EFL_CLR
);
196 saved_state
->frame
.eflags
|= ( EFL_IF
| EFL_SET
);
198 saved_state
->eip
= state
->eip
;
203 kdp_machine_read_regs(
204 __unused
unsigned int cpu
,
205 __unused
unsigned int flavor
,
210 static x86_float_state32_t null_fpstate
;
214 case x86_THREAD_STATE32
:
215 dprintf(("kdp_readregs THREAD_STATE\n"));
216 kdp_getstate((x86_thread_state32_t
*)data
);
217 *size
= sizeof (x86_thread_state32_t
);
218 return KDPERR_NO_ERROR
;
220 case x86_FLOAT_STATE32
:
221 dprintf(("kdp_readregs THREAD_FPSTATE\n"));
222 *(x86_float_state32_t
*)data
= null_fpstate
;
223 *size
= sizeof (x86_float_state32_t
);
224 return KDPERR_NO_ERROR
;
227 dprintf(("kdp_readregs bad flavor %d\n", flavor
));
229 return KDPERR_BADFLAVOR
;
234 kdp_machine_write_regs(
235 __unused
unsigned int cpu
,
243 case x86_THREAD_STATE32
:
244 dprintf(("kdp_writeregs THREAD_STATE\n"));
245 kdp_setstate((x86_thread_state32_t
*)data
);
246 return KDPERR_NO_ERROR
;
248 case x86_FLOAT_STATE32
:
249 dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
250 return KDPERR_NO_ERROR
;
253 dprintf(("kdp_writeregs bad flavor %d\n"));
254 return KDPERR_BADFLAVOR
;
261 kdp_machine_hostinfo(
262 kdp_hostinfo_t
*hostinfo
267 hostinfo
->cpus_mask
= 0;
269 for (i
= 0; i
< machine_info
.max_cpus
; i
++) {
270 if (cpu_data_ptr
[i
] == NULL
)
273 hostinfo
->cpus_mask
|= (1 << i
);
276 hostinfo
->cpu_type
= cpuid_cputype();
277 hostinfo
->cpu_subtype
= cpuid_cpusubtype();
/*
 * Last-resort failure path for the debugger stub itself: log the message
 * (when kprintf strings are compiled in) and halt the CPU.  Does not
 * return.
 */
void
kdp_panic(
#if CONFIG_NO_KPRINTF_STRINGS
    /* kprintf strings are compiled out, so msg is never read. */
    __unused const char *msg
#else
    const char *msg
#endif
)
{
    kprintf("kdp panic: %s\n", msg);
    __asm__ volatile("hlt");
}
297 printf("Attempting system restart...");
298 /* Call the platform specific restart*/
300 (*PE_halt_restart
)(kPERestartCPU
);
301 /* If we do reach this, give up */
/*
 * Busy-wait for approximately 'usec' microseconds.
 * NOTE(review): the body was lost in extraction; reconstructed as a
 * delay() call per the original XNU source — confirm the divisor.
 */
void
kdp_us_spin(int usec)
{
	delay(usec / 100);
}
329 void print_saved_state(void *state
)
331 x86_saved_state32_t
*saved_state
;
335 kprintf("pc = 0x%x\n", saved_state
->eip
);
336 kprintf("cr2= 0x%x\n", saved_state
->cr2
);
337 kprintf("rp = TODO FIXME\n");
338 kprintf("sp = %p\n", saved_state
);
/*
 * Cache synchronization hook required by the machine-independent KDP
 * layer; x86 caches are coherent so nothing is needed here.
 */
void
kdp_sync_cache(void)
{
	return;	/* No op here. */
}
/*
 * Enter the debugger by executing a breakpoint instruction; the resulting
 * INT3 trap is routed to kdp_i386_trap().
 */
void
kdp_call(void)
{
	__asm__ volatile ("int	$3");	/* Let the processor do the work */
}
355 typedef struct _cframe_t
{
356 struct _cframe_t
*prev
;
361 #include <i386/pmap.h>
362 extern pt_entry_t
*DMAP2
;
363 extern caddr_t DADDR2
;
366 kdp_print_phys(int src
)
371 *(int *) DMAP2
= 0x63 | (src
& 0xfffff000);
372 invlpg((u_int
) DADDR2
);
373 iptr
= (unsigned int *) DADDR2
;
374 for (i
= 0; i
< 100; i
++) {
375 kprintf("0x%x ", *iptr
++);
387 x86_saved_state32_t
*saved_state
,
388 kern_return_t result
,
392 unsigned int exception
, subcode
= 0, code
;
394 if (trapno
!= T_INT3
&& trapno
!= T_DEBUG
) {
395 kprintf("Debugger: Unexpected kernel trap number: "
396 "0x%x, EIP: 0x%x, CR2: 0x%x\n",
397 trapno
, saved_state
->eip
, saved_state
->cr2
);
403 kdp_callouts(KDP_EVENT_ENTER
);
405 if (saved_state
->efl
& EFL_TF
) {
406 enable_preemption_no_check();
412 exception
= EXC_ARITHMETIC
;
413 code
= EXC_I386_DIVERR
;
417 exception
= EXC_SOFTWARE
;
418 code
= EXC_I386_INTOFLT
;
421 case T_OUT_OF_BOUNDS
:
422 exception
= EXC_ARITHMETIC
;
423 code
= EXC_I386_BOUNDFLT
;
426 case T_INVALID_OPCODE
:
427 exception
= EXC_BAD_INSTRUCTION
;
428 code
= EXC_I386_INVOPFLT
;
431 case T_SEGMENT_NOT_PRESENT
:
432 exception
= EXC_BAD_INSTRUCTION
;
433 code
= EXC_I386_SEGNPFLT
;
434 subcode
= saved_state
->err
;
438 exception
= EXC_BAD_INSTRUCTION
;
439 code
= EXC_I386_STKFLT
;
440 subcode
= saved_state
->err
;
443 case T_GENERAL_PROTECTION
:
444 exception
= EXC_BAD_INSTRUCTION
;
445 code
= EXC_I386_GPFLT
;
446 subcode
= saved_state
->err
;
450 exception
= EXC_BAD_ACCESS
;
456 exception
= EXC_SOFTWARE
;
457 code
= EXC_I386_ALIGNFLT
;
462 exception
= EXC_BREAKPOINT
;
463 code
= EXC_I386_BPTFLT
;
467 exception
= EXC_BAD_INSTRUCTION
;
472 kdp_raise_exception(exception
, code
, subcode
, saved_state
);
473 /* If the instruction single step bit is set, disable kernel preemption
475 if (saved_state
->efl
& EFL_TF
) {
476 disable_preemption();
479 kdp_callouts(KDP_EVENT_EXIT
);
/*
 * Return the machine's breakpoint instruction for the debugger to plant:
 * 0xCC is the single-byte x86 INT3 opcode.
 */
unsigned int
kdp_ml_get_breakinsn(void)
{
	return 0xcc;
}
498 extern pmap_t kdp_pmap
;
499 extern uint32_t kdp_src_high32
;
501 #define RETURN_OFFSET 4
503 machine_trace_thread(thread_t thread
, char *tracepos
, char *tracebound
, int nframes
, boolean_t user_p
)
505 uint32_t *tracebuf
= (uint32_t *)tracepos
;
507 uint32_t stackptr
= 0;
508 uint32_t stacklimit
= 0xfc000000;
510 uint32_t init_eip
= 0;
512 uint32_t framesize
= 2 * sizeof(vm_offset_t
);
515 x86_saved_state32_t
*iss32
;
517 iss32
= USER_REGS32(thread
);
519 init_eip
= iss32
->eip
;
520 stackptr
= iss32
->ebp
;
522 /* This bound isn't useful, but it doesn't hinder us*/
523 stacklimit
= 0xffffffff;
524 kdp_pmap
= thread
->task
->map
->pmap
;
527 /*Examine the i386_saved_state at the base of the kernel stack*/
528 stackptr
= STACK_IKS(thread
->kernel_stack
)->k_ebp
;
529 init_eip
= STACK_IKS(thread
->kernel_stack
)->k_eip
;
532 *tracebuf
++ = init_eip
;
534 for (framecount
= 0; framecount
< nframes
; framecount
++) {
536 if ((uint32_t)(tracebound
- ((char *)tracebuf
)) < (4 * framesize
)) {
541 *tracebuf
++ = stackptr
;
542 /* Invalid frame, or hit fence */
543 if (!stackptr
|| (stackptr
== fence
)) {
546 /* Stack grows downward */
547 if (stackptr
< prevsp
) {
550 /* Unaligned frame */
551 if (stackptr
& 0x0000003) {
554 if (stackptr
> stacklimit
) {
558 if (kdp_vm_read((caddr_t
) (stackptr
+ RETURN_OFFSET
), (caddr_t
) tracebuf
, sizeof(caddr_t
)) != sizeof(caddr_t
)) {
564 if (kdp_vm_read((caddr_t
) stackptr
, (caddr_t
) &stackptr
, sizeof(caddr_t
)) != sizeof(caddr_t
)) {
572 return (uint32_t) (((char *) tracebuf
) - tracepos
);
575 #define RETURN_OFFSET64 8
576 /* Routine to encapsulate the 64-bit address read hack*/
578 machine_read64(addr64_t srcaddr
, caddr_t dstaddr
, uint32_t len
)
580 uint32_t kdp_vm_read_low32
;
583 kdp_src_high32
= srcaddr
>> 32;
584 kdp_vm_read_low32
= srcaddr
& 0x00000000FFFFFFFFUL
;
585 retval
= kdp_vm_read((caddr_t
)kdp_vm_read_low32
, dstaddr
, len
);
591 machine_trace_thread64(thread_t thread
, char *tracepos
, char *tracebound
, int nframes
, boolean_t user_p
)
593 uint64_t *tracebuf
= (uint64_t *)tracepos
;
595 addr64_t stackptr
= 0;
596 uint64_t stacklimit
= 0xfc000000;
598 addr64_t init_rip
= 0;
600 unsigned framesize
= 2 * sizeof(addr64_t
);
603 x86_saved_state64_t
*iss64
;
604 iss64
= USER_REGS64(thread
);
605 init_rip
= iss64
->isf
.rip
;
606 stackptr
= iss64
->rbp
;
607 stacklimit
= 0xffffffffffffffffULL
;
608 kdp_pmap
= thread
->task
->map
->pmap
;
611 /* DRK: This would need to adapt for a 64-bit kernel, if any */
612 stackptr
= STACK_IKS(thread
->kernel_stack
)->k_ebp
;
613 init_rip
= STACK_IKS(thread
->kernel_stack
)->k_eip
;
616 *tracebuf
++ = init_rip
;
618 for (framecount
= 0; framecount
< nframes
; framecount
++) {
620 if ((uint32_t)(tracebound
- ((char *)tracebuf
)) < (4 * framesize
)) {
625 *tracebuf
++ = stackptr
;
627 if (!stackptr
|| (stackptr
== fence
)){
630 if (stackptr
< prevsp
) {
633 if (stackptr
& 0x0000003) {
636 if (stackptr
> stacklimit
) {
640 if (machine_read64(stackptr
+ RETURN_OFFSET64
, (caddr_t
) tracebuf
, sizeof(addr64_t
)) != sizeof(addr64_t
)) {
646 if (machine_read64(stackptr
, (caddr_t
) &stackptr
, sizeof(addr64_t
)) != sizeof(addr64_t
)) {
654 return (uint32_t) (((char *) tracebuf
) - tracepos
);
657 static struct kdp_callout
{
658 struct kdp_callout
*callout_next
;
659 kdp_callout_fn_t callout_fn
;
661 } *kdp_callout_list
= NULL
;
665 * Called from kernel context to register a kdp event callout.
668 kdp_register_callout(
672 struct kdp_callout
*kcp
;
673 struct kdp_callout
*list_head
;
675 kcp
= kalloc(sizeof(*kcp
));
677 panic("kdp_register_callout() kalloc failed");
679 kcp
->callout_fn
= fn
;
680 kcp
->callout_arg
= arg
;
682 /* Lock-less list insertion using compare and exchange. */
684 list_head
= kdp_callout_list
;
685 kcp
->callout_next
= list_head
;
686 } while(!atomic_cmpxchg((uint32_t *) &kdp_callout_list
,
687 (uint32_t) list_head
,
692 * Called at exception/panic time when extering or exiting kdp.
693 * We are single-threaded at this time and so we don't use locks.
696 kdp_callouts(kdp_event_t event
)
698 struct kdp_callout
*kcp
= kdp_callout_list
;
701 kcp
->callout_fn(kcp
->callout_arg
, event
);
702 kcp
= kcp
->callout_next
;
/*
 * Drop into the debugger immediately by executing INT3.
 */
void
kdp_ml_enter_debugger(void)
{
	__asm__ __volatile__("int3");
}