/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/exception_types.h>
#include <kern/cpu_data.h>
#include <i386/trap.h>

#include <kdp/kdp_internal.h>
#include <kdp/kdp_callout.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
#include <kern/machine.h> /* for halt_all_cpus */
#include <libkern/OSAtomic.h>

#include <kern/thread.h>
#include <i386/thread.h>
#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <kern/kalloc.h>
#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) printf x
#else
#define dprintf(x)
#endif
extern cpu_type_t cpuid_cputype(void);
extern cpu_subtype_t cpuid_cpusubtype(void);
void        print_saved_state(void *);
void        kdp_call(void);
boolean_t   kdp_call_kdb(void);
void        kdp_getstate(x86_thread_state64_t *);
void        kdp_setstate(x86_thread_state64_t *);
void        kdp_print_phys(int);
int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound,
    int nframes, boolean_t user_p);
int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound,
    int nframes, boolean_t user_p);
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
static void kdp_callouts(kdp_event_t event);
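
/*
 * Build a KDP_EXCEPTION request in the supplied packet buffer,
 * describing the Mach exception being reported, and note that an
 * acknowledgement from the remote debugger is now outstanding.
 */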
void
kdp_exception(
    unsigned char   *pkt,
    int             *len,
    unsigned short  *remote_port,
    unsigned int    exception,
    unsigned int    code,
    unsigned int    subcode
)
{
    kdp_exception_t *rq = (kdp_exception_t *)pkt;

    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof (*rq);

    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);

    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    kdp.exception_ack_needed = TRUE;

    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}
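
/*
 * Validate an exception acknowledgement from the remote debugger:
 * the reply must carry the sequence number of the exception we sent
 * before the outstanding-ack flag is cleared.
 */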
boolean_t
kdp_exception_ack(
    unsigned char   *pkt,
    int             len
)
{
    kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;

    if (((unsigned int) len) < sizeof (*rq))
        return(FALSE);

    if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
        return(FALSE);

    dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

    if (rq->hdr.seq == kdp.exception_seq) {
        kdp.exception_ack_needed = FALSE;
        kdp.exception_seq++;
    }
    return(TRUE);
}
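
/*
 * Export the trap-time register file: copy the saved interrupt state
 * into the x86_thread_state64_t layout handed back to the debugger.
 */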
void
kdp_getstate(
    x86_thread_state64_t    *state
)
{
    x86_saved_state64_t *saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;

    state->rax = saved_state->rax;
    state->rbx = saved_state->rbx;
    state->rcx = saved_state->rcx;
    state->rdx = saved_state->rdx;
    state->rdi = saved_state->rdi;
    state->rsi = saved_state->rsi;
    state->rbp = saved_state->rbp;

    state->r8  = saved_state->r8;
    state->r9  = saved_state->r9;
    state->r10 = saved_state->r10;
    state->r11 = saved_state->r11;
    state->r12 = saved_state->r12;
    state->r13 = saved_state->r13;
    state->r14 = saved_state->r14;
    state->r15 = saved_state->r15;

    state->rsp = saved_state->isf.rsp;
    state->rflags = saved_state->isf.rflags;
    state->rip = saved_state->isf.rip;

    state->cs = saved_state->isf.cs;
    state->fs = saved_state->fs;
    state->gs = saved_state->gs;
}
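
/*
 * Inverse of kdp_getstate(): push debugger-modified register values
 * back into the saved state so they take effect when the interrupted
 * context resumes.
 */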
void
kdp_setstate(
    x86_thread_state64_t    *state
)
{
    x86_saved_state64_t *saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;

    saved_state->rax = state->rax;
    saved_state->rbx = state->rbx;
    saved_state->rcx = state->rcx;
    saved_state->rdx = state->rdx;
    saved_state->rdi = state->rdi;
    saved_state->rsi = state->rsi;
    saved_state->rbp = state->rbp;
    saved_state->r8  = state->r8;
    saved_state->r9  = state->r9;
    saved_state->r10 = state->r10;
    saved_state->r11 = state->r11;
    saved_state->r12 = state->r12;
    saved_state->r13 = state->r13;
    saved_state->r14 = state->r14;
    saved_state->r15 = state->r15;

    saved_state->isf.rflags = state->rflags;
    saved_state->isf.rsp = state->rsp;
    saved_state->isf.rip = state->rip;

    saved_state->fs = (uint32_t)state->fs;
    saved_state->gs = (uint32_t)state->gs;
}
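
/*
 * Read register state on the debugger's behalf.  Only the 64-bit
 * thread-state flavor carries live data; float state reads back as a
 * block of zeroes.
 */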
kdp_error_t
kdp_machine_read_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    int *size
)
{
    static x86_float_state64_t null_fpstate;

    switch (flavor) {

    case x86_THREAD_STATE64:
        dprintf(("kdp_readregs THREAD_STATE64\n"));
        kdp_getstate((x86_thread_state64_t *)data);
        *size = sizeof (x86_thread_state64_t);
        return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE64:
        dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
        *(x86_float_state64_t *)data = null_fpstate;
        *size = sizeof (x86_float_state64_t);
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_readregs bad flavor %d\n", flavor));
        *size = 0;
        return KDPERR_BADFLAVOR;
    }
}
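
/*
 * Write register state supplied by the debugger; float-state writes
 * are accepted but discarded.
 */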
kdp_error_t
kdp_machine_write_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    __unused int *size
)
{
    switch (flavor) {

    case x86_THREAD_STATE64:
        dprintf(("kdp_writeregs THREAD_STATE64\n"));
        kdp_setstate((x86_thread_state64_t *)data);
        return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE64:
        dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_writeregs bad flavor %d\n", flavor));
        return KDPERR_BADFLAVOR;
    }
}
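
/*
 * Report the mask of active CPUs and this machine's CPU type and
 * subtype to the remote debugger.
 */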
void
kdp_machine_hostinfo(
    kdp_hostinfo_t *hostinfo
)
{
    int i;

    hostinfo->cpus_mask = 0;

    for (i = 0; i < machine_info.max_cpus; i++) {
        if (cpu_data_ptr[i] == NULL)
            continue;

        hostinfo->cpus_mask |= (1 << i);
    }

    hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
    hostinfo->cpu_subtype = cpuid_cpusubtype();
}
void
kdp_panic(
    const char      *msg
)
{
    kprintf("kdp panic: %s\n", msg);
    __asm__ volatile("hlt");
}
void
kdp_machine_reboot(void)
{
    printf("Attempting system restart...");
    /* Call the platform specific restart */
    if (PE_halt_restart)
        (*PE_halt_restart)(kPERestartCPU);
    /* If we do reach this, give up */
    halt_all_cpus(TRUE);
}
int
kdp_us_spin(int usec)
{
    delay(usec/100);
    return(0);
}
void print_saved_state(void *state)
{
    x86_saved_state64_t *saved_state;

    saved_state = state;

    kprintf("pc = 0x%llx\n", saved_state->isf.rip);
    kprintf("cr2= 0x%llx\n", saved_state->cr2);
    kprintf("rp = TODO FIXME\n");
    kprintf("sp = %p\n", saved_state);
}
void
kdp_sync_cache(void)
{
    return; /* No op here. */
}
void
kdp_call(void)
{
    __asm__ volatile ("int $3"); /* Let the processor do the work */
}
typedef struct _cframe_t {
    struct _cframe_t    *prev;
    unsigned            caller;
    unsigned            args[0];
} cframe_t;
extern pt_entry_t *DMAP2;
extern caddr_t DADDR2;

void
kdp_print_phys(int src)
{
    unsigned int    *iptr;
    int             i;

    *(int *) DMAP2 = 0x63 | (src & 0xfffff000);
    invlpg((uintptr_t) DADDR2);
    iptr = (unsigned int *) DADDR2;
    for (i = 0; i < 100; i++) {
        kprintf("0x%x ", *iptr++);
        if ((i % 8) == 0)
            kprintf("\n");
    }
    kprintf("\n");
}
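
/*
 * KDP trap entry point: map an i386 trap number onto a Mach
 * exception/code/subcode triple and raise it to the debugger.
 */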
void
kdp_i386_trap(
    unsigned int        trapno,
    x86_saved_state64_t *saved_state,
    kern_return_t       result,
    vm_offset_t         va
)
{
    unsigned int exception, subcode = 0, code;

    if (trapno != T_INT3 && trapno != T_DEBUG) {
        kprintf("Debugger: Unexpected kernel trap number: "
                "0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
                trapno, saved_state->isf.rip, saved_state->cr2);
        if (!kdp.is_conn)
            return;
    }

    mp_kdp_enter();
    kdp_callouts(KDP_EVENT_ENTER);

    if (saved_state->isf.rflags & EFL_TF) {
        enable_preemption_no_check();
    }

    switch (trapno) {

    case T_DIVIDE_ERROR:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_DIVERR;
        break;

    case T_OVERFLOW:
        exception = EXC_SOFTWARE;
        code = EXC_I386_INTOFLT;
        break;

    case T_OUT_OF_BOUNDS:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_BOUNDFLT;
        break;

    case T_INVALID_OPCODE:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOPFLT;
        break;

    case T_SEGMENT_NOT_PRESENT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_STACK_FAULT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_GENERAL_PROTECTION:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_GPFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_PAGE_FAULT:
        exception = EXC_BAD_ACCESS;
        code = result;
        subcode = (unsigned int)va;
        break;

    case T_WATCHPOINT:
        exception = EXC_SOFTWARE;
        code = EXC_I386_ALIGNFLT;
        break;

    case T_DEBUG:
    case T_INT3:
        exception = EXC_BREAKPOINT;
        code = EXC_I386_BPTFLT;
        break;

    default:
        exception = EXC_BAD_INSTRUCTION;
        code = trapno;
        break;
    }

    if (current_cpu_datap()->cpu_fatal_trap_state) {
        current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
        saved_state = current_cpu_datap()->cpu_fatal_trap_state;
    }

    kdp_raise_exception(exception, code, subcode, saved_state);

    /* If the instruction single step bit is set, disable kernel preemption. */
    if (saved_state->isf.rflags & EFL_TF) {
        disable_preemption();
    }

    kdp_callouts(KDP_EVENT_EXIT);
    mp_kdp_exit();
}
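
/*
 * Return the architecture's breakpoint instruction (int3, 0xcc) for
 * the debugger to plant.
 */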
void
kdp_machine_get_breakinsn(
    uint8_t *bytes,
    uint32_t *size
)
{
    bytes[0] = 0xcc;    /* int3 */
    *size = 1;
}
extern pmap_t kdp_pmap;

#define RETURN_OFFSET 4
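
/*
 * Walk a 32-bit thread's frame-pointer chain, emitting one return
 * address per frame until nframes is reached, the frame chain breaks,
 * or the output buffer fills.  Returns the number of bytes written.
 */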
int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
    uint32_t *tracebuf = (uint32_t *)tracepos;
    uint32_t fence = 0;
    uint32_t stackptr = 0;
    uint32_t stacklimit = 0xfc000000;
    int framecount = 0;
    uint32_t init_eip = 0;
    uint32_t prevsp = 0;
    uint32_t framesize = 2 * sizeof(vm_offset_t);

    if (user_p) {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);
        init_eip = iss32->eip;
        stackptr = iss32->ebp;

        stacklimit = 0xffffffff;
        kdp_pmap = thread->task->map->pmap;
    }
    else
        panic("32-bit trace attempted on 64-bit kernel");

    *tracebuf++ = init_eip;

    for (framecount = 0; framecount < nframes; framecount++) {

        if ((tracebound - ((char *)tracebuf)) < (4 * framesize)) {
            tracebuf--;
            break;
        }

        *tracebuf++ = stackptr;
        /* Invalid frame, or hit fence */
        if (!stackptr || (stackptr == fence)) {
            break;
        }

        /* Unaligned frame */
        if (stackptr & 0x0000003) {
            break;
        }

        if (stackptr <= prevsp) {
            break;
        }

        if (stackptr > stacklimit) {
            break;
        }

        if (kdp_machine_vm_read((mach_vm_address_t)(stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(*tracebuf)) != sizeof(*tracebuf)) {
            break;
        }
        tracebuf++;

        prevsp = stackptr;
        if (kdp_machine_vm_read((mach_vm_address_t)stackptr, (caddr_t) &stackptr, sizeof(stackptr)) != sizeof(stackptr)) {
            *tracebuf++ = 0;
            break;
        }
    }

    kdp_pmap = 0;

    return (uint32_t) (((char *) tracebuf) - tracepos);
}
#define RETURN_OFFSET64 8
/* Routine to encapsulate the 64-bit address read hack */
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
{
    return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}
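
/*
 * 64-bit variant of machine_trace_thread(): the same frame-pointer
 * walk over addr64_t-sized frames, reading through machine_read64().
 */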
int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
    uint64_t *tracebuf = (uint64_t *)tracepos;
    addr64_t fence = 0;
    addr64_t stackptr = 0;
    int framecount = 0;
    addr64_t init_rip = 0;
    addr64_t prevsp = 0;
    unsigned framesize = 2 * sizeof(addr64_t);

    if (user_p) {
        x86_saved_state64_t *iss64;
        iss64 = USER_REGS64(thread);
        init_rip = iss64->isf.rip;
        stackptr = iss64->rbp;
        kdp_pmap = thread->task->map->pmap;
    }
    else {
        stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
        init_rip = STACK_IKS(thread->kernel_stack)->k_rip;
    }

    *tracebuf++ = init_rip;

    for (framecount = 0; framecount < nframes; framecount++) {

        if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
            tracebuf--;
            break;
        }

        *tracebuf++ = stackptr;

        /* Invalid frame, or hit fence */
        if (!stackptr || (stackptr == fence)) {
            break;
        }
        /* Unaligned frame */
        if (stackptr & 0x0000003) {
            break;
        }
        if (stackptr <= prevsp) {
            break;
        }

        if (machine_read64(stackptr + RETURN_OFFSET64, (caddr_t) tracebuf, sizeof(addr64_t)) != sizeof(addr64_t)) {
            break;
        }
        tracebuf++;

        prevsp = stackptr;
        if (machine_read64(stackptr, (caddr_t) &stackptr, sizeof(addr64_t)) != sizeof(addr64_t)) {
            *tracebuf++ = 0;
            break;
        }
    }

    kdp_pmap = NULL;

    return (uint32_t) (((char *) tracebuf) - tracepos);
}
static struct kdp_callout {
    struct kdp_callout  *callout_next;
    kdp_callout_fn_t    callout_fn;
    void                *callout_arg;
} *kdp_callout_list = NULL;

/*
 * Called from kernel context to register a kdp event callout.
 */
void
kdp_register_callout(
    kdp_callout_fn_t    fn,
    void                *arg)
{
    struct kdp_callout  *kcp;
    struct kdp_callout  *list_head;

    kcp = kalloc(sizeof(*kcp));
    if (kcp == NULL)
        panic("kdp_register_callout() kalloc failed");

    kcp->callout_fn  = fn;
    kcp->callout_arg = arg;

    /* Lock-less list insertion using compare and exchange. */
    do {
        list_head = kdp_callout_list;
        kcp->callout_next = list_head;
    } while (!OSCompareAndSwapPtr(list_head, kcp, (void * volatile *)&kdp_callout_list));
}
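
/*
 * Usage sketch (illustrative only; my_kdp_notice is a hypothetical
 * client function, not part of this file):
 *
 *	static void
 *	my_kdp_notice(void *arg, kdp_event_t event)
 *	{
 *		if (event == KDP_EVENT_ENTER)
 *			;	// quiesce client hardware/state here
 *	}
 *
 *	kdp_register_callout(my_kdp_notice, NULL);
 *
 * The registered function is invoked with its arg at debugger entry
 * and exit (KDP_EVENT_ENTER / KDP_EVENT_EXIT).
 */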
/*
 * Called at exception/panic time when entering or exiting kdp.
 * We are single-threaded at this time and so we don't use locks.
 */
static void
kdp_callouts(kdp_event_t event)
{
    struct kdp_callout  *kcp = kdp_callout_list;

    while (kcp) {
        kcp->callout_fn(kcp->callout_arg, event);
        kcp = kcp->callout_next;
    }
}
void
kdp_ml_enter_debugger(void)
{
    __asm__ __volatile__("int3");
}