/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/exception_types.h>
#include <kern/cpu_data.h>
#include <i386/trap.h>

#include <kdp/kdp_internal.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
#include <kern/machine.h> /* for halt_all_cpus */
#include <libkern/OSAtomic.h>

#include <kern/thread.h>
#include <i386/thread.h>
#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <kern/kalloc.h>
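
/*
 * x86_64 machine-dependent support for the kernel debugging protocol
 * (KDP): exception packet construction, register state accessors, trap
 * classification, and thread stack tracing for debugger backtraces.
 */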

#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) printf x
#else
#define dprintf(x)
#endif

extern cpu_type_t cpuid_cputype(void);
extern cpu_subtype_t cpuid_cpusubtype(void);

extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
extern void machine_trace_thread_clear_validation_cache(void);
extern vm_map_t kernel_map;

void print_saved_state(void *);
void kdp_getstate(x86_thread_state64_t *);
void kdp_setstate(x86_thread_state64_t *);
unsigned machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
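
/*
 * Build a KDP_EXCEPTION notification in the supplied packet buffer and
 * return the exception port through *remote_port; the caller forwards
 * the packet to the remote debugger.  An ack is expected and tracked
 * via kdp.exception_seq / kdp.exception_ack_needed.
 */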
void
kdp_exception(
    unsigned char   *pkt,
    int             *len,
    unsigned short  *remote_port,
    unsigned int    exception,
    unsigned int    code,
    unsigned int    subcode
    )
{
    kdp_exception_t *rq = (kdp_exception_t *)pkt;

    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof(*rq);

    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    kdp.exception_ack_needed = TRUE;

    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}
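
/*
 * Validate an exception ack from the remote debugger.  The ack is
 * honored only if it is a KDP_EXCEPTION reply whose sequence number
 * matches the outstanding exception.
 */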
boolean_t
kdp_exception_ack(
    unsigned char   *pkt,
    int             len
    )
{
    kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;

    if (((unsigned int) len) < sizeof(*rq)) {
        return FALSE;
    }

    if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
        return FALSE;
    }

    dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

    if (rq->hdr.seq == kdp.exception_seq) {
        kdp.exception_ack_needed = FALSE;
        kdp.exception_seq++;
    }
    return TRUE;
}
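
/* Copy the trap-time saved state into the thread-state layout the
 * debugger expects. */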
void
kdp_getstate(
    x86_thread_state64_t    *state
    )
{
    x86_saved_state64_t     *saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;

    state->rax = saved_state->rax;
    state->rbx = saved_state->rbx;
    state->rcx = saved_state->rcx;
    state->rdx = saved_state->rdx;
    state->rdi = saved_state->rdi;
    state->rsi = saved_state->rsi;
    state->rbp = saved_state->rbp;

    state->r8  = saved_state->r8;
    state->r9  = saved_state->r9;
    state->r10 = saved_state->r10;
    state->r11 = saved_state->r11;
    state->r12 = saved_state->r12;
    state->r13 = saved_state->r13;
    state->r14 = saved_state->r14;
    state->r15 = saved_state->r15;

    state->rsp = saved_state->isf.rsp;
    state->rflags = saved_state->isf.rflags;
    state->rip = saved_state->isf.rip;

    state->cs = saved_state->isf.cs;
    state->fs = saved_state->fs;
    state->gs = saved_state->gs;
}
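
/* Flush debugger-supplied register values back into the saved state
 * that will be restored when the machine resumes. */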
void
kdp_setstate(
    x86_thread_state64_t    *state
    )
{
    x86_saved_state64_t     *saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;

    saved_state->rax = state->rax;
    saved_state->rbx = state->rbx;
    saved_state->rcx = state->rcx;
    saved_state->rdx = state->rdx;
    saved_state->rdi = state->rdi;
    saved_state->rsi = state->rsi;
    saved_state->rbp = state->rbp;
    saved_state->r8  = state->r8;
    saved_state->r9  = state->r9;
    saved_state->r10 = state->r10;
    saved_state->r11 = state->r11;
    saved_state->r12 = state->r12;
    saved_state->r13 = state->r13;
    saved_state->r14 = state->r14;
    saved_state->r15 = state->r15;

    saved_state->isf.rflags = state->rflags;
    saved_state->isf.rsp = state->rsp;
    saved_state->isf.rip = state->rip;

    saved_state->fs = (uint32_t)state->fs;
    saved_state->gs = (uint32_t)state->gs;
}
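
/*
 * Read register state for the debugger.  Floating-point state is not
 * preserved for debugger use, so a zeroed x86_float_state64_t is
 * returned in its place.
 */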
kdp_error_t
kdp_machine_read_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    int *size
    )
{
    static x86_float_state64_t null_fpstate;

    switch (flavor) {
    case x86_THREAD_STATE64:
        dprintf(("kdp_readregs THREAD_STATE64\n"));
        kdp_getstate((x86_thread_state64_t *)data);
        *size = sizeof(x86_thread_state64_t);
        return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE64:
        dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
        *(x86_float_state64_t *)data = null_fpstate;
        *size = sizeof(x86_float_state64_t);
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_readregs bad flavor %d\n", flavor));
        *size = 0;
        return KDPERR_BADFLAVOR;
    }
}
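
/*
 * Write register state supplied by the debugger.  Floating-point
 * writes are accepted but ignored, mirroring the read side.
 */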
kdp_error_t
kdp_machine_write_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    __unused int *size
    )
{
    switch (flavor) {
    case x86_THREAD_STATE64:
        dprintf(("kdp_writeregs THREAD_STATE64\n"));
        kdp_setstate((x86_thread_state64_t *)data);
        return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE64:
        dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_writeregs bad flavor %d\n", flavor));
        return KDPERR_BADFLAVOR;
    }
}
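
/* Report the mask of populated CPUs and the CPU type/subtype. */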
void
kdp_machine_hostinfo(
    kdp_hostinfo_t *hostinfo
    )
{
    int i;

    hostinfo->cpus_mask = 0;

    for (i = 0; i < machine_info.max_cpus; i++) {
        if (cpu_data_ptr[i] == NULL) {
            continue;
        }

        hostinfo->cpus_mask |= (1 << i);
    }

    hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
    hostinfo->cpu_subtype = cpuid_cpusubtype();
}
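
/* Last-resort failure path for the debugger itself: log and halt. */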
void
kdp_panic(
    const char *msg
    )
{
    kprintf("kdp panic: %s\n", msg);
    __asm__ volatile ("hlt");
}

void
kdp_us_spin(int usec)
{
    delay(usec / 100);
}

void
print_saved_state(void *state)
{
    x86_saved_state64_t     *saved_state;

    saved_state = (x86_saved_state64_t *)state;

    kprintf("pc = 0x%llx\n", saved_state->isf.rip);
    kprintf("cr2= 0x%llx\n", saved_state->cr2);
    kprintf("rp = TODO FIXME\n");
    kprintf("sp = %p\n", saved_state);
}

void
kdp_sync_cache(void)
{
    return; /* No op here. */
}

void
kdp_call(void)
{
    __asm__ volatile ("int $3"); /* Let the processor do the work */
}

typedef struct _cframe_t {
    struct _cframe_t *prev;
    unsigned         caller;
    unsigned         args[0];
} cframe_t;

void
kdp_i386_trap(
    unsigned int            trapno,
    x86_saved_state64_t     *saved_state,
    kern_return_t           result,
    vm_offset_t             va
    )
{
    unsigned int exception, code, subcode = 0;
    boolean_t prev_interrupts_state;

    if (trapno != T_INT3 && trapno != T_DEBUG) {
        kprintf("Debugger: Unexpected kernel trap number: "
            "0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
            trapno, saved_state->isf.rip, saved_state->cr2);
    }

    prev_interrupts_state = ml_set_interrupts_enabled(FALSE);
    disable_preemption();

    if (saved_state->isf.rflags & EFL_TF) {
        enable_preemption_no_check();
    }

    switch (trapno) {
    case T_DIVIDE_ERROR:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_DIVERR;
        break;

    case T_OVERFLOW:
        exception = EXC_SOFTWARE;
        code = EXC_I386_INTOFLT;
        break;

    case T_OUT_OF_BOUNDS:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_BOUNDFLT;
        break;

    case T_INVALID_OPCODE:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOPFLT;
        break;

    case T_SEGMENT_NOT_PRESENT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_STACK_FAULT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_GENERAL_PROTECTION:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_GPFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_PAGE_FAULT:
        exception = EXC_BAD_ACCESS;
        code = result;
        subcode = (unsigned int)va;
        break;

    case T_WATCHPOINT:
        exception = EXC_SOFTWARE;
        code = EXC_I386_ALIGNFLT;
        break;

    case T_DEBUG:
    case T_INT3:
        exception = EXC_BREAKPOINT;
        code = EXC_I386_BPTFLT;
        break;

    default:
        exception = EXC_BAD_INSTRUCTION;
        code = trapno;
        break;
    }

    if (current_cpu_datap()->cpu_fatal_trap_state) {
        current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
        saved_state = current_cpu_datap()->cpu_fatal_trap_state;
    }

    handle_debugger_trap(exception, code, subcode, saved_state);

    enable_preemption();
    ml_set_interrupts_enabled(prev_interrupts_state);

    /* If the instruction single step bit is set, disable kernel preemption
     */
    if (saved_state->isf.rflags & EFL_TF) {
        disable_preemption();
    }
}

void
kdp_machine_get_breakinsn(
    uint8_t *bytes,
    uint32_t *size
    )
{
    bytes[0] = 0xcc;    /* int3 */
    *size = 1;
}

#define RETURN_OFFSET 4
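
/*
 * Walk a 32-bit user frame-pointer chain, emitting one word per frame
 * (two with trace_fp) into tracepos.  Each frame is assumed to be laid
 * out as [saved %ebp][return address], so the return address sits at
 * the frame pointer plus RETURN_OFFSET.  The walk stops on a NULL or
 * fence frame pointer, a misaligned or non-monotonic one, one beyond
 * stacklimit, or an unmappable address (the latter flagged as a
 * truncated backtrace via kThreadTruncatedBT).
 */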
int
machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags)
{
    uint32_t * tracebuf = (uint32_t *)tracepos;
    uint32_t framesize  = (trace_fp ? 2 : 1) * sizeof(uint32_t);

    uint32_t fence             = 0;
    uint32_t stackptr          = 0;
    uint32_t stacklimit        = 0xfc000000;
    int framecount             = 0;
    uint32_t prev_eip          = 0;
    uint32_t prevsp            = 0;
    vm_offset_t kern_virt_addr = 0;
    vm_map_t bt_vm_map         = VM_MAP_NULL;

    nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

    if (user_p) {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);
        prev_eip = iss32->eip;
        stackptr = iss32->ebp;

        stacklimit = 0xffffffff;
        bt_vm_map = thread->task->map;
    } else {
        panic("32-bit trace attempted on 64-bit kernel");
    }

    for (framecount = 0; framecount < nframes; framecount++) {
        *tracebuf++ = prev_eip;
        if (trace_fp) {
            *tracebuf++ = stackptr;
        }

        /* Invalid frame, or hit fence */
        if (!stackptr || (stackptr == fence)) {
            break;
        }

        /* Unaligned frame */
        if (stackptr & 0x0000003) {
            break;
        }

        if (stackptr <= prevsp) {
            break;
        }

        if (stackptr > stacklimit) {
            break;
        }

        kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET, bt_vm_map, thread_trace_flags);

        if (!kern_virt_addr) {
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
            break;
        }

        prev_eip = *(uint32_t *)kern_virt_addr;

        prevsp = stackptr;

        kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

        if (kern_virt_addr) {
            stackptr = *(uint32_t *)kern_virt_addr;
        } else {
            stackptr = 0;
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
        }
    }

    machine_trace_thread_clear_validation_cache();

    return (uint32_t) (((char *) tracebuf) - tracepos);
}

#define RETURN_OFFSET64 8
/* Routine to encapsulate the 64-bit address read hack */
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
{
    return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}
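
/*
 * 64-bit counterpart of machine_trace_thread: walks either the user
 * %rbp chain or the saved kernel stack of a non-running thread.
 * Kernel return addresses are passed through VM_KERNEL_UNSLIDE so the
 * debugger sees stable, pre-slide values.
 */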
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags,
    uint64_t * sp)
{
    uint64_t * tracebuf = (uint64_t *)tracepos;
    unsigned framesize  = (trace_fp ? 2 : 1) * sizeof(addr64_t);

    uint32_t fence             = 0;
    addr64_t stackptr          = 0;
    int framecount             = 0;
    addr64_t prev_rip          = 0;
    addr64_t prevsp            = 0;
    vm_offset_t kern_virt_addr = 0;
    vm_map_t bt_vm_map         = VM_MAP_NULL;

    nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

    if (user_p) {
        x86_saved_state64_t     *iss64;
        iss64 = USER_REGS64(thread);
        prev_rip = iss64->isf.rip;
        stackptr = iss64->rbp;
        bt_vm_map = thread->task->map;
        if (sp) {
            *sp = iss64->isf.rsp;
        }
    } else {
        stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
        prev_rip = STACK_IKS(thread->kernel_stack)->k_rip;
        prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
        bt_vm_map = kernel_map;
    }

    for (framecount = 0; framecount < nframes; framecount++) {
        *tracebuf++ = prev_rip;
        if (trace_fp) {
            *tracebuf++ = stackptr;
        }

        /* Invalid frame, or hit fence */
        if (!stackptr || (stackptr == fence)) {
            break;
        }

        /* Unaligned frame */
        if (stackptr & 0x0000007) {
            break;
        }

        if (stackptr <= prevsp) {
            break;
        }

        kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET64, bt_vm_map, thread_trace_flags);
        if (!kern_virt_addr) {
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
            break;
        }

        prev_rip = *(uint64_t *)kern_virt_addr;
        if (!user_p) {
            prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
        }

        prevsp = stackptr;

        kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

        if (kern_virt_addr) {
            stackptr = *(uint64_t *)kern_virt_addr;
        } else {
            stackptr = 0;
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
        }
    }

    machine_trace_thread_clear_validation_cache();

    return (uint32_t) (((char *) tracebuf) - tracepos);
}
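
/* Enter the debugger from the running CPU via a software breakpoint. */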
void
kdp_ml_enter_debugger(void)
{
    __asm__ __volatile__ ("int3");
}