/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/exception_types.h>

#include <arm/exception.h>
#include <arm/proc_reg.h>
#include <arm/thread.h>
#include <arm/cpu_data_internal.h>

#include <kdp/kdp_internal.h>
#include <kern/debug.h>
#include <IOKit/IOPlatformExpert.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_map.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif
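
/*
 * KDP_TEST_HARNESS toggles dprintf(): when it is enabled, KDP debug output
 * goes through kprintf; otherwise dprintf() compiles away to nothing.
 */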
#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x) do {} while (0)
#endif

void halt_all_cpus(boolean_t, char *);

int machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    uint32_t * thread_trace_flags);
int machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    uint32_t * thread_trace_flags,
    uint64_t *sp_out,
    vm_offset_t fp);

void kdp_trap(unsigned int, struct arm_saved_state * saved_state);

extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
extern void machine_trace_thread_clear_validation_cache(void);
extern vm_map_t kernel_map;
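
/*
 * machine_trace_thread_get_kva() validates a target address against the
 * given map and returns a kernel virtual address that is safe to
 * dereference from debugger context, or 0 if the address cannot be
 * validated; machine_trace_thread_clear_validation_cache() drops the
 * translation it caches once a trace is finished.
 */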

#if CONFIG_KDP_INTERACTIVE_DEBUGGING

void
kdp_exception(unsigned char * pkt, int * len, unsigned short * remote_port,
    unsigned int exception, unsigned int code, unsigned int subcode)
{
    struct {
        kdp_exception_t pkt;
        kdp_exc_info_t  exc;
    } aligned_pkt;
    kdp_exception_t * rq = (kdp_exception_t *)&aligned_pkt;

    bcopy((char *)pkt, (char *)rq, sizeof(*rq));
    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof(*rq) + sizeof(kdp_exc_info_t);

    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    kdp.exception_ack_needed = TRUE;

    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}
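
/*
 * The debugger acknowledges a KDP_EXCEPTION notification by echoing the
 * sequence number back; kdp_exception_ack() matches the reply against
 * kdp.exception_seq before clearing exception_ack_needed.
 */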
boolean_t
kdp_exception_ack(unsigned char * pkt, int len)
{
    kdp_exception_ack_t aligned_pkt;
    kdp_exception_ack_t * rq = (kdp_exception_ack_t *)&aligned_pkt;

    if ((unsigned)len < sizeof(*rq)) {
        return FALSE;
    }

    bcopy((char *)pkt, (char *)rq, sizeof(*rq));

    if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
        return FALSE;
    }

    dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

    if (rq->hdr.seq == kdp.exception_seq) {
        kdp.exception_ack_needed = FALSE;
        kdp.exception_seq++;
    }
    return TRUE;
}
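
/*
 * Register state is exchanged with the remote debugger in Mach thread-state
 * flavors; the helpers below convert between the saved exception state
 * captured at debugger entry (kdp.saved_state) and those flavors.
 */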
static void
kdp_getintegerstate(char * out_state)
{
#if defined(__arm__)
    struct arm_thread_state thread_state;
    struct arm_saved_state *saved_state;

    saved_state = kdp.saved_state;

    bzero((char *) &thread_state, sizeof(struct arm_thread_state));

    saved_state_to_thread_state32(saved_state, &thread_state);

    bcopy((char *) &thread_state, (char *) out_state, sizeof(struct arm_thread_state));
#elif defined(__arm64__)
    struct arm_thread_state64 thread_state64;
    arm_saved_state_t *saved_state;

    saved_state = kdp.saved_state;
    assert(is_saved_state64(saved_state));

    bzero((char *) &thread_state64, sizeof(struct arm_thread_state64));

    saved_state_to_thread_state64(saved_state, &thread_state64);

    bcopy((char *) &thread_state64, (char *) out_state, sizeof(struct arm_thread_state64));
#else
#error Unknown architecture.
#endif
}

kdp_error_t
kdp_machine_read_regs(__unused unsigned int cpu, unsigned int flavor, char * data, int * size)
{
    switch (flavor) {
#if defined(__arm__)
    case ARM_THREAD_STATE:
        dprintf(("kdp_readregs THREAD_STATE\n"));
        kdp_getintegerstate(data);
        *size = ARM_THREAD_STATE_COUNT * sizeof(int);
        return KDPERR_NO_ERROR;
#elif defined(__arm64__)
    case ARM_THREAD_STATE64:
        dprintf(("kdp_readregs THREAD_STATE64\n"));
        kdp_getintegerstate(data);
        *size = ARM_THREAD_STATE64_COUNT * sizeof(int);
        return KDPERR_NO_ERROR;
#endif

    case ARM_VFP_STATE:
        dprintf(("kdp_readregs THREAD_FPSTATE\n"));
        bzero((char *) data, sizeof(struct arm_vfp_state));
        *size = ARM_VFP_STATE_COUNT * sizeof(int);
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_readregs bad flavor %d\n", flavor));
        return KDPERR_BADFLAVOR;
    }
}

static void
kdp_setintegerstate(char * state_in)
{
#if defined(__arm__)
    struct arm_thread_state thread_state;
    struct arm_saved_state *saved_state;

    bcopy((char *) state_in, (char *) &thread_state, sizeof(struct arm_thread_state));
    saved_state = kdp.saved_state;

    thread_state32_to_saved_state(&thread_state, saved_state);
#elif defined(__arm64__)
    struct arm_thread_state64 thread_state64;
    struct arm_saved_state *saved_state;

    bcopy((char *) state_in, (char *) &thread_state64, sizeof(struct arm_thread_state64));
    saved_state = kdp.saved_state;
    assert(is_saved_state64(saved_state));

    thread_state64_to_saved_state(&thread_state64, saved_state);
#else
#error Unknown architecture.
#endif
}

kdp_error_t
kdp_machine_write_regs(__unused unsigned int cpu, unsigned int flavor, char * data, __unused int * size)
{
    switch (flavor) {
#if defined(__arm__)
    case ARM_THREAD_STATE:
        dprintf(("kdp_writeregs THREAD_STATE\n"));
        kdp_setintegerstate(data);
        return KDPERR_NO_ERROR;
#elif defined(__arm64__)
    case ARM_THREAD_STATE64:
        dprintf(("kdp_writeregs THREAD_STATE64\n"));
        kdp_setintegerstate(data);
        return KDPERR_NO_ERROR;
#endif

    case ARM_VFP_STATE:
        dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_writeregs bad flavor %d\n", flavor));
        return KDPERR_BADFLAVOR;
    }
}

void
kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)
{
    hostinfo->cpus_mask = 1;
    hostinfo->cpu_type = slot_type(0);
    hostinfo->cpu_subtype = slot_subtype(0);
}

__attribute__((noreturn))
static void
kdp_panic(const char * fmt, ...)
{
    char kdp_fmt[256];
    va_list args;

    va_start(args, fmt);
    (void) snprintf(kdp_fmt, sizeof(kdp_fmt), "kdp panic: %s", fmt);
    vprintf(kdp_fmt, args);
    va_end(args);

    while (1) {
    }
}

void
kdp_us_spin(int usec)
{
    delay(usec / 100);
}

void
kdp_call(void)
{
    Debugger("inline call to debugger(machine_startup)");
}
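
/*
 * The debugger plants breakpoints by writing the 4-byte trap instruction
 * returned here; kdp_trap() below recognizes the same encodings and steps
 * the PC past them on resume.
 */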
void
kdp_machine_get_breakinsn(uint8_t * bytes, uint32_t * size)
{
    *(uint32_t *)bytes = GDB_TRAP_INSTR1;
    *size = sizeof(uint32_t);
}
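
/*
 * I/O-port and MSR accesses are x86 concepts; on ARM the four handlers
 * below are stubs that ignore their arguments.
 */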
int
kdp_machine_ioport_read(kdp_readioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
    return 0;
}

int
kdp_machine_ioport_write(kdp_writeioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
    return 0;
}

int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
    return 0;
}

int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
    return 0;
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
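
/*
 * kdp_trap() is the entry point from the exception vectors: it hands the
 * exception to handle_debugger_trap() and, once the debugger resumes the
 * system, advances the PC past a recognized trap instruction so execution
 * continues after the breakpoint.
 */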
void
kdp_trap(unsigned int exception, struct arm_saved_state * saved_state)
{
    handle_debugger_trap(exception, 0, 0, saved_state);

#if defined(__arm__)
    if (saved_state->cpsr & PSR_TF) {
        unsigned short instr = *((unsigned short *)(saved_state->pc));
        if ((instr == (GDB_TRAP_INSTR1 & 0xFFFF)) || (instr == (GDB_TRAP_INSTR2 & 0xFFFF))) {
            saved_state->pc += 2;
        }
    } else {
        unsigned int instr = *((unsigned int *)(saved_state->pc));
        if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
            saved_state->pc += 4;
        }
    }

#elif defined(__arm64__)
    assert(is_saved_state64(saved_state));

    uint32_t instr = *((uint32_t *)get_saved_state_pc(saved_state));

    /*
     * As long as we are using the arm32 trap encoding to handle
     * traps to the debugger, we should identify both variants and
     * increment for both of them.
     */
    if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
        add_saved_state_pc(saved_state, 4);
    }
#else
#error Unknown architecture.
#endif
}

#define ARM32_LR_OFFSET 4
#define ARM64_LR_OFFSET 8

/*
 * Since sizeof(struct thread_snapshot) % 4 == 2,
 * make sure the compiler does not try to use word-aligned
 * accesses to this data, which can result in alignment faults
 * that can't be emulated in KDP context.
 */
typedef uint32_t uint32_align2_t __attribute__((aligned(2)));
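
/*
 * machine_trace_thread() walks the 32-bit frame-pointer chain of a thread,
 * writing one saved LR per frame into tracepos until nframes entries have
 * been produced, the chain ends, or a frame fails validation.
 */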
int
machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    uint32_t * thread_trace_flags)
{
    uint32_align2_t * tracebuf = (uint32_align2_t *)tracepos;

    vm_size_t framesize = sizeof(uint32_t);

    vm_offset_t stacklimit        = 0;
    vm_offset_t stacklimit_bottom = 0;
    int framecount                = 0;
    uint32_t short_fp             = 0;
    vm_offset_t fp                = 0;
    vm_offset_t pc, sp;
    vm_offset_t prevfp            = 0;
    uint32_t prevlr               = 0;
    struct arm_saved_state * state;
    vm_offset_t kern_virt_addr    = 0;
    vm_map_t bt_vm_map            = VM_MAP_NULL;

    nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
    if (!nframes) {
        return 0;
    }

    if (user_p) {
        /* Examine the user savearea */
        state = get_user_regs(thread);
        stacklimit = VM_MAX_ADDRESS;
        stacklimit_bottom = VM_MIN_ADDRESS;

        /* Fake up a stack frame for the PC */
        *tracebuf++ = (uint32_t)get_saved_state_pc(state);
        framecount++;
        bt_vm_map = thread->task->map;
    } else {
#if defined(__arm64__)
        panic("Attempted to trace kernel thread_t %p as a 32-bit context", thread);
        __builtin_unreachable();
#elif defined(__arm__)
        /* kstackptr may not always be there, so recompute it */
        state = &thread_get_kernel_state(thread)->machine;

        stacklimit = VM_MAX_KERNEL_ADDRESS;
        stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
        bt_vm_map = kernel_map;
#else
#error Unknown architecture.
#endif
    }

    /* Get the frame pointer */
    fp = get_saved_state_fp(state);

    /* Fill in the current link register */
    prevlr = (uint32_t)get_saved_state_lr(state);
    pc = get_saved_state_pc(state);
    sp = get_saved_state_sp(state);

    if (!user_p && !prevlr && !fp && !sp && !pc) {
        return 0;
    }

    if (!user_p) {
        /* This is safe since we will panic above on __arm64__ if !user_p */
        prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);
    }

    for (; framecount < nframes; framecount++) {
        *tracebuf++ = prevlr;

        /* Invalid frame */
        if (!fp) {
            break;
        }
        /* Unaligned frame */
        if (fp & 0x0000003) {
            break;
        }
        /* Frame is out of range, maybe a user FP while doing kernel BT */
        if (fp > stacklimit) {
            break;
        }
        if (fp < stacklimit_bottom) {
            break;
        }
        /* Stack grows downward */
        if (fp < prevfp) {
            boolean_t prev_in_interrupt_stack = FALSE;

            if (!user_p) {
                /*
                 * As a special case, sometimes we are backtracing out of an interrupt
                 * handler, and the stack jumps downward because of the memory allocation
                 * pattern during early boot due to KASLR.
                 */
                int cpu;
                int max_cpu = ml_get_max_cpu_number();

                for (cpu = 0; cpu <= max_cpu; cpu++) {
                    cpu_data_t *target_cpu_datap;

                    target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
                    if (target_cpu_datap == (cpu_data_t *)NULL) {
                        continue;
                    }

                    if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
                        prev_in_interrupt_stack = TRUE;
                        break;
                    }

#if defined(__arm__)
                    if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
                        prev_in_interrupt_stack = TRUE;
                        break;
                    }
#elif defined(__arm64__)
                    if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
                        prev_in_interrupt_stack = TRUE;
                        break;
                    }
#endif
                }
            }

            if (!prev_in_interrupt_stack) {
                /* Corrupt frame pointer? */
                break;
            }
        }

        /* Assume there's a saved link register, and read it */
        kern_virt_addr = machine_trace_thread_get_kva(fp + ARM32_LR_OFFSET, bt_vm_map, thread_trace_flags);

        if (!kern_virt_addr) {
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
            break;
        }

        prevlr = *(uint32_t *)kern_virt_addr;
        if (!user_p) {
            /* This is safe since we will panic above on __arm64__ if !user_p */
            prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);
        }

        prevfp = fp;

        /*
         * Next frame; read the fp value into short_fp first
         * as it is 32-bit.
         */
        kern_virt_addr = machine_trace_thread_get_kva(fp, bt_vm_map, thread_trace_flags);

        if (kern_virt_addr) {
            short_fp = *(uint32_t *)kern_virt_addr;
            fp = (vm_offset_t) short_fp;
        } else {
            fp = 0;
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
        }
    }
    /* Reset the target pmap */
    machine_trace_thread_clear_validation_cache();
    return (int)(((char *)tracebuf) - tracepos);
}
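
/*
 * machine_trace_thread64() performs the same walk for 64-bit contexts:
 * frames are 16-byte aligned with 8-byte FP/LR slots, and on arm64e the
 * saved return addresses are PAC-signed and must be stripped before use.
 */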
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    uint32_t * thread_trace_flags,
    uint64_t *sp_out,
    vm_offset_t fp)
{
#pragma unused(sp_out)
#if defined(__arm__)
#pragma unused(thread, tracepos, tracebound, nframes, user_p, thread_trace_flags, fp)
    return 0;
#elif defined(__arm64__)
    uint64_t * tracebuf = (uint64_t *)tracepos;
    vm_size_t framesize = sizeof(uint64_t);

    vm_offset_t stacklimit        = 0;
    vm_offset_t stacklimit_bottom = 0;
    int framecount                = 0;
    vm_offset_t pc                = 0;
    vm_offset_t sp                = 0;
    vm_offset_t prevfp            = 0;
    uint64_t prevlr               = 0;
    vm_offset_t kern_virt_addr    = 0;
    vm_map_t bt_vm_map            = VM_MAP_NULL;

    const boolean_t is_64bit_addr = thread_is_64bit_addr(thread);

    nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
    if (!nframes) {
        return 0;
    }

    if (user_p) {
        /* Examine the user savearea */
        struct arm_saved_state * state = thread->machine.upcb;
        stacklimit = (is_64bit_addr) ? MACH_VM_MAX_ADDRESS : VM_MAX_ADDRESS;
        stacklimit_bottom = (is_64bit_addr) ? MACH_VM_MIN_ADDRESS : VM_MIN_ADDRESS;

        /* Fake up a stack frame for the PC */
        *tracebuf++ = get_saved_state_pc(state);
        framecount++;
        bt_vm_map = thread->task->map;

        /* Get the frame pointer */
        if (fp == 0) {
            fp = get_saved_state_fp(state);
        }

        /* Fill in the current link register */
        prevlr = get_saved_state_lr(state);
        pc = get_saved_state_pc(state);
        sp = get_saved_state_sp(state);
    } else {
        struct arm_saved_state *state = thread->machine.kpcb;
        if (state != NULL) {
            if (fp == 0) {
                fp = state->ss_64.fp;
            }

            prevlr = state->ss_64.lr;
            pc = state->ss_64.pc;
            sp = state->ss_64.sp;
        } else {
            /* kstackptr may not always be there, so recompute it */
            arm_kernel_saved_state_t *kstate = &thread_get_kernel_state(thread)->machine.ss;

            if (fp == 0) {
                fp = kstate->fp;
            }
            prevlr = kstate->lr;
            pc = kstate->pc;
            sp = kstate->sp;
        }

        stacklimit = VM_MAX_KERNEL_ADDRESS;
        stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
        bt_vm_map = kernel_map;
    }

    if (!user_p && !prevlr && !fp && !sp && !pc) {
        return 0;
    }

    if (!user_p) {
        prevlr = VM_KERNEL_UNSLIDE(prevlr);
    }

    for (; framecount < nframes; framecount++) {
        *tracebuf++ = prevlr;

        /* Invalid frame */
        if (!fp) {
            break;
        }
        /*
         * Unaligned frame; given that the stack register must always be
         * 16-byte aligned, we are assured 8-byte alignment of the saved
         * frame pointer and link register.
         */
        if (fp & 0x0000007) {
            break;
        }
        /* Frame is out of range, maybe a user FP while doing kernel BT */
        if (fp > stacklimit) {
            break;
        }
        if (fp < stacklimit_bottom) {
            break;
        }
        /* Stack grows downward */
        if (fp < prevfp) {
            boolean_t switched_stacks = FALSE;

            if (!user_p) {
                /*
                 * As a special case, sometimes we are backtracing out of an interrupt
                 * handler, and the stack jumps downward because of the memory allocation
                 * pattern during early boot due to KASLR.
                 */
                int cpu;
                int max_cpu = ml_get_max_cpu_number();

                for (cpu = 0; cpu <= max_cpu; cpu++) {
                    cpu_data_t *target_cpu_datap;

                    target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
                    if (target_cpu_datap == (cpu_data_t *)NULL) {
                        continue;
                    }

                    if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
                        switched_stacks = TRUE;
                        break;
                    }
#if defined(__arm__)
                    if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
                        switched_stacks = TRUE;
                        break;
                    }
#elif defined(__arm64__)
                    if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
                        switched_stacks = TRUE;
                        break;
                    }
#endif
                }

#if XNU_MONITOR
                vm_offset_t cpu_base = (vm_offset_t)pmap_stacks_start;
                vm_offset_t cpu_top = (vm_offset_t)pmap_stacks_end;

                if (((prevfp >= cpu_base) && (prevfp < cpu_top)) !=
                    ((fp >= cpu_base) && (fp < cpu_top))) {
                    switched_stacks = TRUE;
                }
#endif
            }

            if (!switched_stacks) {
                /* Corrupt frame pointer? */
                break;
            }
        }

        /* Assume there's a saved link register, and read it */
        kern_virt_addr = machine_trace_thread_get_kva(fp + ARM64_LR_OFFSET, bt_vm_map, thread_trace_flags);

        if (!kern_virt_addr) {
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
            break;
        }

        prevlr = *(uint64_t *)kern_virt_addr;
#if defined(HAS_APPLE_PAC)
        /* return addresses on stack signed by arm64e ABI */
        prevlr = (uint64_t) ptrauth_strip((void *)prevlr, ptrauth_key_return_address);
#endif
        if (!user_p) {
            prevlr = VM_KERNEL_UNSLIDE(prevlr);
        }

        prevfp = fp;
        /* Next frame */
        kern_virt_addr = machine_trace_thread_get_kva(fp, bt_vm_map, thread_trace_flags);

        if (kern_virt_addr) {
            fp = *(uint64_t *)kern_virt_addr;
        } else {
            fp = 0;
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
        }
    }
    /* Reset the target pmap */
    machine_trace_thread_clear_validation_cache();
    return (int)(((char *)tracebuf) - tracepos);
#else
#error Unknown architecture.
#endif
}

void
kdp_ml_enter_debugger(void)
{
    /*
     * 0xe7ffdefe is GDB_TRAP_INSTR1: a permanently undefined instruction
     * encoding that faults into the debugger.
     */
    __asm__ volatile (".long 0xe7ffdefe");
}