/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>
#include <arm/exception.h>
#include <arm/pmap.h>
#include <arm/proc_reg.h>
#include <arm/thread.h>
#include <arm/trap.h>
#include <arm/cpu_data_internal.h>
#include <kdp/kdp_internal.h>
#include <kern/debug.h>
#include <IOKit/IOPlatformExpert.h>
#include <kern/kalloc.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_map.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif

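/*
 * dprintf() takes a double-parenthesized argument list so that the
 * disabled variant below can drop the entire call, arguments and all.
 */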
#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x) do {} while (0)
#endif

void halt_all_cpus(boolean_t);
void kdp_call(void);
int kdp_getc(void);
int machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags);
int machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags,
    uint64_t *sp);

void kdp_trap(unsigned int, struct arm_saved_state * saved_state);

extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
extern void machine_trace_thread_clear_validation_cache(void);
extern vm_map_t kernel_map;

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
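/*
 * Build a KDP_EXCEPTION notification in place: copy the inbound packet
 * into an aligned buffer, fill in the header and a single exc_info
 * record, then copy the result back over the packet buffer for
 * delivery to the remote debugger on kdp.exception_port.
 */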
void
kdp_exception(
	unsigned char * pkt, int * len, unsigned short * remote_port, unsigned int exception, unsigned int code, unsigned int subcode)
{
	struct {
		kdp_exception_t pkt;
		kdp_exc_info_t exc;
	} aligned_pkt;

	kdp_exception_t * rq = (kdp_exception_t *)&aligned_pkt;

	bcopy((char *)pkt, (char *)rq, sizeof(*rq));
	rq->hdr.request = KDP_EXCEPTION;
	rq->hdr.is_reply = 0;
	rq->hdr.seq = kdp.exception_seq;
	rq->hdr.key = 0;
	rq->hdr.len = sizeof(*rq) + sizeof(kdp_exc_info_t);

	rq->n_exc_info = 1;
	rq->exc_info[0].cpu = 0;
	rq->exc_info[0].exception = exception;
	rq->exc_info[0].code = code;
	rq->exc_info[0].subcode = subcode;

	rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

	bcopy((char *)rq, (char *)pkt, rq->hdr.len);

	kdp.exception_ack_needed = TRUE;

	*remote_port = kdp.exception_port;
	*len = rq->hdr.len;
}

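/*
 * Validate an exception acknowledgement from the remote debugger; a
 * matching sequence number clears exception_ack_needed and advances
 * exception_seq.
 */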
boolean_t
kdp_exception_ack(unsigned char * pkt, int len)
{
	kdp_exception_ack_t aligned_pkt;
	kdp_exception_ack_t * rq = (kdp_exception_ack_t *)&aligned_pkt;

	if ((unsigned)len < sizeof(*rq)) {
		return FALSE;
	}

	bcopy((char *)pkt, (char *)rq, sizeof(*rq));

	if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
		return FALSE;
	}

	dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

	if (rq->hdr.seq == kdp.exception_seq) {
		kdp.exception_ack_needed = FALSE;
		kdp.exception_seq++;
	}
	return TRUE;
}

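/*
 * Copy the debugger-saved register state into the architecture's
 * thread-state layout so it can be returned to the KDP client.
 */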
static void
kdp_getintegerstate(char * out_state)
{
#if defined(__arm__)
	struct arm_thread_state thread_state;
	struct arm_saved_state *saved_state;

	saved_state = kdp.saved_state;

	bzero((char *) &thread_state, sizeof(struct arm_thread_state));

	saved_state_to_thread_state32(saved_state, &thread_state);

	bcopy((char *) &thread_state, (char *) out_state, sizeof(struct arm_thread_state));
#elif defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	arm_saved_state_t *saved_state;

	saved_state = kdp.saved_state;
	assert(is_saved_state64(saved_state));

	bzero((char *) &thread_state64, sizeof(struct arm_thread_state64));

	saved_state_to_thread_state64(saved_state, &thread_state64);

	bcopy((char *) &thread_state64, (char *) out_state, sizeof(struct arm_thread_state64));
#else
#error Unknown architecture.
#endif
}

kdp_error_t
kdp_machine_read_regs(__unused unsigned int cpu, unsigned int flavor, char * data, int * size)
{
	switch (flavor) {
#if defined(__arm__)
	case ARM_THREAD_STATE:
		dprintf(("kdp_readregs THREAD_STATE\n"));
		kdp_getintegerstate(data);
		*size = ARM_THREAD_STATE_COUNT * sizeof(int);
		return KDPERR_NO_ERROR;
#elif defined(__arm64__)
	case ARM_THREAD_STATE64:
		dprintf(("kdp_readregs THREAD_STATE64\n"));
		kdp_getintegerstate(data);
		*size = ARM_THREAD_STATE64_COUNT * sizeof(int);
		return KDPERR_NO_ERROR;
#endif

	case ARM_VFP_STATE:
		dprintf(("kdp_readregs THREAD_FPSTATE\n"));
		bzero((char *) data, sizeof(struct arm_vfp_state));
		*size = ARM_VFP_STATE_COUNT * sizeof(int);
		return KDPERR_NO_ERROR;

	default:
196 dprintf(("kdp_readregs bad flavor %d\n"));
		return KDPERR_BADFLAVOR;
	}
}

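/*
 * Inverse of kdp_getintegerstate(): write a thread state supplied by
 * the KDP client back into the debugger-saved register state.
 */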
static void
kdp_setintegerstate(char * state_in)
{
#if defined(__arm__)
	struct arm_thread_state thread_state;
	struct arm_saved_state *saved_state;

	bcopy((char *) state_in, (char *) &thread_state, sizeof(struct arm_thread_state));
	saved_state = kdp.saved_state;

	thread_state32_to_saved_state(&thread_state, saved_state);
#elif defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	struct arm_saved_state *saved_state;

	bcopy((char *) state_in, (char *) &thread_state64, sizeof(struct arm_thread_state64));
	saved_state = kdp.saved_state;
	assert(is_saved_state64(saved_state));

	thread_state64_to_saved_state(&thread_state64, saved_state);
#else
#error Unknown architecture.
#endif
}

kdp_error_t
kdp_machine_write_regs(__unused unsigned int cpu, unsigned int flavor, char * data, __unused int * size)
{
	switch (flavor) {
#if defined(__arm__)
	case ARM_THREAD_STATE:
		dprintf(("kdp_writeregs THREAD_STATE\n"));
		kdp_setintegerstate(data);
		return KDPERR_NO_ERROR;
#elif defined(__arm64__)
	case ARM_THREAD_STATE64:
		dprintf(("kdp_writeregs THREAD_STATE64\n"));
		kdp_setintegerstate(data);
		return KDPERR_NO_ERROR;
#endif

	case ARM_VFP_STATE:
		dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
		return KDPERR_NO_ERROR;

	default:
247 dprintf(("kdp_writeregs bad flavor %d\n"));
		return KDPERR_BADFLAVOR;
	}
}

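/* Report a single CPU; KDP on ARM does not enumerate secondary cores here. */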
void
kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)
{
	hostinfo->cpus_mask = 1;
	hostinfo->cpu_type = slot_type(0);
	hostinfo->cpu_subtype = slot_subtype(0);
}

__attribute__((noreturn))
void
kdp_panic(const char * msg)
{
	printf("kdp panic: %s\n", msg);
	while (1) {
	}
}

int
kdp_intr_disbl(void)
{
	return splhigh();
}

void
kdp_intr_enbl(int s)
{
	splx(s);
}

void
kdp_us_spin(int usec)
{
	delay(usec / 100);
}

void
kdp_call(void)
{
	Debugger("inline call to debugger(machine_startup)");
}

int
kdp_getc(void)
{
	return cnmaygetc();
}

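/*
 * Report the breakpoint instruction for the debugger to plant;
 * GDB_TRAP_INSTR1 is the same encoding recognized (and stepped over)
 * by kdp_trap() below.
 */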
void
kdp_machine_get_breakinsn(uint8_t * bytes, uint32_t * size)
{
	*(uint32_t *)bytes = GDB_TRAP_INSTR1;
	*size = sizeof(uint32_t);
}

void
kdp_sync_cache(void)
{
}

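/*
 * I/O-port and MSR accesses are x86 concepts with no ARM equivalent;
 * the four handlers below exist only to satisfy the machine-independent
 * KDP interface.
 */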
int
kdp_machine_ioport_read(kdp_readioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_ioport_write(kdp_writeioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

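/*
 * Hand a debugger trap to the common handler, then advance the PC past
 * the trap instruction so execution can resume once the debugger
 * detaches.
 */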
void
kdp_trap(unsigned int exception, struct arm_saved_state * saved_state)
{
	handle_debugger_trap(exception, 0, 0, saved_state);

#if defined(__arm__)
	if (saved_state->cpsr & PSR_TF) {
		unsigned short instr = *((unsigned short *)(saved_state->pc));
		if ((instr == (GDB_TRAP_INSTR1 & 0xFFFF)) || (instr == (GDB_TRAP_INSTR2 & 0xFFFF))) {
			saved_state->pc += 2;
		}
	} else {
		unsigned int instr = *((unsigned int *)(saved_state->pc));
		if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
			saved_state->pc += 4;
		}
	}

#elif defined(__arm64__)
	assert(is_saved_state64(saved_state));

	uint32_t instr = *((uint32_t *)get_saved_state_pc(saved_state));

	/*
	 * As long as we use the arm32 trap encoding to handle traps to
	 * the debugger, we should identify both variants and step the
	 * PC past either of them.
	 */
	if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
		add_saved_state_pc(saved_state, 4);
	}
#else
#error Unknown architecture.
#endif
}

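/*
 * A frame record is a {saved fp, saved lr} pair, so the saved link
 * register sits one pointer past the frame pointer: 4 bytes on arm32,
 * 8 bytes on arm64.
 */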
#define ARM32_LR_OFFSET 4
#define ARM64_LR_OFFSET 8

/*
 * Since sizeof (struct thread_snapshot) % 4 == 2,
 * make sure the compiler does not try to use word-aligned
 * access to this data, which can result in alignment faults
 * that can't be emulated in KDP context.
 */
typedef uint32_t uint32_align2_t __attribute__((aligned(2)));

int
machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags)
{
	uint32_align2_t * tracebuf = (uint32_align2_t *)tracepos;

	vm_size_t framesize = (trace_fp ? 2 : 1) * sizeof(uint32_t);

	vm_offset_t stacklimit = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount = 0;
	uint32_t short_fp = 0;
	vm_offset_t fp = 0;
	vm_offset_t pc, sp;
	vm_offset_t prevfp = 0;
	uint32_t prevlr = 0;
	struct arm_saved_state * state;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map = VM_MAP_NULL;

	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return 0;
	}
	framecount = 0;

	if (user_p) {
		/* Examine the user savearea */
		state = get_user_regs(thread);
		stacklimit = VM_MAX_ADDRESS;
		stacklimit_bottom = VM_MIN_ADDRESS;

		/* Fake up a stack frame for the PC */
		*tracebuf++ = (uint32_t)get_saved_state_pc(state);
		if (trace_fp) {
			*tracebuf++ = (uint32_t)get_saved_state_sp(state);
		}
		framecount++;
		bt_vm_map = thread->task->map;
	} else {
#if defined(__arm64__)
		panic("Attempted to trace kernel thread_t %p as a 32-bit context", thread);
		return 0;
#elif defined(__arm__)
		/* kstackptr may not always be there, so recompute it */
		state = &thread_get_kernel_state(thread)->machine;

		stacklimit = VM_MAX_KERNEL_ADDRESS;
		stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
		bt_vm_map = kernel_map;
#else
#error Unknown architecture.
#endif
	}

	/* Get the frame pointer */
	fp = get_saved_state_fp(state);

	/* Fill in the current link register */
	prevlr = (uint32_t)get_saved_state_lr(state);
	pc = get_saved_state_pc(state);
	sp = get_saved_state_sp(state);

	if (!user_p && !prevlr && !fp && !sp && !pc) {
		return 0;
	}

	if (!user_p) {
		/* This is safe since we will panic above on __arm64__ if !user_p */
		prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);
	}

	for (; framecount < nframes; framecount++) {
		*tracebuf++ = prevlr;
		if (trace_fp) {
			*tracebuf++ = (uint32_t)fp;
		}

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/* Unaligned frame */
		if (fp & 0x0000003) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {
			boolean_t prev_in_interrupt_stack = FALSE;

			if (!user_p) {
				/*
				 * As a special case, we may be backtracing out of an
				 * interrupt handler: the stack can appear to jump downward
				 * because of how interrupt-stack memory was laid out
				 * during early boot under KASLR.
				 */
				int cpu;
				int max_cpu = ml_get_max_cpu_number();

				for (cpu = 0; cpu <= max_cpu; cpu++) {
					cpu_data_t *target_cpu_datap;

					target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
					if (target_cpu_datap == (cpu_data_t *)NULL) {
						continue;
					}

					if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
						prev_in_interrupt_stack = TRUE;
						break;
					}

#if defined(__arm__)
					if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
						prev_in_interrupt_stack = TRUE;
						break;
					}
#elif defined(__arm64__)
					if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
						prev_in_interrupt_stack = TRUE;
						break;
					}
#endif
				}
			}

			if (!prev_in_interrupt_stack) {
				/* Corrupt frame pointer? */
				break;
			}
		}
		/* Assume there's a saved link register, and read it */
		kern_virt_addr = machine_trace_thread_get_kva(fp + ARM32_LR_OFFSET, bt_vm_map, thread_trace_flags);

		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prevlr = *(uint32_t *)kern_virt_addr;
		if (!user_p) {
			/* This is safe since we will panic above on __arm64__ if !user_p */
			prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);
		}

		prevfp = fp;

		/*
		 * Next frame; read the fp value into short_fp first
		 * as it is 32-bit.
		 */
		kern_virt_addr = machine_trace_thread_get_kva(fp, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			short_fp = *(uint32_t *)kern_virt_addr;
			fp = (vm_offset_t) short_fp;
		} else {
			fp = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}
	}
	/* Reset the target pmap */
	machine_trace_thread_clear_validation_cache();
	return (int)(((char *)tracebuf) - tracepos);
}

int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags,
    uint64_t *sp_out)
{
#pragma unused(sp_out)
#if defined(__arm__)
#pragma unused(thread, tracepos, tracebound, nframes, user_p, trace_fp, thread_trace_flags)
	return 0;
#elif defined(__arm64__)

	uint64_t * tracebuf = (uint64_t *)tracepos;
	vm_size_t framesize = (trace_fp ? 2 : 1) * sizeof(uint64_t);

	vm_offset_t stacklimit = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount = 0;
	vm_offset_t fp = 0;
	vm_offset_t pc = 0;
	vm_offset_t sp = 0;
	vm_offset_t prevfp = 0;
	uint64_t prevlr = 0;
	struct arm_saved_state * state;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map = VM_MAP_NULL;

	const boolean_t is_64bit_addr = thread_is_64bit_addr(thread);

	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return 0;
	}
	framecount = 0;

	if (user_p) {
		/* Examine the user savearea */
		state = thread->machine.upcb;
		stacklimit = (is_64bit_addr) ? MACH_VM_MAX_ADDRESS : VM_MAX_ADDRESS;
		stacklimit_bottom = (is_64bit_addr) ? MACH_VM_MIN_ADDRESS : VM_MIN_ADDRESS;

		/* Fake up a stack frame for the PC */
		*tracebuf++ = get_saved_state_pc(state);
		if (trace_fp) {
			*tracebuf++ = get_saved_state_sp(state);
		}
		framecount++;
		bt_vm_map = thread->task->map;
	} else {
		/* kstackptr may not always be there, so recompute it */
		state = &thread_get_kernel_state(thread)->machine.ss;
		stacklimit = VM_MAX_KERNEL_ADDRESS;
		stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
		bt_vm_map = kernel_map;
	}

	/* Get the frame pointer */
	fp = get_saved_state_fp(state);

	/* Fill in the current link register */
	prevlr = get_saved_state_lr(state);
	pc = get_saved_state_pc(state);
	sp = get_saved_state_sp(state);

	if (!user_p && !prevlr && !fp && !sp && !pc) {
		return 0;
	}

	if (!user_p) {
		prevlr = VM_KERNEL_UNSLIDE(prevlr);
	}

	for (; framecount < nframes; framecount++) {
		*tracebuf++ = prevlr;
		if (trace_fp) {
			*tracebuf++ = fp;
		}

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/*
		 * Unaligned frame; given that the stack register must always be
		 * 16-byte aligned, we are assured 8-byte alignment of the saved
		 * frame pointer and link register.
		 */
		if (fp & 0x0000007) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {
			boolean_t switched_stacks = FALSE;

			if (!user_p) {
				/*
				 * As a special case, we may be backtracing out of an
				 * interrupt handler: the stack can appear to jump downward
				 * because of how interrupt-stack memory was laid out
				 * during early boot under KASLR.
				 */
				int cpu;
				int max_cpu = ml_get_max_cpu_number();

				for (cpu = 0; cpu <= max_cpu; cpu++) {
					cpu_data_t *target_cpu_datap;

					target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
					if (target_cpu_datap == (cpu_data_t *)NULL) {
						continue;
					}

					if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
						switched_stacks = TRUE;
						break;
					}
#if defined(__arm__)
					if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
						switched_stacks = TRUE;
						break;
					}
#elif defined(__arm64__)
					if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
						switched_stacks = TRUE;
						break;
					}
#endif
				}
			}

			if (!switched_stacks) {
				/* Corrupt frame pointer? */
				break;
			}
		}

		/* Assume there's a saved link register, and read it */
		kern_virt_addr = machine_trace_thread_get_kva(fp + ARM64_LR_OFFSET, bt_vm_map, thread_trace_flags);

		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prevlr = *(uint64_t *)kern_virt_addr;
#if defined(HAS_APPLE_PAC)
		/* return addresses on stack signed by arm64e ABI */
		prevlr = (uint64_t) ptrauth_strip((void *)prevlr, ptrauth_key_return_address);
#endif
		if (!user_p) {
			prevlr = VM_KERNEL_UNSLIDE(prevlr);
		}

		prevfp = fp;
		/* Next frame */
		kern_virt_addr = machine_trace_thread_get_kva(fp, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			fp = *(uint64_t *)kern_virt_addr;
		} else {
			fp = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}
	}
	/* Reset the target pmap */
	machine_trace_thread_clear_validation_cache();
	return (int)(((char *)tracebuf) - tracepos);
#else
#error Unknown architecture.
#endif
}
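
/*
 * Illustrative caller sketch (hypothetical; MAX_FRAMES, buf, and flags
 * are not part of this file): a consumer reserves room for {lr, fp}
 * pairs and derives the frame count from the byte count returned above.
 *
 *	enum { MAX_FRAMES = 32 };	// caller-chosen bound
 *	uint64_t buf[2 * MAX_FRAMES];
 *	uint32_t flags = 0;
 *	int bytes = machine_trace_thread64(thread, (char *)buf,
 *	    (char *)buf + sizeof(buf), MAX_FRAMES, FALSE, TRUE, &flags, NULL);
 *	int frames = bytes / (int)(2 * sizeof(uint64_t));
 */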
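/*
 * Trap into the debugger from C: 0xe7ffdefe is the same undefined
 * instruction encoding as GDB_TRAP_INSTR1, which kdp_trap() recognizes
 * and steps past on resume.
 */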
void
kdp_ml_enter_debugger(void)
{
	__asm__ volatile (".long 0xe7ffdefe");
}