/* apple/xnu (xnu-7195.101.1): osfmk/kdp/ml/arm/kdp_machdep.c */
/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>
#include <arm/exception.h>
#include <arm/pmap.h>
#include <arm/proc_reg.h>
#include <arm/thread.h>
#include <arm/trap.h>
#include <arm/cpu_data_internal.h>
#include <kdp/kdp_internal.h>
#include <kern/debug.h>
#include <IOKit/IOPlatformExpert.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_map.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif

#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x) do {} while (0)
#endif
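
/*
 * dprintf takes a double-parenthesized argument list so that a variadic
 * printf-style call can pass through the single macro parameter, e.g.
 * dprintf(("kdp_readregs flavor %d\n", flavor)). With KDP_TEST_HARNESS
 * set to 0 the expansion is an empty statement and the call compiles away.
 */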

void halt_all_cpus(boolean_t);
void kdp_call(void);
int kdp_getc(void);
int machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    uint32_t * thread_trace_flags);
int machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    uint32_t * thread_trace_flags,
    uint64_t *sp,
    vm_offset_t fp);

void kdp_trap(unsigned int, struct arm_saved_state * saved_state);

extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
extern void machine_trace_thread_clear_validation_cache(void);
extern vm_map_t kernel_map;

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
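/*
 * Build a KDP_EXCEPTION notification: copy the packet into a properly
 * aligned local buffer, fill in the header and one kdp_exc_info_t record,
 * then copy the finished packet back for transmission to the remote
 * debugger on kdp.exception_port.
 */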
void
kdp_exception(
    unsigned char * pkt, int * len, unsigned short * remote_port, unsigned int exception, unsigned int code, unsigned int subcode)
{
    struct {
        kdp_exception_t pkt;
        kdp_exc_info_t exc;
    } aligned_pkt;

    kdp_exception_t * rq = (kdp_exception_t *)&aligned_pkt;

    bcopy((char *)pkt, (char *)rq, sizeof(*rq));
    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof(*rq) + sizeof(kdp_exc_info_t);

    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    kdp.exception_ack_needed = TRUE;

    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}

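/*
 * Validate an exception acknowledgement from the remote debugger; the ack
 * is accepted only when its sequence number matches kdp.exception_seq.
 */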
boolean_t
kdp_exception_ack(unsigned char * pkt, int len)
{
    kdp_exception_ack_t aligned_pkt;
    kdp_exception_ack_t * rq = (kdp_exception_ack_t *)&aligned_pkt;

    if ((unsigned)len < sizeof(*rq)) {
        return FALSE;
    }

    bcopy((char *)pkt, (char *)rq, sizeof(*rq));

    if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
        return FALSE;
    }

    dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

    if (rq->hdr.seq == kdp.exception_seq) {
        kdp.exception_ack_needed = FALSE;
        kdp.exception_seq++;
    }
    return TRUE;
}

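/*
 * Copy the integer register state of the saved context (kdp.saved_state)
 * out in the flavor-specific thread-state layout the debugger expects.
 */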
static void
kdp_getintegerstate(char * out_state)
{
#if defined(__arm__)
    struct arm_thread_state thread_state;
    struct arm_saved_state *saved_state;

    saved_state = kdp.saved_state;

    bzero((char *) &thread_state, sizeof(struct arm_thread_state));

    saved_state_to_thread_state32(saved_state, &thread_state);

    bcopy((char *) &thread_state, (char *) out_state, sizeof(struct arm_thread_state));
#elif defined(__arm64__)
    struct arm_thread_state64 thread_state64;
    arm_saved_state_t *saved_state;

    saved_state = kdp.saved_state;
    assert(is_saved_state64(saved_state));

    bzero((char *) &thread_state64, sizeof(struct arm_thread_state64));

    saved_state_to_thread_state64(saved_state, &thread_state64);

    bcopy((char *) &thread_state64, (char *) out_state, sizeof(struct arm_thread_state64));
#else
#error Unknown architecture.
#endif
}

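/*
 * KDP register-read entry point. Only the integer state is populated;
 * the VFP flavor is returned zeroed rather than read from hardware.
 */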
kdp_error_t
kdp_machine_read_regs(__unused unsigned int cpu, unsigned int flavor, char * data, int * size)
{
    switch (flavor) {
#if defined(__arm__)
    case ARM_THREAD_STATE:
        dprintf(("kdp_readregs THREAD_STATE\n"));
        kdp_getintegerstate(data);
        *size = ARM_THREAD_STATE_COUNT * sizeof(int);
        return KDPERR_NO_ERROR;
#elif defined(__arm64__)
    case ARM_THREAD_STATE64:
        dprintf(("kdp_readregs THREAD_STATE64\n"));
        kdp_getintegerstate(data);
        *size = ARM_THREAD_STATE64_COUNT * sizeof(int);
        return KDPERR_NO_ERROR;
#endif

    case ARM_VFP_STATE:
        dprintf(("kdp_readregs THREAD_FPSTATE\n"));
        bzero((char *) data, sizeof(struct arm_vfp_state));
        *size = ARM_VFP_STATE_COUNT * sizeof(int);
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_readregs bad flavor %d\n", flavor));
        return KDPERR_BADFLAVOR;
    }
}

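/*
 * Apply debugger-supplied integer register state to the saved context so
 * that resuming from the debugger continues with the modified registers.
 */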
static void
kdp_setintegerstate(char * state_in)
{
#if defined(__arm__)
    struct arm_thread_state thread_state;
    struct arm_saved_state *saved_state;

    bcopy((char *) state_in, (char *) &thread_state, sizeof(struct arm_thread_state));
    saved_state = kdp.saved_state;

    thread_state32_to_saved_state(&thread_state, saved_state);
#elif defined(__arm64__)
    struct arm_thread_state64 thread_state64;
    struct arm_saved_state *saved_state;

    bcopy((char *) state_in, (char *) &thread_state64, sizeof(struct arm_thread_state64));
    saved_state = kdp.saved_state;
    assert(is_saved_state64(saved_state));

    thread_state64_to_saved_state(&thread_state64, saved_state);
#else
#error Unknown architecture.
#endif
}

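/*
 * KDP register-write entry point. VFP writes are accepted but silently
 * discarded, mirroring the zeroed VFP state returned on the read side.
 */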
kdp_error_t
kdp_machine_write_regs(__unused unsigned int cpu, unsigned int flavor, char * data, __unused int * size)
{
    switch (flavor) {
#if defined(__arm__)
    case ARM_THREAD_STATE:
        dprintf(("kdp_writeregs THREAD_STATE\n"));
        kdp_setintegerstate(data);
        return KDPERR_NO_ERROR;
#elif defined(__arm64__)
    case ARM_THREAD_STATE64:
        dprintf(("kdp_writeregs THREAD_STATE64\n"));
        kdp_setintegerstate(data);
        return KDPERR_NO_ERROR;
#endif

    case ARM_VFP_STATE:
        dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_writeregs bad flavor %d\n", flavor));
        return KDPERR_BADFLAVOR;
    }
}

void
kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)
{
    hostinfo->cpus_mask = 1;
    hostinfo->cpu_type = slot_type(0);
    hostinfo->cpu_subtype = slot_subtype(0);
}

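/*
 * Fatal error inside the debugger stub itself: report the failure and
 * spin forever, since there is no safe way to continue.
 */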
__attribute__((noreturn))
void
kdp_panic(const char * fmt, ...)
{
    char kdp_fmt[256];
    va_list args;

    va_start(args, fmt);
    (void) snprintf(kdp_fmt, sizeof(kdp_fmt), "kdp panic: %s", fmt);
    vprintf(kdp_fmt, args);
    va_end(args);

    while (1) {
    }
}

int
kdp_intr_disbl(void)
{
    return splhigh();
}

void
kdp_intr_enbl(int s)
{
    splx(s);
}

void
kdp_us_spin(int usec)
{
    delay(usec / 100);
}

void
kdp_call(void)
{
    Debugger("inline call to debugger(machine_startup)");
}

int
kdp_getc(void)
{
    return cnmaygetc();
}

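/*
 * Report the 4-byte trap instruction (GDB_TRAP_INSTR1) that the debugger
 * should plant when it sets a breakpoint; kdp_trap() below recognizes it.
 */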
void
kdp_machine_get_breakinsn(uint8_t * bytes, uint32_t * size)
{
    *(uint32_t *)bytes = GDB_TRAP_INSTR1;
    *size = sizeof(uint32_t);
}

void
kdp_sync_cache(void)
{
}

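/*
 * I/O-port and MSR requests are x86 debugger concepts; on ARM they are
 * satisfied by empty stubs.
 */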
int
kdp_machine_ioport_read(kdp_readioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
    return 0;
}

int
kdp_machine_ioport_write(kdp_writeioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
    return 0;
}

int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
    return 0;
}

int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
    return 0;
}
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

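/*
 * Entered from the debugger trap vector. After handle_debugger_trap()
 * returns, advance the PC past the trap instruction (if the faulting
 * instruction is one of the known GDB trap encodings) so that execution
 * can resume at the next instruction.
 */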
void
kdp_trap(unsigned int exception, struct arm_saved_state * saved_state)
{
    handle_debugger_trap(exception, 0, 0, saved_state);

#if defined(__arm__)
    if (saved_state->cpsr & PSR_TF) {
        unsigned short instr = *((unsigned short *)(saved_state->pc));
        if ((instr == (GDB_TRAP_INSTR1 & 0xFFFF)) || (instr == (GDB_TRAP_INSTR2 & 0xFFFF))) {
            saved_state->pc += 2;
        }
    } else {
        unsigned int instr = *((unsigned int *)(saved_state->pc));
        if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
            saved_state->pc += 4;
        }
    }

#elif defined(__arm64__)
    assert(is_saved_state64(saved_state));

    uint32_t instr = *((uint32_t *)get_saved_state_pc(saved_state));

    /*
     * As long as we are using the arm32 trap encoding to handle
     * traps to the debugger, we should identify both variants and
     * increment for both of them.
     */
    if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
        add_saved_state_pc(saved_state, 4);
    }
#else
#error Unknown architecture.
#endif
}

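/*
 * A frame record holds the saved {fp, lr} pair at the frame pointer:
 * the caller's fp is at *fp and the saved lr immediately follows, at
 * fp + 4 on arm32 and fp + 8 on arm64.
 */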
#define ARM32_LR_OFFSET 4
#define ARM64_LR_OFFSET 8

/*
 * Since sizeof (struct thread_snapshot) % 4 == 2
 * make sure the compiler does not try to use word-aligned
 * access to this data, which can result in alignment faults
 * that can't be emulated in KDP context.
 */
typedef uint32_t uint32_align2_t __attribute__((aligned(2)));

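/*
 * Walk a 32-bit frame-pointer chain, writing one 32-bit return address per
 * frame into tracepos. The walk stops at nframes, at tracebound, or at the
 * first frame pointer that is null, misaligned, outside the stack's address
 * range, or moving the wrong way down the stack. Returns the number of
 * bytes written.
 */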
int
machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    uint32_t * thread_trace_flags)
{
    uint32_align2_t * tracebuf = (uint32_align2_t *)tracepos;

    vm_size_t framesize = sizeof(uint32_t);

    vm_offset_t stacklimit = 0;
    vm_offset_t stacklimit_bottom = 0;
    int framecount = 0;
    uint32_t short_fp = 0;
    vm_offset_t fp = 0;
    vm_offset_t pc, sp;
    vm_offset_t prevfp = 0;
    uint32_t prevlr = 0;
    struct arm_saved_state * state;
    vm_offset_t kern_virt_addr = 0;
    vm_map_t bt_vm_map = VM_MAP_NULL;

    nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
    if (!nframes) {
        return 0;
    }
    framecount = 0;

    if (user_p) {
        /* Examine the user savearea */
        state = get_user_regs(thread);
        stacklimit = VM_MAX_ADDRESS;
        stacklimit_bottom = VM_MIN_ADDRESS;

        /* Fake up a stack frame for the PC */
        *tracebuf++ = (uint32_t)get_saved_state_pc(state);
        framecount++;
        bt_vm_map = thread->task->map;
    } else {
#if defined(__arm64__)
        panic("Attempted to trace kernel thread_t %p as a 32-bit context", thread);
        return 0;
#elif defined(__arm__)
        /* kstackptr may not always be there, so recompute it */
        state = &thread_get_kernel_state(thread)->machine;

        stacklimit = VM_MAX_KERNEL_ADDRESS;
        stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
        bt_vm_map = kernel_map;
#else
#error Unknown architecture.
#endif
    }

    /* Get the frame pointer */
    fp = get_saved_state_fp(state);

    /* Fill in the current link register */
    prevlr = (uint32_t)get_saved_state_lr(state);
    pc = get_saved_state_pc(state);
    sp = get_saved_state_sp(state);

    if (!user_p && !prevlr && !fp && !sp && !pc) {
        return 0;
    }

    if (!user_p) {
        /* This is safe since we will panic above on __arm64__ if !user_p */
        prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);
    }

    for (; framecount < nframes; framecount++) {
        *tracebuf++ = prevlr;

        /* Invalid frame */
        if (!fp) {
            break;
        }
        /* Unaligned frame */
        if (fp & 0x0000003) {
            break;
        }
        /* Frame is out of range, maybe a user FP while doing kernel BT */
        if (fp > stacklimit) {
            break;
        }
        if (fp < stacklimit_bottom) {
            break;
        }
        /* Stack grows downward */
        if (fp < prevfp) {
            boolean_t prev_in_interrupt_stack = FALSE;

            if (!user_p) {
                /*
                 * As a special case, sometimes we are backtracing out of an interrupt
                 * handler, and the stack jumps downward because of the memory allocation
                 * pattern during early boot due to KASLR.
                 */
                int cpu;
                int max_cpu = ml_get_max_cpu_number();

                for (cpu = 0; cpu <= max_cpu; cpu++) {
                    cpu_data_t *target_cpu_datap;

                    target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
                    if (target_cpu_datap == (cpu_data_t *)NULL) {
                        continue;
                    }

                    if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
                        prev_in_interrupt_stack = TRUE;
                        break;
                    }

#if defined(__arm__)
                    if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
                        prev_in_interrupt_stack = TRUE;
                        break;
                    }
#elif defined(__arm64__)
                    if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
                        prev_in_interrupt_stack = TRUE;
                        break;
                    }
#endif
                }
            }

            if (!prev_in_interrupt_stack) {
                /* Corrupt frame pointer? */
                break;
            }
        }
        /* Assume there's a saved link register, and read it */
        kern_virt_addr = machine_trace_thread_get_kva(fp + ARM32_LR_OFFSET, bt_vm_map, thread_trace_flags);

        if (!kern_virt_addr) {
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
            break;
        }

        prevlr = *(uint32_t *)kern_virt_addr;
        if (!user_p) {
            /* This is safe since we will panic above on __arm64__ if !user_p */
            prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);
        }

        prevfp = fp;

        /*
         * Next frame; read the fp value into short_fp first
         * as it is 32-bit.
         */
        kern_virt_addr = machine_trace_thread_get_kva(fp, bt_vm_map, thread_trace_flags);

        if (kern_virt_addr) {
            short_fp = *(uint32_t *)kern_virt_addr;
            fp = (vm_offset_t) short_fp;
        } else {
            fp = 0;
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
        }
    }
    /* Reset the target pmap */
    machine_trace_thread_clear_validation_cache();
    return (int)(((char *)tracebuf) - tracepos);
}

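/*
 * 64-bit variant of the walk above: one 64-bit return address per frame.
 * A starting frame pointer may be supplied via fp (0 means use the saved
 * state), and arm64e return addresses are stripped of their PAC signature
 * before being recorded.
 */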
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    uint32_t * thread_trace_flags,
    uint64_t *sp_out,
    vm_offset_t fp)
{
#pragma unused(sp_out)
#if defined(__arm__)
#pragma unused(thread, tracepos, tracebound, nframes, user_p, thread_trace_flags, fp)
    return 0;
#elif defined(__arm64__)

    uint64_t * tracebuf = (uint64_t *)tracepos;
    vm_size_t framesize = sizeof(uint64_t);

    vm_offset_t stacklimit = 0;
    vm_offset_t stacklimit_bottom = 0;
    int framecount = 0;
    vm_offset_t pc = 0;
    vm_offset_t sp = 0;
    vm_offset_t prevfp = 0;
    uint64_t prevlr = 0;
    vm_offset_t kern_virt_addr = 0;
    vm_map_t bt_vm_map = VM_MAP_NULL;

    const boolean_t is_64bit_addr = thread_is_64bit_addr(thread);

    nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
    if (!nframes) {
        return 0;
    }
    framecount = 0;

    if (user_p) {
        /* Examine the user savearea */
        struct arm_saved_state * state = thread->machine.upcb;
        stacklimit = (is_64bit_addr) ? MACH_VM_MAX_ADDRESS : VM_MAX_ADDRESS;
        stacklimit_bottom = (is_64bit_addr) ? MACH_VM_MIN_ADDRESS : VM_MIN_ADDRESS;

        /* Fake up a stack frame for the PC */
        *tracebuf++ = get_saved_state_pc(state);
        framecount++;
        bt_vm_map = thread->task->map;

        /* Get the frame pointer */
        if (fp == 0) {
            fp = get_saved_state_fp(state);
        }

        /* Fill in the current link register */
        prevlr = get_saved_state_lr(state);
        pc = get_saved_state_pc(state);
        sp = get_saved_state_sp(state);
    } else {
        struct arm_saved_state *state = thread->machine.kpcb;
        if (state != NULL) {
            if (fp == 0) {
                fp = state->ss_64.fp;
            }

            prevlr = state->ss_64.lr;
            pc = state->ss_64.pc;
            sp = state->ss_64.sp;
        } else {
            /* kstackptr may not always be there, so recompute it */
            arm_kernel_saved_state_t *kstate = &thread_get_kernel_state(thread)->machine.ss;

            if (fp == 0) {
                fp = kstate->fp;
            }
            prevlr = kstate->lr;
            pc = kstate->pc;
            sp = kstate->sp;
        }

        stacklimit = VM_MAX_KERNEL_ADDRESS;
        stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
        bt_vm_map = kernel_map;
    }

    if (!user_p && !prevlr && !fp && !sp && !pc) {
        return 0;
    }

    if (!user_p) {
        prevlr = VM_KERNEL_UNSLIDE(prevlr);
    }

    for (; framecount < nframes; framecount++) {
        *tracebuf++ = prevlr;

        /* Invalid frame */
        if (!fp) {
            break;
        }
        /*
         * Unaligned frame; given that the stack register must always be
         * 16-byte aligned, we are assured 8-byte alignment of the saved
         * frame pointer and link register.
         */
        if (fp & 0x0000007) {
            break;
        }
        /* Frame is out of range, maybe a user FP while doing kernel BT */
        if (fp > stacklimit) {
            break;
        }
        if (fp < stacklimit_bottom) {
            break;
        }
        /* Stack grows downward */
        if (fp < prevfp) {
            boolean_t switched_stacks = FALSE;

            if (!user_p) {
                /*
                 * As a special case, sometimes we are backtracing out of an interrupt
                 * handler, and the stack jumps downward because of the memory allocation
                 * pattern during early boot due to KASLR.
                 */
                int cpu;
                int max_cpu = ml_get_max_cpu_number();

                for (cpu = 0; cpu <= max_cpu; cpu++) {
                    cpu_data_t *target_cpu_datap;

                    target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
                    if (target_cpu_datap == (cpu_data_t *)NULL) {
                        continue;
                    }

                    if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
                        switched_stacks = TRUE;
                        break;
                    }
#if defined(__arm__)
                    if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
                        switched_stacks = TRUE;
                        break;
                    }
#elif defined(__arm64__)
                    if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
                        switched_stacks = TRUE;
                        break;
                    }
#endif
                }

#if XNU_MONITOR
                vm_offset_t cpu_base = (vm_offset_t)pmap_stacks_start;
                vm_offset_t cpu_top = (vm_offset_t)pmap_stacks_end;

                if (((prevfp >= cpu_base) && (prevfp < cpu_top)) !=
                    ((fp >= cpu_base) && (fp < cpu_top))) {
                    switched_stacks = TRUE;
                    break;
                }
#endif
            }

            if (!switched_stacks) {
                /* Corrupt frame pointer? */
                break;
            }
        }

        /* Assume there's a saved link register, and read it */
        kern_virt_addr = machine_trace_thread_get_kva(fp + ARM64_LR_OFFSET, bt_vm_map, thread_trace_flags);

        if (!kern_virt_addr) {
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
            break;
        }

        prevlr = *(uint64_t *)kern_virt_addr;
#if defined(HAS_APPLE_PAC)
        /* return addresses on the stack are signed by the arm64e ABI */
        prevlr = (uint64_t) ptrauth_strip((void *)prevlr, ptrauth_key_return_address);
#endif
        if (!user_p) {
            prevlr = VM_KERNEL_UNSLIDE(prevlr);
        }

        prevfp = fp;
        /* Next frame */
        kern_virt_addr = machine_trace_thread_get_kva(fp, bt_vm_map, thread_trace_flags);

        if (kern_virt_addr) {
            fp = *(uint64_t *)kern_virt_addr;
        } else {
            fp = 0;
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
        }
    }
    /* Reset the target pmap */
    machine_trace_thread_clear_validation_cache();
    return (int)(((char *)tracebuf) - tracepos);
#else
#error Unknown architecture.
#endif
}

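/*
 * Drop into the debugger by executing the trap instruction directly.
 * 0xe7ffdefe is an ARM "permanently undefined" (UDF) encoding; it is
 * presumably the GDB_TRAP_INSTR1 value that kdp_trap() recognizes and
 * steps over.
 */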
void
kdp_ml_enter_debugger(void)
{
    __asm__ volatile (".long 0xe7ffdefe");
}