/*
 * osfmk/kdp/ml/arm/kdp_machdep.c
 * (apple/xnu, tag xnu-4903.270.47 — recovered from a git-blame listing)
 */
1/*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach/mach_types.h>
30#include <mach/exception_types.h>
31#include <arm/exception.h>
32#include <arm/pmap.h>
33#include <arm/proc_reg.h>
34#include <arm/thread.h>
35#include <arm/trap.h>
36#include <arm/cpu_data_internal.h>
37#include <kdp/kdp_internal.h>
38#include <kern/debug.h>
39#include <IOKit/IOPlatformExpert.h>
40#include <kern/kalloc.h>
41#include <libkern/OSAtomic.h>
42#include <vm/vm_map.h>
43
44
/*
 * Debug tracing for this file: with KDP_TEST_HARNESS enabled,
 * dprintf((...)) forwards to kprintf; otherwise it compiles to nothing.
 * Callers pass a double-parenthesized argument list.
 */
#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x) do {} while (0)
#endif
51
/* Forward declarations for routines defined in or referenced by this file. */
void halt_all_cpus(boolean_t);
void kdp_call(void);
int kdp_getc(void);

/* Walk a thread's 32-bit frame-pointer chain into a caller-supplied buffer. */
int machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags);

/* Walk a thread's 64-bit frame-pointer chain into a caller-supplied buffer. */
int machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags,
    uint64_t *sp);

void kdp_trap(unsigned int, struct arm_saved_state * saved_state);

/* Helpers provided by the machine-independent KDP backtrace support. */
extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
extern void machine_trace_thread_clear_validation_cache(void);
extern vm_map_t kernel_map;
77#if CONFIG_KDP_INTERACTIVE_DEBUGGING
/*
 * Build a KDP_EXCEPTION notification packet in place over 'pkt' and
 * direct it at the attached debugger's exception port.  On return,
 * *len holds the packet length and *remote_port the destination port.
 */
void
kdp_exception(
	unsigned char * pkt, int * len, unsigned short * remote_port, unsigned int exception, unsigned int code, unsigned int subcode)
{
	/* Stack copy guarantees alignment for the header plus one exc record. */
	struct {
		kdp_exception_t pkt;
		kdp_exc_info_t exc;
	} aligned_pkt;

	kdp_exception_t * rq = (kdp_exception_t *)&aligned_pkt;

	bcopy((char *)pkt, (char *)rq, sizeof(*rq));
	rq->hdr.request = KDP_EXCEPTION;
	rq->hdr.is_reply = 0;
	rq->hdr.seq = kdp.exception_seq;
	rq->hdr.key = 0;
	/*
	 * NOTE(review): hdr.len is seeded with one kdp_exc_info_t here and
	 * then incremented again by n_exc_info records below — this looks
	 * like it may count the single record twice.  Confirm against the
	 * KDP wire-protocol definitions (kdp_protocol.h) before changing.
	 */
	rq->hdr.len = sizeof(*rq) + sizeof(kdp_exc_info_t);

	/* Exactly one exception record is reported, always attributed to CPU 0. */
	rq->n_exc_info = 1;
	rq->exc_info[0].cpu = 0;
	rq->exc_info[0].exception = exception;
	rq->exc_info[0].code = code;
	rq->exc_info[0].subcode = subcode;

	rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

	/* Copy the assembled packet back out to the caller's buffer. */
	bcopy((char *)rq, (char *)pkt, rq->hdr.len);

	/* The remote debugger must acknowledge this notification. */
	kdp.exception_ack_needed = TRUE;

	*remote_port = kdp.exception_port;
	*len = rq->hdr.len;
}
111
/*
 * Validate an acknowledgement from the remote debugger for a previously
 * sent KDP_EXCEPTION packet.  Returns TRUE for any well-formed exception
 * reply (even with a stale sequence number); FALSE otherwise.  Only a
 * reply matching the outstanding sequence clears exception_ack_needed.
 */
boolean_t
kdp_exception_ack(unsigned char * pkt, int len)
{
	kdp_exception_ack_t aligned_pkt;
	kdp_exception_ack_t * rq = (kdp_exception_ack_t *)&aligned_pkt;

	if ((unsigned)len < sizeof(*rq)) {
		return FALSE;
	}

	/* Copy into an aligned buffer before touching any fields. */
	bcopy((char *)pkt, (char *)rq, sizeof(*rq));

	if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
		return FALSE;
	}

	dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

	if (rq->hdr.seq == kdp.exception_seq) {
		kdp.exception_ack_needed = FALSE;
		kdp.exception_seq++;
	}
	return TRUE;
}
136
/*
 * Copy the integer register state captured at debugger entry
 * (kdp.saved_state) into 'out_state', in the architecture's
 * thread-state layout (arm_thread_state / arm_thread_state64).
 */
static void
kdp_getintegerstate(char * out_state)
{
#if defined(__arm__)
	struct arm_thread_state thread_state;
	struct arm_saved_state *saved_state;

	saved_state = kdp.saved_state;

	bzero((char *) &thread_state, sizeof(struct arm_thread_state));

	saved_state_to_thread_state32(saved_state, &thread_state);

	bcopy((char *) &thread_state, (char *) out_state, sizeof(struct arm_thread_state));
#elif defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	arm_saved_state_t *saved_state;

	saved_state = kdp.saved_state;
	/* KDP on arm64 only operates on 64-bit saved state. */
	assert(is_saved_state64(saved_state));

	bzero((char *) &thread_state64, sizeof(struct arm_thread_state64));

	saved_state_to_thread_state64(saved_state, &thread_state64);

	bcopy((char *) &thread_state64, (char *) out_state, sizeof(struct arm_thread_state64));
#else
#error Unknown architecture.
#endif
}
167
168kdp_error_t
169kdp_machine_read_regs(__unused unsigned int cpu, unsigned int flavor, char * data, int * size)
170{
171 switch (flavor) {
172#if defined(__arm__)
173 case ARM_THREAD_STATE:
174 dprintf(("kdp_readregs THREAD_STATE\n"));
175 kdp_getintegerstate(data);
176 *size = ARM_THREAD_STATE_COUNT * sizeof(int);
177 return KDPERR_NO_ERROR;
178#elif defined(__arm64__)
179 case ARM_THREAD_STATE64:
180 dprintf(("kdp_readregs THREAD_STATE64\n"));
181 kdp_getintegerstate(data);
182 *size = ARM_THREAD_STATE64_COUNT * sizeof(int);
183 return KDPERR_NO_ERROR;
184#endif
185
186 case ARM_VFP_STATE:
187 dprintf(("kdp_readregs THREAD_FPSTATE\n"));
188 bzero((char *) data, sizeof(struct arm_vfp_state));
189 *size = ARM_VFP_STATE_COUNT * sizeof(int);
190 return KDPERR_NO_ERROR;
191
192 default:
193 dprintf(("kdp_readregs bad flavor %d\n"));
194 return KDPERR_BADFLAVOR;
195 }
196}
197
/*
 * Apply a thread-state image supplied by the remote debugger to the
 * saved register state (kdp.saved_state), so the new values take effect
 * when the debugger resumes the machine.
 */
static void
kdp_setintegerstate(char * state_in)
{
#if defined(__arm__)
	struct arm_thread_state thread_state;
	struct arm_saved_state *saved_state;

	/* Copy to an aligned local before decoding the state image. */
	bcopy((char *) state_in, (char *) &thread_state, sizeof(struct arm_thread_state));
	saved_state = kdp.saved_state;

	thread_state32_to_saved_state(&thread_state, saved_state);
#elif defined(__arm64__)
	struct arm_thread_state64 thread_state64;
	struct arm_saved_state *saved_state;

	bcopy((char *) state_in, (char *) &thread_state64, sizeof(struct arm_thread_state64));
	saved_state = kdp.saved_state;
	/* KDP on arm64 only operates on 64-bit saved state. */
	assert(is_saved_state64(saved_state));

	thread_state64_to_saved_state(&thread_state64, saved_state);
#else
#error Unknown architecture.
#endif
}
222
223kdp_error_t
224kdp_machine_write_regs(__unused unsigned int cpu, unsigned int flavor, char * data, __unused int * size)
225{
226 switch (flavor) {
227#if defined(__arm__)
228 case ARM_THREAD_STATE:
229 dprintf(("kdp_writeregs THREAD_STATE\n"));
230 kdp_setintegerstate(data);
231 return KDPERR_NO_ERROR;
232#elif defined(__arm64__)
233 case ARM_THREAD_STATE64:
234 dprintf(("kdp_writeregs THREAD_STATE64\n"));
235 kdp_setintegerstate(data);
236 return KDPERR_NO_ERROR;
237#endif
238
239 case ARM_VFP_STATE:
240 dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
241 return KDPERR_NO_ERROR;
242
243 default:
244 dprintf(("kdp_writeregs bad flavor %d\n"));
245 return KDPERR_BADFLAVOR;
246 }
247}
248
249void
250kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)
251{
252 hostinfo->cpus_mask = 1;
253 hostinfo->cpu_type = slot_type(0);
254 hostinfo->cpu_subtype = slot_subtype(0);
255}
256
/*
 * Last-resort failure path for KDP itself: report the message on the
 * console and spin forever — the debugging machinery can no longer be
 * trusted to do anything more.
 */
__attribute__((noreturn))
void
kdp_panic(const char * msg)
{
	printf("kdp panic: %s\n", msg);
	for (;;) {
		/* Hang here permanently. */
	}
}
266
/*
 * Mask interrupts while the debugger runs; returns the previous spl
 * level so kdp_intr_enbl() can restore it.
 */
int
kdp_intr_disbl(void)
{
	int previous_level = splhigh();

	return previous_level;
}
272
/* Restore the interrupt level saved by kdp_intr_disbl(). */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
278
/*
 * Busy-wait on behalf of the KDP transport.
 * NOTE(review): the argument is divided by 100 before being handed to
 * delay() — presumably deliberate scaling for KDP polling, but confirm
 * the units delay() expects before relying on the exact duration.
 */
void
kdp_us_spin(int usec)
{
	delay(usec / 100);
}
284
/* Enter the debugger explicitly (used from machine startup paths). */
void
kdp_call(void)
{
	Debugger("inline call to debugger(machine_startup)");
}
290
/*
 * Poll for a console character on behalf of the debugger; cnmaygetc()
 * does not block.
 */
int
kdp_getc(void)
{
	return cnmaygetc();
}
296
297void
298kdp_machine_get_breakinsn(uint8_t * bytes, uint32_t * size)
299{
300 *(uint32_t *)bytes = GDB_TRAP_INSTR1;
301 *size = sizeof(uint32_t);
302}
303
/*
 * Cache-synchronization hook invoked after breakpoint writes; nothing
 * is required here on this configuration.
 */
void
kdp_sync_cache(void)
{
}
308
/* ARM has no x86-style I/O ports; this request is a no-op. */
int
kdp_machine_ioport_read(kdp_readioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
315
/* ARM has no x86-style I/O ports; this request is a no-op. */
int
kdp_machine_ioport_write(kdp_writeioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
322
/* MSRs are an x86 concept; this request is a no-op on ARM. */
int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
329
/* MSRs are an x86 concept; this request is a no-op on ARM. */
int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
336#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
337
/*
 * Entry point from the low-level trap handler when a debugger trap is
 * taken.  Runs the debugger, then — if the instruction at the saved PC
 * is one of the GDB trap encodings — advances the PC past it so
 * execution can resume after the breakpoint.
 */
void
kdp_trap(unsigned int exception, struct arm_saved_state * saved_state)
{
	handle_debugger_trap(exception, 0, 0, saved_state);

#if defined(__arm__)
	if (saved_state->cpsr & PSR_TF) {
		/* Thumb mode: compare against the low 16 bits, step 2 bytes. */
		unsigned short instr = *((unsigned short *)(saved_state->pc));
		if ((instr == (GDB_TRAP_INSTR1 & 0xFFFF)) || (instr == (GDB_TRAP_INSTR2 & 0xFFFF))) {
			saved_state->pc += 2;
		}
	} else {
		/* ARM mode: full 32-bit encoding, step 4 bytes. */
		unsigned int instr = *((unsigned int *)(saved_state->pc));
		if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
			saved_state->pc += 4;
		}
	}

#elif defined(__arm64__)
	assert(is_saved_state64(saved_state));

	uint32_t instr = *((uint32_t *)get_saved_state_pc(saved_state));

	/*
	 * As long as we are using the arm32 trap encoding to handling
	 * traps to the debugger, we should identify both variants and
	 * increment for both of them.
	 */
	if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2)) {
		set_saved_state_pc(saved_state, get_saved_state_pc(saved_state) + 4);
	}
#else
#error Unknown architecture.
#endif
}
373
/*
 * Byte offset of the saved link register within a stack frame record
 * ([fp] = previous fp, [fp + offset] = saved lr), per word size.
 */
#define ARM32_LR_OFFSET 4
#define ARM64_LR_OFFSET 8

/*
 * Since sizeof (struct thread_snapshot) % 4 == 2
 * make sure the compiler does not try to use word-aligned
 * access to this data, which can result in alignment faults
 * that can't be emulated in KDP context.
 */
typedef uint32_t uint32_align2_t __attribute__((aligned(2)));
384
/*
 * Walk 'thread''s 32-bit frame-pointer chain, writing one record per
 * frame into [tracepos, tracebound): the saved LR, plus the FP when
 * trace_fp is set.  user_p selects the user savearea; tracing a kernel
 * thread through this 32-bit path is only legal on arm32 (it panics on
 * arm64).  thread_trace_flags (may be NULL) accumulates
 * kThreadTruncatedBT when a frame cannot be read.  Returns the number
 * of bytes written into the buffer.
 */
int
machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags)
{
	/* 2-byte-aligned access; see the uint32_align2_t comment above. */
	uint32_align2_t * tracebuf = (uint32_align2_t *)tracepos;

	vm_size_t framesize = (trace_fp ? 2 : 1) * sizeof(uint32_t);

	vm_offset_t stacklimit = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount = 0;
	uint32_t short_fp = 0;
	vm_offset_t fp = 0;
	vm_offset_t pc, sp;
	vm_offset_t prevfp = 0;
	uint32_t prevlr = 0;
	struct arm_saved_state * state;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map = VM_MAP_NULL;

	/* Clamp the frame count to what fits in the caller's buffer. */
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return 0;
	}
	framecount = 0;

	if (user_p) {
		/* Examine the user savearea */
		state = get_user_regs(thread);
		stacklimit = VM_MAX_ADDRESS;
		stacklimit_bottom = VM_MIN_ADDRESS;

		/* Fake up a stack frame for the PC */
		*tracebuf++ = (uint32_t)get_saved_state_pc(state);
		if (trace_fp) {
			*tracebuf++ = (uint32_t)get_saved_state_sp(state);
		}
		framecount++;
		bt_vm_map = thread->task->map;
	} else {
#if defined(__arm64__)
		panic("Attempted to trace kernel thread_t %p as a 32-bit context", thread);
		return 0;
#elif defined(__arm__)
		/* kstackptr may not always be there, so recompute it */
		state = &thread_get_kernel_state(thread)->machine;

		stacklimit = VM_MAX_KERNEL_ADDRESS;
		stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
		bt_vm_map = kernel_map;
#else
#error Unknown architecture.
#endif
	}

	/* Get the frame pointer */
	fp = get_saved_state_fp(state);

	/* Fill in the current link register */
	prevlr = (uint32_t)get_saved_state_lr(state);
	pc = get_saved_state_pc(state);
	sp = get_saved_state_sp(state);

	/* An entirely-zero kernel register set means nothing to trace. */
	if (!user_p && !prevlr && !fp && !sp && !pc) {
		return 0;
	}

	if (!user_p) {
		/* This is safe since we will panic above on __arm64__ if !user_p */
		prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);
	}

	for (; framecount < nframes; framecount++) {
		/* Emit the current frame's record before validating the next FP. */
		*tracebuf++ = prevlr;
		if (trace_fp) {
			*tracebuf++ = (uint32_t)fp;
		}

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/* Unaligned frame */
		if (fp & 0x0000003) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {
			boolean_t prev_in_interrupt_stack = FALSE;

			if (!user_p) {
				/*
				 * As a special case, sometimes we are backtracing out of an interrupt
				 * handler, and the stack jumps downward because of the memory allocation
				 * pattern during early boot due to KASLR.
				 */
				int cpu;
				int max_cpu = ml_get_max_cpu_number();

				for (cpu = 0; cpu <= max_cpu; cpu++) {
					cpu_data_t *target_cpu_datap;

					target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
					if (target_cpu_datap == (cpu_data_t *)NULL) {
						continue;
					}

					/* Was the previous frame on any CPU's interrupt stack? */
					if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
						prev_in_interrupt_stack = TRUE;
						break;
					}

#if defined(__arm__)
					if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
						prev_in_interrupt_stack = TRUE;
						break;
					}
#elif defined(__arm64__)
					if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
						prev_in_interrupt_stack = TRUE;
						break;
					}
#endif
				}
			}

			if (!prev_in_interrupt_stack) {
				/* Corrupt frame pointer? */
				break;
			}
		}
		/* Assume there's a saved link register, and read it */
		kern_virt_addr = machine_trace_thread_get_kva(fp + ARM32_LR_OFFSET, bt_vm_map, thread_trace_flags);

		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prevlr = *(uint32_t *)kern_virt_addr;
		if (!user_p) {
			/* This is safe since we will panic above on __arm64__ if !user_p */
			prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);
		}

		prevfp = fp;

		/*
		 * Next frame; read the fp value into short_fp first
		 * as it is 32-bit.
		 */
		kern_virt_addr = machine_trace_thread_get_kva(fp, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			short_fp = *(uint32_t *)kern_virt_addr;
			fp = (vm_offset_t) short_fp;
		} else {
			fp = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}
	}
	/* Reset the target pmap */
	machine_trace_thread_clear_validation_cache();
	return (int)(((char *)tracebuf) - tracepos);
}
566
/*
 * Walk 'thread''s 64-bit frame-pointer chain, writing one record per
 * frame into [tracepos, tracebound): the saved LR, plus the FP when
 * trace_fp is set.  user_p selects the user savearea versus the
 * kernel-state savearea.  thread_trace_flags (may be NULL) accumulates
 * kThreadTruncatedBT when a frame cannot be read.  Returns the number
 * of bytes written into the buffer.  sp_out is currently unused.
 */
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags,
    uint64_t *sp_out)
{
#pragma unused(sp_out)
#if defined(__arm__)
	/* No 64-bit contexts exist on arm32. */
#pragma unused(thread, tracepos, tracebound, nframes, user_p, trace_fp, thread_trace_flags)
	return 0;
#elif defined(__arm64__)

	uint64_t * tracebuf = (uint64_t *)tracepos;
	vm_size_t framesize = (trace_fp ? 2 : 1) * sizeof(uint64_t);

	vm_offset_t stacklimit = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount = 0;
	vm_offset_t fp = 0;
	vm_offset_t pc = 0;
	vm_offset_t sp = 0;
	vm_offset_t prevfp = 0;
	uint64_t prevlr = 0;
	struct arm_saved_state * state;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map = VM_MAP_NULL;

	/* 32-bit user processes get the smaller VM limits below. */
	const boolean_t is_64bit_addr = thread_is_64bit_addr(thread);

	/* Clamp the frame count to what fits in the caller's buffer. */
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return 0;
	}
	framecount = 0;

	if (user_p) {
		/* Examine the user savearea */
		state = thread->machine.upcb;
		stacklimit = (is_64bit_addr) ? MACH_VM_MAX_ADDRESS : VM_MAX_ADDRESS;
		stacklimit_bottom = (is_64bit_addr) ? MACH_VM_MIN_ADDRESS : VM_MIN_ADDRESS;

		/* Fake up a stack frame for the PC */
		*tracebuf++ = get_saved_state_pc(state);
		if (trace_fp) {
			*tracebuf++ = get_saved_state_sp(state);
		}
		framecount++;
		bt_vm_map = thread->task->map;
	} else {
		/* kstackptr may not always be there, so recompute it */
		state = &thread_get_kernel_state(thread)->machine.ss;
		stacklimit = VM_MAX_KERNEL_ADDRESS;
		stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
		bt_vm_map = kernel_map;
	}

	/* Get the frame pointer */
	fp = get_saved_state_fp(state);

	/* Fill in the current link register */
	prevlr = get_saved_state_lr(state);
	pc = get_saved_state_pc(state);
	sp = get_saved_state_sp(state);

	/* An entirely-zero kernel register set means nothing to trace. */
	if (!user_p && !prevlr && !fp && !sp && !pc) {
		return 0;
	}

	if (!user_p) {
		prevlr = VM_KERNEL_UNSLIDE(prevlr);
	}

	for (; framecount < nframes; framecount++) {
		/* Emit the current frame's record before validating the next FP. */
		*tracebuf++ = prevlr;
		if (trace_fp) {
			*tracebuf++ = fp;
		}

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/*
		 * Unaligned frame; given that the stack register must always be
		 * 16-byte aligned, we are assured 8-byte alignment of the saved
		 * frame pointer and link register.
		 */
		if (fp & 0x0000007) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {
			boolean_t switched_stacks = FALSE;

			if (!user_p) {
				/*
				 * As a special case, sometimes we are backtracing out of an interrupt
				 * handler, and the stack jumps downward because of the memory allocation
				 * pattern during early boot due to KASLR.
				 */
				int cpu;
				int max_cpu = ml_get_max_cpu_number();

				for (cpu = 0; cpu <= max_cpu; cpu++) {
					cpu_data_t *target_cpu_datap;

					target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
					if (target_cpu_datap == (cpu_data_t *)NULL) {
						continue;
					}

					/* Was the previous frame on any CPU's interrupt stack? */
					if (prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) {
						switched_stacks = TRUE;
						break;
					}
#if defined(__arm__)
					if (prevfp >= (target_cpu_datap->fiqstack_top - FIQSTACK_SIZE) && prevfp < target_cpu_datap->fiqstack_top) {
						switched_stacks = TRUE;
						break;
					}
#elif defined(__arm64__)
					if (prevfp >= (target_cpu_datap->excepstack_top - EXCEPSTACK_SIZE) && prevfp < target_cpu_datap->excepstack_top) {
						switched_stacks = TRUE;
						break;
					}
#endif
				}
			}

			if (!switched_stacks) {
				/* Corrupt frame pointer? */
				break;
			}
		}

		/* Assume there's a saved link register, and read it */
		kern_virt_addr = machine_trace_thread_get_kva(fp + ARM64_LR_OFFSET, bt_vm_map, thread_trace_flags);

		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prevlr = *(uint64_t *)kern_virt_addr;
		if (!user_p) {
			prevlr = VM_KERNEL_UNSLIDE(prevlr);
		}

		prevfp = fp;
		/* Next frame */
		kern_virt_addr = machine_trace_thread_get_kva(fp, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			fp = *(uint64_t *)kern_virt_addr;
		} else {
			fp = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}
	}
	/* Reset the target pmap */
	machine_trace_thread_clear_validation_cache();
	return (int)(((char *)tracebuf) - tracepos);
#else
#error Unknown architecture.
#endif
}
749
/*
 * Enter the debugger by executing the arm32 GDB trap encoding
 * 0xe7ffdefe (matches GDB_TRAP_INSTR1 handled in kdp_trap()).
 */
void
kdp_ml_enter_debugger(void)
{
	__asm__ volatile (".long 0xe7ffdefe");
}