apple/xnu (xnu-4570.61.1): osfmk/kdp/ml/arm/kdp_machdep.c
/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/exception_types.h>
#include <arm/exception.h>
#include <arm/pmap.h>
#include <arm/proc_reg.h>
#include <arm/thread.h>
#include <arm/trap.h>
#include <arm/cpu_data_internal.h>
#include <kdp/kdp_internal.h>
#include <kern/debug.h>
#include <IOKit/IOPlatformExpert.h>
#include <kern/kalloc.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_map.h>


#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) kprintf x
#else
#define dprintf(x) do {} while (0)
#endif
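
/*
 * Note: the double parentheses in dprintf((...)) are the classic
 * pre-C99 idiom for variadic debug macros -- the caller's whole
 * argument list becomes a single macro argument, so
 * dprintf(("seq %x\n", seq)) expands to kprintf("seq %x\n", seq)
 * when KDP_TEST_HARNESS is enabled, and to a no-op otherwise.
 */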

void halt_all_cpus(boolean_t);
void kdp_call(void);
int kdp_getc(void);
int machine_trace_thread(thread_t thread,
                         char * tracepos,
                         char * tracebound,
                         int nframes,
                         boolean_t user_p,
                         boolean_t trace_fp,
                         uint32_t * thread_trace_flags);
int machine_trace_thread64(thread_t thread,
                           char * tracepos,
                           char * tracebound,
                           int nframes,
                           boolean_t user_p,
                           boolean_t trace_fp,
                           uint32_t * thread_trace_flags);

void kdp_trap(unsigned int, struct arm_saved_state * saved_state);

extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
extern void machine_trace_thread_clear_validation_cache(void);
extern vm_map_t kernel_map;

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
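
/*
 * Note: kdp_exception() rewrites the supplied packet buffer in place as
 * a KDP_EXCEPTION request and sets exception_ack_needed, so the KDP
 * transport keeps retransmitting it until kdp_exception_ack() below
 * sees a reply whose sequence number matches kdp.exception_seq.
 */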
void
kdp_exception(
    unsigned char * pkt, int * len, unsigned short * remote_port, unsigned int exception, unsigned int code, unsigned int subcode)
{
	struct {
		kdp_exception_t pkt;
		kdp_exc_info_t  exc;
	} aligned_pkt;

	kdp_exception_t * rq = (kdp_exception_t *)&aligned_pkt;

	bcopy((char *)pkt, (char *)rq, sizeof(*rq));
	rq->hdr.request = KDP_EXCEPTION;
	rq->hdr.is_reply = 0;
	rq->hdr.seq = kdp.exception_seq;
	rq->hdr.key = 0;
	rq->hdr.len = sizeof(*rq) + sizeof(kdp_exc_info_t);

	rq->n_exc_info = 1;
	rq->exc_info[0].cpu = 0;
	rq->exc_info[0].exception = exception;
	rq->exc_info[0].code = code;
	rq->exc_info[0].subcode = subcode;

	rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

	bcopy((char *)rq, (char *)pkt, rq->hdr.len);

	kdp.exception_ack_needed = TRUE;

	*remote_port = kdp.exception_port;
	*len = rq->hdr.len;
}

boolean_t
kdp_exception_ack(unsigned char * pkt, int len)
{
	kdp_exception_ack_t   aligned_pkt;
	kdp_exception_ack_t * rq = (kdp_exception_ack_t *)&aligned_pkt;

	if ((unsigned)len < sizeof(*rq))
		return (FALSE);

	bcopy((char *)pkt, (char *)rq, sizeof(*rq));

	if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
		return (FALSE);

	dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

	if (rq->hdr.seq == kdp.exception_seq) {
		kdp.exception_ack_needed = FALSE;
		kdp.exception_seq++;
	}
	return (TRUE);
}

static void
kdp_getintegerstate(char * out_state)
{
#if defined(__arm__)
	struct arm_thread_state  thread_state;
	struct arm_saved_state  *saved_state;

	saved_state = kdp.saved_state;

	bzero((char *) &thread_state, sizeof(struct arm_thread_state));

	saved_state_to_thread_state32(saved_state, &thread_state);

	bcopy((char *) &thread_state, (char *) out_state, sizeof(struct arm_thread_state));
#elif defined(__arm64__)
	struct arm_thread_state64  thread_state64;
	arm_saved_state_t         *saved_state;

	saved_state = kdp.saved_state;
	assert(is_saved_state64(saved_state));

	bzero((char *) &thread_state64, sizeof(struct arm_thread_state64));

	saved_state_to_thread_state64(saved_state, &thread_state64);

	bcopy((char *) &thread_state64, (char *) out_state, sizeof(struct arm_thread_state64));
#else
#error Unknown architecture.
#endif
}

kdp_error_t
kdp_machine_read_regs(__unused unsigned int cpu, unsigned int flavor, char * data, int * size)
{
	switch (flavor) {
#if defined(__arm__)
	case ARM_THREAD_STATE:
		dprintf(("kdp_readregs THREAD_STATE\n"));
		kdp_getintegerstate(data);
		*size = ARM_THREAD_STATE_COUNT * sizeof(int);
		return KDPERR_NO_ERROR;
#elif defined(__arm64__)
	case ARM_THREAD_STATE64:
		dprintf(("kdp_readregs THREAD_STATE64\n"));
		kdp_getintegerstate(data);
		*size = ARM_THREAD_STATE64_COUNT * sizeof(int);
		return KDPERR_NO_ERROR;
#endif

	case ARM_VFP_STATE:
		dprintf(("kdp_readregs THREAD_FPSTATE\n"));
		bzero((char *) data, sizeof(struct arm_vfp_state));
		*size = ARM_VFP_STATE_COUNT * sizeof(int);
		return KDPERR_NO_ERROR;

	default:
		dprintf(("kdp_readregs bad flavor %d\n", flavor));
		return KDPERR_BADFLAVOR;
	}
}

static void
kdp_setintegerstate(char * state_in)
{
#if defined(__arm__)
	struct arm_thread_state  thread_state;
	struct arm_saved_state  *saved_state;

	bcopy((char *) state_in, (char *) &thread_state, sizeof(struct arm_thread_state));
	saved_state = kdp.saved_state;

	thread_state32_to_saved_state(&thread_state, saved_state);
#elif defined(__arm64__)
	struct arm_thread_state64  thread_state64;
	struct arm_saved_state    *saved_state;

	bcopy((char *) state_in, (char *) &thread_state64, sizeof(struct arm_thread_state64));
	saved_state = kdp.saved_state;
	assert(is_saved_state64(saved_state));

	thread_state64_to_saved_state(&thread_state64, saved_state);
#else
#error Unknown architecture.
#endif
}

kdp_error_t
kdp_machine_write_regs(__unused unsigned int cpu, unsigned int flavor, char * data, __unused int * size)
{
	switch (flavor) {
#if defined(__arm__)
	case ARM_THREAD_STATE:
		dprintf(("kdp_writeregs THREAD_STATE\n"));
		kdp_setintegerstate(data);
		return KDPERR_NO_ERROR;
#elif defined(__arm64__)
	case ARM_THREAD_STATE64:
		dprintf(("kdp_writeregs THREAD_STATE64\n"));
		kdp_setintegerstate(data);
		return KDPERR_NO_ERROR;
#endif

	case ARM_VFP_STATE:
		dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
		return KDPERR_NO_ERROR;

	default:
		dprintf(("kdp_writeregs bad flavor %d\n", flavor));
		return KDPERR_BADFLAVOR;
	}
}

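/*
 * Note: only CPU 0 is advertised to the remote debugger (cpus_mask is
 * hard-coded to 1); its type and subtype come from the slot 0
 * processor information.
 */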
void
kdp_machine_hostinfo(kdp_hostinfo_t * hostinfo)
{
	hostinfo->cpus_mask = 1;
	hostinfo->cpu_type = slot_type(0);
	hostinfo->cpu_subtype = slot_subtype(0);
}

__attribute__((noreturn))
void
kdp_panic(const char * msg)
{
	printf("kdp panic: %s\n", msg);
	while (1) {
	};
}

int
kdp_intr_disbl(void)
{
	return (splhigh());
}

void
kdp_intr_enbl(int s)
{
	splx(s);
}

void
kdp_us_spin(int usec)
{
	delay(usec / 100);
}

void
kdp_call(void)
{
	Debugger("inline call to debugger(machine_startup)");
}

int
kdp_getc(void)
{
	return (cnmaygetc());
}

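/*
 * Note: the breakpoint word handed to the debugger is GDB_TRAP_INSTR1,
 * presumably the same permanently-undefined ARM encoding that
 * kdp_ml_enter_debugger() at the bottom of this file emits inline
 * (.long 0xe7ffdefe) and that kdp_trap() steps over on resume.
 */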
void
kdp_machine_get_breakinsn(uint8_t * bytes, uint32_t * size)
{
	*(uint32_t *)bytes = GDB_TRAP_INSTR1;
	*size = sizeof(uint32_t);
}

void
kdp_sync_cache(void)
{
}

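/*
 * Note: I/O-port and MSR reads/writes are x86 notions; the KDP request
 * handlers below are stubbed out on ARM and return 0 without doing any
 * work.
 */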
int
kdp_machine_ioport_read(kdp_readioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_ioport_write(kdp_writeioport_req_t * rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_msr64_read(kdp_readmsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}

int
kdp_machine_msr64_write(kdp_writemsr64_req_t *rq, caddr_t data, uint16_t lcpu)
{
#pragma unused(rq, data, lcpu)
	return 0;
}
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

void
kdp_trap(unsigned int exception, struct arm_saved_state * saved_state)
{
	handle_debugger_trap(exception, 0, 0, saved_state);

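	/*
	 * handle_debugger_trap() returns here when the debugger resumes the
	 * target; if we stopped on one of the debugger trap instructions,
	 * advance the PC past it so execution does not immediately re-trap.
	 */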
#if defined(__arm__)
	if (saved_state->cpsr & PSR_TF) {
		unsigned short instr = *((unsigned short *)(saved_state->pc));
		if ((instr == (GDB_TRAP_INSTR1 & 0xFFFF)) || (instr == (GDB_TRAP_INSTR2 & 0xFFFF)))
			saved_state->pc += 2;
	} else {
		unsigned int instr = *((unsigned int *)(saved_state->pc));
		if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2))
			saved_state->pc += 4;
	}

#elif defined(__arm64__)
	assert(is_saved_state64(saved_state));

	uint32_t instr = *((uint32_t *)get_saved_state_pc(saved_state));

	/*
	 * As long as we are using the arm32 trap encoding to handle
	 * traps to the debugger, we should identify both variants and
	 * increment past either of them.
	 */
	if ((instr == GDB_TRAP_INSTR1) || (instr == GDB_TRAP_INSTR2))
		set_saved_state_pc(saved_state, get_saved_state_pc(saved_state) + 4);
#else
#error Unknown architecture.
#endif
}

#define ARM32_LR_OFFSET 4
#define ARM64_LR_OFFSET 8
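
/*
 * Note: the stack walkers below assume the saved frame pointer sits at
 * *fp and the saved link register at *(fp + LR_OFFSET): 4 bytes past fp
 * for arm32 frames, 8 bytes for arm64, where a frame record is the
 * {fp, lr} pair.
 */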

/*
 * Since sizeof (struct thread_snapshot) % 4 == 2
 * make sure the compiler does not try to use word-aligned
 * access to this data, which can result in alignment faults
 * that can't be emulated in KDP context.
 */
typedef uint32_t uint32_align2_t __attribute__((aligned(2)));
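
/*
 * Note: aligned(2) lowers the alignment the compiler may assume for
 * this type, so loads and stores through a uint32_align2_t pointer are
 * emitted as 2-byte-safe accesses rather than a single word access
 * that could fault on an unaligned address.
 */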

int
machine_trace_thread(thread_t thread,
                     char * tracepos,
                     char * tracebound,
                     int nframes,
                     boolean_t user_p,
                     boolean_t trace_fp,
                     uint32_t * thread_trace_flags)
{
	uint32_align2_t * tracebuf = (uint32_align2_t *)tracepos;

	vm_size_t framesize = (trace_fp ? 2 : 1) * sizeof(uint32_t);

	vm_offset_t stacklimit = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount = 0;
	uint32_t short_fp = 0;
	vm_offset_t fp = 0;
	vm_offset_t pc, sp;
	vm_offset_t prevfp = 0;
	uint32_t prevlr = 0;
	struct arm_saved_state * state;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map = VM_MAP_NULL;

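	/*
	 * Clamp nframes to the number of frame records that actually fit
	 * between tracepos and tracebound; give up immediately if the
	 * buffer cannot hold even one.
	 */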
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return (0);
	}
	framecount = 0;

	if (user_p) {
		/* Examine the user savearea */
		state = get_user_regs(thread);
		stacklimit = VM_MAX_ADDRESS;
		stacklimit_bottom = VM_MIN_ADDRESS;

		/* Fake up a stack frame for the PC */
		*tracebuf++ = (uint32_t)get_saved_state_pc(state);
		if (trace_fp) {
			*tracebuf++ = (uint32_t)get_saved_state_sp(state);
		}
		framecount++;
		bt_vm_map = thread->task->map;
	} else {
#if defined(__arm64__)
		panic("Attempted to trace kernel thread_t %p as a 32-bit context", thread);
		return 0;
#elif defined(__arm__)
		/* kstackptr may not always be there, so recompute it */
		state = &thread_get_kernel_state(thread)->machine;

		stacklimit = VM_MAX_KERNEL_ADDRESS;
		stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
		bt_vm_map = kernel_map;
#else
#error Unknown architecture.
#endif
	}

	/* Get the frame pointer */
	fp = get_saved_state_fp(state);

	/* Fill in the current link register */
	prevlr = (uint32_t)get_saved_state_lr(state);
	pc = get_saved_state_pc(state);
	sp = get_saved_state_sp(state);

	if (!user_p && !prevlr && !fp && !sp && !pc) {
		return 0;
	}

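	/*
	 * Strip the KASLR slide from kernel return addresses so the
	 * backtrace reports stable, symbolicatable values.
	 */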
	if (!user_p) {
		/* This is safe since we will panic above on __arm64__ if !user_p */
		prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);
	}

	for (; framecount < nframes; framecount++) {

		*tracebuf++ = prevlr;
		if (trace_fp) {
			*tracebuf++ = (uint32_t)fp;
		}

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/* Unaligned frame */
		if (fp & 0x0000003) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {

			boolean_t prev_in_interrupt_stack = FALSE;

			if (!user_p) {
				/*
				 * As a special case, sometimes we are backtracing out of an interrupt
				 * handler, and the stack jumps downward because of the memory allocation
				 * pattern during early boot due to KASLR.
				 */
				int cpu;
				int max_cpu = ml_get_max_cpu_number();

				for (cpu = 0; cpu <= max_cpu; cpu++) {
					cpu_data_t *target_cpu_datap;

					target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
					if (target_cpu_datap == (cpu_data_t *)NULL)
						continue;

					if ((prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) ||
					    (prevfp >= (target_cpu_datap->fiqstack_top - PAGE_SIZE) && prevfp < target_cpu_datap->fiqstack_top)) {
						prev_in_interrupt_stack = TRUE;
						break;
					}
				}
			}

			if (!prev_in_interrupt_stack) {
				/* Corrupt frame pointer? */
				break;
			}
		}
		/* Assume there's a saved link register, and read it */
		kern_virt_addr = machine_trace_thread_get_kva(fp + ARM32_LR_OFFSET, bt_vm_map, thread_trace_flags);

		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prevlr = *(uint32_t *)kern_virt_addr;
		if (!user_p) {
			/* This is safe since we will panic above on __arm64__ if !user_p */
			prevlr = (uint32_t)VM_KERNEL_UNSLIDE(prevlr);
		}

		prevfp = fp;

		/*
		 * Next frame; read the fp value into short_fp first
		 * as it is 32-bit.
		 */
		kern_virt_addr = machine_trace_thread_get_kva(fp, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			short_fp = *(uint32_t *)kern_virt_addr;
			fp = (vm_offset_t) short_fp;
		} else {
			fp = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}

	}
	/* Reset the target pmap */
	machine_trace_thread_clear_validation_cache();
	return ((int)(((char *)tracebuf) - tracepos));
}

int
machine_trace_thread64(thread_t thread,
                       char * tracepos,
                       char * tracebound,
                       int nframes,
                       boolean_t user_p,
                       boolean_t trace_fp,
                       uint32_t * thread_trace_flags)
{
#if defined(__arm__)
#pragma unused(thread, tracepos, tracebound, nframes, user_p, trace_fp, thread_trace_flags)
	return 0;
#elif defined(__arm64__)

	uint64_t * tracebuf = (uint64_t *)tracepos;
	vm_size_t framesize = (trace_fp ? 2 : 1) * sizeof(uint64_t);

	vm_offset_t stacklimit = 0;
	vm_offset_t stacklimit_bottom = 0;
	int framecount = 0;
	vm_offset_t fp = 0;
	vm_offset_t pc = 0;
	vm_offset_t sp = 0;
	vm_offset_t prevfp = 0;
	uint64_t prevlr = 0;
	struct arm_saved_state * state;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map = VM_MAP_NULL;

	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
	if (!nframes) {
		return (0);
	}
	framecount = 0;

	if (user_p) {
		/* Examine the user savearea */
		state = thread->machine.upcb;
		stacklimit = MACH_VM_MAX_ADDRESS;
		stacklimit_bottom = MACH_VM_MIN_ADDRESS;

		/* Fake up a stack frame for the PC */
		*tracebuf++ = get_saved_state_pc(state);
		if (trace_fp) {
			*tracebuf++ = get_saved_state_sp(state);
		}
		framecount++;
		bt_vm_map = thread->task->map;
	} else {
		/* kstackptr may not always be there, so recompute it */
		state = &thread_get_kernel_state(thread)->machine.ss;
		stacklimit = VM_MAX_KERNEL_ADDRESS;
		stacklimit_bottom = VM_MIN_KERNEL_ADDRESS;
		bt_vm_map = kernel_map;
	}

	/* Get the frame pointer */
	fp = get_saved_state_fp(state);

	/* Fill in the current link register */
	prevlr = get_saved_state_lr(state);
	pc = get_saved_state_pc(state);
	sp = get_saved_state_sp(state);

	if (!user_p && !prevlr && !fp && !sp && !pc) {
		return 0;
	}

	if (!user_p) {
		prevlr = VM_KERNEL_UNSLIDE(prevlr);
	}

	for (; framecount < nframes; framecount++) {

		*tracebuf++ = prevlr;
		if (trace_fp) {
			*tracebuf++ = fp;
		}

		/* Invalid frame */
		if (!fp) {
			break;
		}
		/*
		 * Unaligned frame; given that the stack register must always be
		 * 16-byte aligned, we are assured 8-byte alignment of the saved
		 * frame pointer and link register.
		 */
		if (fp & 0x0000007) {
			break;
		}
		/* Frame is out of range, maybe a user FP while doing kernel BT */
		if (fp > stacklimit) {
			break;
		}
		if (fp < stacklimit_bottom) {
			break;
		}
		/* Stack grows downward */
		if (fp < prevfp) {
			boolean_t switched_stacks = FALSE;

			if (!user_p) {
				/*
				 * As a special case, sometimes we are backtracing out of an interrupt
				 * handler, and the stack jumps downward because of the memory allocation
				 * pattern during early boot due to KASLR.
				 */
				int cpu;
				int max_cpu = ml_get_max_cpu_number();

				for (cpu = 0; cpu <= max_cpu; cpu++) {
					cpu_data_t *target_cpu_datap;

					target_cpu_datap = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
					if (target_cpu_datap == (cpu_data_t *)NULL)
						continue;

					if ((prevfp >= (target_cpu_datap->intstack_top - INTSTACK_SIZE) && prevfp < target_cpu_datap->intstack_top) ||
					    (prevfp >= (target_cpu_datap->fiqstack_top - PAGE_SIZE) && prevfp < target_cpu_datap->fiqstack_top)) {
						switched_stacks = TRUE;
						break;
					}
				}
			}

			if (!switched_stacks) {
				/* Corrupt frame pointer? */
				break;
			}
		}

		/* Assume there's a saved link register, and read it */
		kern_virt_addr = machine_trace_thread_get_kva(fp + ARM64_LR_OFFSET, bt_vm_map, thread_trace_flags);

		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prevlr = *(uint64_t *)kern_virt_addr;
		if (!user_p) {
			prevlr = VM_KERNEL_UNSLIDE(prevlr);
		}

		prevfp = fp;
		/* Next frame */
		kern_virt_addr = machine_trace_thread_get_kva(fp, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			fp = *(uint64_t *)kern_virt_addr;
		} else {
			fp = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}

	}
	/* Reset the target pmap */
	machine_trace_thread_clear_validation_cache();
	return ((int)(((char *)tracebuf) - tracepos));
#else
#error Unknown architecture.
#endif
}

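/*
 * Note: 0xe7ffdefe is a permanently-undefined ARM encoding (UDF);
 * executing it raises the undefined-instruction exception that drops
 * the kernel into the debugger, and it appears to be the same word
 * that kdp_trap() recognizes and steps over on resume.
 */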
void
kdp_ml_enter_debugger(void)
{
	__asm__ volatile(".long 0xe7ffdefe");
}