/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_kdp.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/exception_types.h>
#include <kern/cpu_data.h>
#include <i386/trap.h>
#include <i386/mp.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_callout.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
#include <kern/machine.h> /* for halt_all_cpus */
#include <libkern/OSAtomic.h>

#include <kern/thread.h>
#include <i386/thread.h>
#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <kern/kalloc.h>

#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) printf x
#else
#define dprintf(x)
#endif

extern cpu_type_t cpuid_cputype(void);
extern cpu_subtype_t cpuid_cpusubtype(void);

extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
extern void machine_trace_thread_clear_validation_cache(void);
extern vm_map_t kernel_map;

void print_saved_state(void *);
void kdp_call(void);
int kdp_getc(void);
boolean_t kdp_call_kdb(void);
void kdp_getstate(x86_thread_state64_t *);
void kdp_setstate(x86_thread_state64_t *);
void kdp_print_phys(int);
unsigned machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);

static void kdp_callouts(kdp_event_t event);

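/*
 * Build a KDP_EXCEPTION request describing the given exception (type,
 * code, subcode) so it can be sent to the remote debugger, and note that
 * an acknowledgement is expected before the exception is considered
 * delivered.
 */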
void
kdp_exception(
    unsigned char   *pkt,
    int             *len,
    unsigned short  *remote_port,
    unsigned int    exception,
    unsigned int    code,
    unsigned int    subcode
)
{
    kdp_exception_t *rq = (kdp_exception_t *)pkt;

    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof (*rq);

    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);

    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    kdp.exception_ack_needed = TRUE;

    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}

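/*
 * Process an exception acknowledgement from the remote debugger; when the
 * sequence number matches the outstanding exception, clear the pending-ack
 * flag and advance the sequence.
 */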
boolean_t
kdp_exception_ack(
    unsigned char   *pkt,
    int             len
)
{
    kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;

    if (((unsigned int) len) < sizeof (*rq))
        return(FALSE);

    if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
        return(FALSE);

    dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

    if (rq->hdr.seq == kdp.exception_seq) {
        kdp.exception_ack_needed = FALSE;
        kdp.exception_seq++;
    }
    return(TRUE);
}

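/*
 * Copy the saved register state of the interrupted context into the
 * caller-supplied x86_thread_state64_t.
 */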
void
kdp_getstate(
    x86_thread_state64_t *state
)
{
    x86_saved_state64_t *saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;

    state->rax = saved_state->rax;
    state->rbx = saved_state->rbx;
    state->rcx = saved_state->rcx;
    state->rdx = saved_state->rdx;
    state->rdi = saved_state->rdi;
    state->rsi = saved_state->rsi;
    state->rbp = saved_state->rbp;

    state->r8  = saved_state->r8;
    state->r9  = saved_state->r9;
    state->r10 = saved_state->r10;
    state->r11 = saved_state->r11;
    state->r12 = saved_state->r12;
    state->r13 = saved_state->r13;
    state->r14 = saved_state->r14;
    state->r15 = saved_state->r15;

    state->rsp = saved_state->isf.rsp;
    state->rflags = saved_state->isf.rflags;
    state->rip = saved_state->isf.rip;

    state->cs = saved_state->isf.cs;
    state->fs = saved_state->fs;
    state->gs = saved_state->gs;
}

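/*
 * Write a caller-supplied x86_thread_state64_t back into the saved
 * interrupt state, so the changes take effect when the debugger resumes
 * the target.
 */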
void
kdp_setstate(
    x86_thread_state64_t *state
)
{
    x86_saved_state64_t *saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;
    saved_state->rax = state->rax;
    saved_state->rbx = state->rbx;
    saved_state->rcx = state->rcx;
    saved_state->rdx = state->rdx;
    saved_state->rdi = state->rdi;
    saved_state->rsi = state->rsi;
    saved_state->rbp = state->rbp;
    saved_state->r8  = state->r8;
    saved_state->r9  = state->r9;
    saved_state->r10 = state->r10;
    saved_state->r11 = state->r11;
    saved_state->r12 = state->r12;
    saved_state->r13 = state->r13;
    saved_state->r14 = state->r14;
    saved_state->r15 = state->r15;

    saved_state->isf.rflags = state->rflags;
    saved_state->isf.rsp = state->rsp;
    saved_state->isf.rip = state->rip;

    saved_state->fs = (uint32_t)state->fs;
    saved_state->gs = (uint32_t)state->gs;
}

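/*
 * Return the requested register flavor to the debugger.  Floating-point
 * state is reported as all zeroes rather than read from the hardware.
 */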
kdp_error_t
kdp_machine_read_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    int *size
)
{
    static x86_float_state64_t null_fpstate;

    switch (flavor) {

    case x86_THREAD_STATE64:
        dprintf(("kdp_readregs THREAD_STATE64\n"));
        kdp_getstate((x86_thread_state64_t *)data);
        *size = sizeof (x86_thread_state64_t);
        return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE64:
        dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
        *(x86_float_state64_t *)data = null_fpstate;
        *size = sizeof (x86_float_state64_t);
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_readregs bad flavor %d\n", flavor));
        *size = 0;
        return KDPERR_BADFLAVOR;
    }
}

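/*
 * Accept register state from the debugger.  Only the general-purpose
 * flavor is written back; floating-point writes are accepted but ignored.
 */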
kdp_error_t
kdp_machine_write_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    __unused int *size
)
{
    switch (flavor) {

    case x86_THREAD_STATE64:
        dprintf(("kdp_writeregs THREAD_STATE64\n"));
        kdp_setstate((x86_thread_state64_t *)data);
        return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE64:
        dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_writeregs bad flavor %d\n", flavor));
        return KDPERR_BADFLAVOR;
    }
}

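/*
 * Report the mask of configured CPUs and the CPU type/subtype to the
 * debugger.
 */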
void
kdp_machine_hostinfo(
    kdp_hostinfo_t *hostinfo
)
{
    int i;

    hostinfo->cpus_mask = 0;

    for (i = 0; i < machine_info.max_cpus; i++) {
        if (cpu_data_ptr[i] == NULL)
            continue;

        hostinfo->cpus_mask |= (1 << i);
    }

    hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
    hostinfo->cpu_subtype = cpuid_cpusubtype();
}

void
kdp_panic(
    const char *msg
)
{
    kprintf("kdp panic: %s\n", msg);
    __asm__ volatile("hlt");
}

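/*
 * Restart the machine via the platform expert when a restart handler is
 * registered; if that returns, give up and halt all CPUs.
 */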
void
kdp_machine_reboot(void)
{
    printf("Attempting system restart...");
    /* Call the platform specific restart */
    if (PE_halt_restart)
        (*PE_halt_restart)(kPERestartCPU);
    /* If we do reach this, give up */
    halt_all_cpus(TRUE);
}

int
kdp_intr_disbl(void)
{
    return splhigh();
}

void
kdp_intr_enbl(int s)
{
    splx(s);
}

int
kdp_getc(void)
{
    return cnmaygetc();
}

void
kdp_us_spin(int usec)
{
    delay(usec/100);
}

void print_saved_state(void *state)
{
    x86_saved_state64_t *saved_state;

    saved_state = state;

    kprintf("pc = 0x%llx\n", saved_state->isf.rip);
    kprintf("cr2= 0x%llx\n", saved_state->cr2);
    kprintf("rp = TODO FIXME\n");
    kprintf("sp = %p\n", saved_state);
}

void
kdp_sync_cache(void)
{
    return; /* No op here. */
}

void
kdp_call(void)
{
    __asm__ volatile ("int $3"); /* Let the processor do the work */
}

typedef struct _cframe_t {
    struct _cframe_t *prev;
    unsigned caller;
    unsigned args[0];
} cframe_t;

extern pt_entry_t *DMAP2;
extern caddr_t DADDR2;

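/*
 * Debug helper: map the page containing the given physical address through
 * the DMAP2/DADDR2 window and dump the first 100 32-bit words.
 */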
void
kdp_print_phys(int src)
{
    unsigned int *iptr;
    int i;

    *(int *) DMAP2 = 0x63 | (src & 0xfffff000);
    invlpg((uintptr_t) DADDR2);
    iptr = (unsigned int *) DADDR2;
    for (i = 0; i < 100; i++) {
        kprintf("0x%x ", *iptr++);
        if ((i % 8) == 0)
            kprintf("\n");
    }
    kprintf("\n");
    *(int *) DMAP2 = 0;
}

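/*
 * Entry point from the kernel trap handler: translate the trap into a Mach
 * exception and hand control to the debugger (or to a registered debugger
 * callback), with the other CPUs held in mp_kdp_enter()/mp_kdp_exit().
 * Returns TRUE when the trap was handled here.
 */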
boolean_t
kdp_i386_trap(
    unsigned int        trapno,
    x86_saved_state64_t *saved_state,
    kern_return_t       result,
    vm_offset_t         va
)
{
    unsigned int exception, subcode = 0, code;

    if (trapno != T_INT3 && trapno != T_DEBUG) {
        kprintf("Debugger: Unexpected kernel trap number: "
            "0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
            trapno, saved_state->isf.rip, saved_state->cr2);
        if (!kdp.is_conn)
            return FALSE;
    }

    mp_kdp_enter();
    kdp_callouts(KDP_EVENT_ENTER);

    if (saved_state->isf.rflags & EFL_TF) {
        enable_preemption_no_check();
    }

    switch (trapno) {

    case T_DIVIDE_ERROR:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_DIVERR;
        break;

    case T_OVERFLOW:
        exception = EXC_SOFTWARE;
        code = EXC_I386_INTOFLT;
        break;

    case T_OUT_OF_BOUNDS:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_BOUNDFLT;
        break;

    case T_INVALID_OPCODE:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOPFLT;
        break;

    case T_SEGMENT_NOT_PRESENT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_STACK_FAULT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_GENERAL_PROTECTION:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_GPFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_PAGE_FAULT:
        exception = EXC_BAD_ACCESS;
        code = result;
        subcode = (unsigned int)va;
        break;

    case T_WATCHPOINT:
        exception = EXC_SOFTWARE;
        code = EXC_I386_ALIGNFLT;
        break;

    case T_DEBUG:
    case T_INT3:
        exception = EXC_BREAKPOINT;
        code = EXC_I386_BPTFLT;
        break;

    default:
        exception = EXC_BAD_INSTRUCTION;
        code = trapno;
        break;
    }

    if (current_cpu_datap()->cpu_fatal_trap_state) {
        current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
        saved_state = current_cpu_datap()->cpu_fatal_trap_state;
    }

    if (debugger_callback) {
        unsigned int initial_not_in_kdp = not_in_kdp;
        not_in_kdp = 0;
        debugger_callback->error = debugger_callback->callback(debugger_callback->callback_context);
        not_in_kdp = initial_not_in_kdp;
    } else {
        kdp_raise_exception(exception, code, subcode, saved_state);
    }

    /* If the instruction single step bit is set, disable kernel preemption */
    if (saved_state->isf.rflags & EFL_TF) {
        disable_preemption();
    }

    kdp_callouts(KDP_EVENT_EXIT);
    mp_kdp_exit();

    return TRUE;
}

boolean_t
kdp_call_kdb(
    void)
{
    return(FALSE);
}

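/*
 * Return the breakpoint instruction the debugger should plant: the
 * single-byte INT3 opcode (0xcc).
 */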
void
kdp_machine_get_breakinsn(
    uint8_t *bytes,
    uint32_t *size
)
{
    bytes[0] = 0xcc;
    *size = 1;
}

#define RETURN_OFFSET 4

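/*
 * Walk the frame-pointer chain of a 32-bit user thread, copying return
 * addresses (and, when trace_fp is set, frame pointers) into the trace
 * buffer.  The walk stops on a NULL, unaligned, non-increasing, or
 * unmappable frame, or after nframes entries.  Returns the number of
 * bytes written to tracepos.
 */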
int
machine_trace_thread(thread_t thread,
    char *tracepos,
    char *tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t *thread_trace_flags)
{
    uint32_t *tracebuf = (uint32_t *)tracepos;
    uint32_t framesize = (trace_fp ? 2 : 1) * sizeof(uint32_t);

    uint32_t fence = 0;
    uint32_t stackptr = 0;
    uint32_t stacklimit = 0xfc000000;
    int framecount = 0;
    uint32_t prev_eip = 0;
    uint32_t prevsp = 0;
    vm_offset_t kern_virt_addr = 0;
    vm_map_t bt_vm_map = VM_MAP_NULL;

    nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

    if (thread->machine.iss == NULL) {
        // no register states to backtrace, probably thread is terminating
        return 0;
    }

    if (user_p) {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);
        prev_eip = iss32->eip;
        stackptr = iss32->ebp;

        stacklimit = 0xffffffff;
        bt_vm_map = thread->task->map;
    }
    else
        panic("32-bit trace attempted on 64-bit kernel");

    for (framecount = 0; framecount < nframes; framecount++) {

        *tracebuf++ = prev_eip;
        if (trace_fp) {
            *tracebuf++ = stackptr;
        }

        /* Invalid frame, or hit fence */
        if (!stackptr || (stackptr == fence)) {
            break;
        }

        /* Unaligned frame */
        if (stackptr & 0x0000003) {
            break;
        }

        if (stackptr <= prevsp) {
            break;
        }

        if (stackptr > stacklimit) {
            break;
        }

        kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET, bt_vm_map, thread_trace_flags);

        if (!kern_virt_addr) {
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
            break;
        }

        prev_eip = *(uint32_t *)kern_virt_addr;

        prevsp = stackptr;

        kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

        if (kern_virt_addr) {
            stackptr = *(uint32_t *)kern_virt_addr;
        } else {
            stackptr = 0;
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
        }
    }

    machine_trace_thread_clear_validation_cache();

    return (uint32_t) (((char *) tracebuf) - tracepos);
}

#define RETURN_OFFSET64 8
/* Routine to encapsulate the 64-bit address read hack */
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
{
    return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}

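/*
 * 64-bit counterpart of machine_trace_thread: walks either a user thread's
 * saved state or the thread's kernel stack, unsliding kernel return
 * addresses before they are recorded.  Returns the number of bytes written
 * to tracepos.
 */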
int
machine_trace_thread64(thread_t thread,
    char *tracepos,
    char *tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t *thread_trace_flags)
{
    uint64_t *tracebuf = (uint64_t *)tracepos;
    unsigned framesize = (trace_fp ? 2 : 1) * sizeof(addr64_t);

    uint32_t fence = 0;
    addr64_t stackptr = 0;
    int framecount = 0;
    addr64_t prev_rip = 0;
    addr64_t prevsp = 0;
    vm_offset_t kern_virt_addr = 0;
    vm_map_t bt_vm_map = VM_MAP_NULL;

    if (thread->machine.iss == NULL) {
        // no register states to backtrace, probably thread is terminating
        return 0;
    }

    nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

    if (user_p) {
        x86_saved_state64_t *iss64;
        iss64 = USER_REGS64(thread);
        prev_rip = iss64->isf.rip;
        stackptr = iss64->rbp;
        bt_vm_map = thread->task->map;
    }
    else {
        stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
        prev_rip = STACK_IKS(thread->kernel_stack)->k_rip;
        prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
        bt_vm_map = kernel_map;
    }

    for (framecount = 0; framecount < nframes; framecount++) {

        *tracebuf++ = prev_rip;
        if (trace_fp) {
            *tracebuf++ = stackptr;
        }

        if (!stackptr || (stackptr == fence)) {
            break;
        }
        if (stackptr & 0x0000007) {
            break;
        }
        if (stackptr <= prevsp) {
            break;
        }

        kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET64, bt_vm_map, thread_trace_flags);
        if (!kern_virt_addr) {
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
            break;
        }

        prev_rip = *(uint64_t *)kern_virt_addr;
        if (!user_p) {
            prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
        }

        prevsp = stackptr;

        kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

        if (kern_virt_addr) {
            stackptr = *(uint64_t *)kern_virt_addr;
        } else {
            stackptr = 0;
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
        }
    }

    machine_trace_thread_clear_validation_cache();

    return (uint32_t) (((char *) tracebuf) - tracepos);
}

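/*
 * Singly-linked list of callouts to be invoked when the debugger is
 * entered or exited.
 */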
static struct kdp_callout {
    struct kdp_callout  *callout_next;
    kdp_callout_fn_t    callout_fn;
    void                *callout_arg;
} *kdp_callout_list = NULL;

/*
 * Called from kernel context to register a kdp event callout.
 */
void
kdp_register_callout(
    kdp_callout_fn_t fn,
    void *arg)
{
    struct kdp_callout *kcp;
    struct kdp_callout *list_head;

    kcp = kalloc(sizeof(*kcp));
    if (kcp == NULL)
        panic("kdp_register_callout() kalloc failed");

    kcp->callout_fn = fn;
    kcp->callout_arg = arg;

    /* Lock-less list insertion using compare and exchange. */
    do {
        list_head = kdp_callout_list;
        kcp->callout_next = list_head;
    } while (!OSCompareAndSwapPtr(list_head, kcp, (void * volatile *)&kdp_callout_list));
}
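
/*
 * Illustrative sketch only (not part of the original source): a client
 * could register a callout to quiesce its hardware around debugger entry.
 * The names my_kdp_callout, my_softc, quiesce_device, and resume_device
 * below are hypothetical.
 *
 *	static void
 *	my_kdp_callout(void *arg, kdp_event_t event)
 *	{
 *		if (event == KDP_EVENT_ENTER)
 *			quiesce_device(arg);	// hypothetical helper
 *		else if (event == KDP_EVENT_EXIT)
 *			resume_device(arg);	// hypothetical helper
 *	}
 *
 *	kdp_register_callout(my_kdp_callout, my_softc);
 */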

/*
 * Called at exception/panic time when entering or exiting kdp.
 * We are single-threaded at this time and so we don't use locks.
 */
static void
kdp_callouts(kdp_event_t event)
{
    struct kdp_callout *kcp = kdp_callout_list;

    while (kcp) {
        kcp->callout_fn(kcp->callout_arg, event);
        kcp = kcp->callout_next;
    }
}

void
kdp_ml_enter_debugger(void)
{
    __asm__ __volatile__("int3");
}