/*
 * Source: apple/xnu, release xnu-7195.101.1
 * File:   osfmk/kdp/ml/x86_64/kdp_machdep.c
 * (Mirrored via git.saurik.com Git - apple/xnu.git)
 */
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_kdp.h>
30 #include <mach/mach_types.h>
31 #include <mach/machine.h>
32 #include <mach/exception_types.h>
33 #include <kern/cpu_data.h>
34 #include <i386/trap.h>
35 #include <i386/mp.h>
36 #include <kdp/kdp_internal.h>
37 #include <mach-o/loader.h>
38 #include <mach-o/nlist.h>
39 #include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
40 #include <kern/machine.h> /* for halt_all_cpus */
41 #include <libkern/OSAtomic.h>
42
43 #include <kern/thread.h>
44 #include <i386/thread.h>
45 #include <vm/vm_map.h>
46 #include <i386/pmap.h>
47
48 #define KDP_TEST_HARNESS 0
49 #if KDP_TEST_HARNESS
50 #define dprintf(x) printf x
51 #else
52 #define dprintf(x)
53 #endif
54
55 extern cpu_type_t cpuid_cputype(void);
56 extern cpu_subtype_t cpuid_cpusubtype(void);
57
58 extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
59 extern void machine_trace_thread_clear_validation_cache(void);
60 extern vm_map_t kernel_map;
61
62 void print_saved_state(void *);
63 void kdp_call(void);
64 int kdp_getc(void);
65 void kdp_getstate(x86_thread_state64_t *);
66 void kdp_setstate(x86_thread_state64_t *);
67 unsigned machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
68
69 void
70 kdp_exception(
71 unsigned char *pkt,
72 int *len,
73 unsigned short *remote_port,
74 unsigned int exception,
75 unsigned int code,
76 unsigned int subcode
77 )
78 {
79 kdp_exception_t *rq = (kdp_exception_t *)pkt;
80
81 rq->hdr.request = KDP_EXCEPTION;
82 rq->hdr.is_reply = 0;
83 rq->hdr.seq = kdp.exception_seq;
84 rq->hdr.key = 0;
85 rq->hdr.len = sizeof(*rq);
86
87 rq->n_exc_info = 1;
88 rq->exc_info[0].cpu = 0;
89 rq->exc_info[0].exception = exception;
90 rq->exc_info[0].code = code;
91 rq->exc_info[0].subcode = subcode;
92
93 rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);
94
95 bcopy((char *)rq, (char *)pkt, rq->hdr.len);
96
97 kdp.exception_ack_needed = TRUE;
98
99 *remote_port = kdp.exception_port;
100 *len = rq->hdr.len;
101 }
102
103 boolean_t
104 kdp_exception_ack(
105 unsigned char *pkt,
106 int len
107 )
108 {
109 kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;
110
111 if (((unsigned int) len) < sizeof(*rq)) {
112 return FALSE;
113 }
114
115 if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
116 return FALSE;
117 }
118
119 dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
120
121 if (rq->hdr.seq == kdp.exception_seq) {
122 kdp.exception_ack_needed = FALSE;
123 kdp.exception_seq++;
124 }
125 return TRUE;
126 }
127
128 void
129 kdp_getstate(
130 x86_thread_state64_t *state
131 )
132 {
133 x86_saved_state64_t *saved_state;
134
135 saved_state = (x86_saved_state64_t *)kdp.saved_state;
136
137 state->rax = saved_state->rax;
138 state->rbx = saved_state->rbx;
139 state->rcx = saved_state->rcx;
140 state->rdx = saved_state->rdx;
141 state->rdi = saved_state->rdi;
142 state->rsi = saved_state->rsi;
143 state->rbp = saved_state->rbp;
144
145 state->r8 = saved_state->r8;
146 state->r9 = saved_state->r9;
147 state->r10 = saved_state->r10;
148 state->r11 = saved_state->r11;
149 state->r12 = saved_state->r12;
150 state->r13 = saved_state->r13;
151 state->r14 = saved_state->r14;
152 state->r15 = saved_state->r15;
153
154 state->rsp = saved_state->isf.rsp;
155 state->rflags = saved_state->isf.rflags;
156 state->rip = saved_state->isf.rip;
157
158 state->cs = saved_state->isf.cs;
159 state->fs = saved_state->fs;
160 state->gs = saved_state->gs;
161 }
162
163
164 void
165 kdp_setstate(
166 x86_thread_state64_t *state
167 )
168 {
169 x86_saved_state64_t *saved_state;
170
171 saved_state = (x86_saved_state64_t *)kdp.saved_state;
172 saved_state->rax = state->rax;
173 saved_state->rbx = state->rbx;
174 saved_state->rcx = state->rcx;
175 saved_state->rdx = state->rdx;
176 saved_state->rdi = state->rdi;
177 saved_state->rsi = state->rsi;
178 saved_state->rbp = state->rbp;
179 saved_state->r8 = state->r8;
180 saved_state->r9 = state->r9;
181 saved_state->r10 = state->r10;
182 saved_state->r11 = state->r11;
183 saved_state->r12 = state->r12;
184 saved_state->r13 = state->r13;
185 saved_state->r14 = state->r14;
186 saved_state->r15 = state->r15;
187
188 saved_state->isf.rflags = state->rflags;
189 saved_state->isf.rsp = state->rsp;
190 saved_state->isf.rip = state->rip;
191
192 saved_state->fs = (uint32_t)state->fs;
193 saved_state->gs = (uint32_t)state->gs;
194 }
195
196
197 kdp_error_t
198 kdp_machine_read_regs(
199 __unused unsigned int cpu,
200 unsigned int flavor,
201 char *data,
202 int *size
203 )
204 {
205 static x86_float_state64_t null_fpstate;
206
207 switch (flavor) {
208 case x86_THREAD_STATE64:
209 dprintf(("kdp_readregs THREAD_STATE64\n"));
210 kdp_getstate((x86_thread_state64_t *)data);
211 *size = sizeof(x86_thread_state64_t);
212 return KDPERR_NO_ERROR;
213
214 case x86_FLOAT_STATE64:
215 dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
216 *(x86_float_state64_t *)data = null_fpstate;
217 *size = sizeof(x86_float_state64_t);
218 return KDPERR_NO_ERROR;
219
220 default:
221 dprintf(("kdp_readregs bad flavor %d\n", flavor));
222 *size = 0;
223 return KDPERR_BADFLAVOR;
224 }
225 }
226
227 kdp_error_t
228 kdp_machine_write_regs(
229 __unused unsigned int cpu,
230 unsigned int flavor,
231 char *data,
232 __unused int *size
233 )
234 {
235 switch (flavor) {
236 case x86_THREAD_STATE64:
237 dprintf(("kdp_writeregs THREAD_STATE64\n"));
238 kdp_setstate((x86_thread_state64_t *)data);
239 return KDPERR_NO_ERROR;
240
241 case x86_FLOAT_STATE64:
242 dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
243 return KDPERR_NO_ERROR;
244
245 default:
246 dprintf(("kdp_writeregs bad flavor %d\n", flavor));
247 return KDPERR_BADFLAVOR;
248 }
249 }
250
251
252
253 void
254 kdp_machine_hostinfo(
255 kdp_hostinfo_t *hostinfo
256 )
257 {
258 int i;
259
260 hostinfo->cpus_mask = 0;
261
262 for (i = 0; i < machine_info.max_cpus; i++) {
263 if (cpu_data_ptr[i] == NULL) {
264 continue;
265 }
266
267 hostinfo->cpus_mask |= (1 << i);
268 }
269
270 hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
271 hostinfo->cpu_subtype = cpuid_cpusubtype();
272 }
273
/*
 * Fatal error inside the KDP machinery: print a tagged panic message and
 * halt this CPU permanently.  There is no recovery from a KDP panic.
 */
void
kdp_panic(
	const char	*fmt,
	...
	)
{
	char	buf[256];
	va_list	ap;

	/* Prefix the caller's format string, then emit the formatted text. */
	va_start(ap, fmt);
	(void) snprintf(buf, sizeof(buf), "kdp panic: %s", fmt);
	vprintf(buf, ap);
	va_end(ap);

	/* Park this CPU. */
	__asm__ volatile ("hlt");
}
290
/*
 * Raise the interrupt priority to the highest level so the debugger can
 * run undisturbed; returns the previous spl level for a later
 * kdp_intr_enbl().
 */
int
kdp_intr_disbl(void)
{
	return splhigh();
}
296
/* Restore the interrupt priority level saved by kdp_intr_disbl(). */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
302
/* Non-blocking console read: returns whatever cnmaygetc() yields. */
int
kdp_getc(void)
{
	return cnmaygetc();
}
308
/*
 * Busy-wait for roughly 'usec' microseconds.
 * NOTE(review): the argument is divided by 100 before being passed to
 * delay(); confirm delay()'s expected units before relying on the exact
 * duration.
 */
void
kdp_us_spin(int usec)
{
	delay(usec / 100);
}
314
315 void
316 print_saved_state(void *state)
317 {
318 x86_saved_state64_t *saved_state;
319
320 saved_state = state;
321
322 kprintf("pc = 0x%llx\n", saved_state->isf.rip);
323 kprintf("cr2= 0x%llx\n", saved_state->cr2);
324 kprintf("rp = TODO FIXME\n");
325 kprintf("sp = %p\n", saved_state);
326 }
327
/*
 * Nothing to do: x86 caches are coherent, so memory the debugger reads or
 * writes needs no explicit synchronization.
 */
void
kdp_sync_cache(void)
{
}
333
/* Enter the debugger by raising a breakpoint trap (INT3) on this CPU. */
void
kdp_call(void)
{
	__asm__ volatile ("int $3"); /* Let the processor do the work */
}
339
340
/*
 * Legacy 32-bit call-frame layout: saved frame pointer, return address,
 * then the caller's arguments.  args[0] is the pre-C99 zero-length-array
 * idiom for trailing variable-length data.
 */
typedef struct _cframe_t {
	struct _cframe_t        *prev;   /* caller's frame pointer */
	unsigned                caller;  /* return address */
	unsigned                args[0]; /* variable-length argument area */
} cframe_t;
346
/*
 * KDP entry point for kernel traps on x86_64.  Maps the hardware trap
 * number onto a Mach exception triple (exception/code/subcode), then hands
 * control to the debugger core via handle_debugger_trap() with interrupts
 * disabled and preemption held off.  Returns TRUE when the trap was
 * handled, FALSE for an unexpected trap while no debugger is connected.
 */
boolean_t
kdp_i386_trap(
	unsigned int            trapno,
	x86_saved_state64_t     *saved_state,
	kern_return_t   result,
	vm_offset_t             va
	)
{
	unsigned int exception, code, subcode = 0;
	boolean_t prev_interrupts_state;

	/* Anything other than a breakpoint/single-step trap is unexpected
	 * here; log it, and bail out unless a debugger is already attached. */
	if (trapno != T_INT3 && trapno != T_DEBUG) {
		kprintf("Debugger: Unexpected kernel trap number: "
		    "0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
		    trapno, saved_state->isf.rip, saved_state->cr2);
		if (!kdp.is_conn) {
			return FALSE;
		}
	}

	/* Quiesce this CPU while the debugger examines state. */
	prev_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	/* NOTE(review): when the trap flag (single-step) is set, preemption
	 * is re-enabled here and re-disabled after the debugger returns
	 * below, inverting the usual pairing — confirm intent against the
	 * single-step path in i386/trap.c before changing. */
	if (saved_state->isf.rflags & EFL_TF) {
		enable_preemption_no_check();
	}

	/* Translate the hardware trap number into a Mach exception. */
	switch (trapno) {
	case T_DIVIDE_ERROR:
		exception = EXC_ARITHMETIC;
		code = EXC_I386_DIVERR;
		break;

	case T_OVERFLOW:
		exception = EXC_SOFTWARE;
		code = EXC_I386_INTOFLT;
		break;

	case T_OUT_OF_BOUNDS:
		exception = EXC_ARITHMETIC;
		code = EXC_I386_BOUNDFLT;
		break;

	case T_INVALID_OPCODE:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOPFLT;
		break;

	/* For segment-related faults, the hardware error code becomes the
	 * exception subcode. */
	case T_SEGMENT_NOT_PRESENT:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_STACK_FAULT:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_GENERAL_PROTECTION:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	/* Page faults report the VM result as code and the faulting VA as
	 * subcode. */
	case T_PAGE_FAULT:
		exception = EXC_BAD_ACCESS;
		code = result;
		subcode = (unsigned int)va;
		break;

	case T_WATCHPOINT:
		exception = EXC_SOFTWARE;
		code = EXC_I386_ALIGNFLT;
		break;

	/* Breakpoints and single-step both surface as EXC_BREAKPOINT. */
	case T_DEBUG:
	case T_INT3:
		exception = EXC_BREAKPOINT;
		code = EXC_I386_BPTFLT;
		break;

	default:
		exception = EXC_BAD_INSTRUCTION;
		code = trapno;
		break;
	}

	/* If this CPU recorded a fatal trap state earlier, present that
	 * state to the debugger and stash the current (post-fatal) one. */
	if (current_cpu_datap()->cpu_fatal_trap_state) {
		current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
		saved_state = current_cpu_datap()->cpu_fatal_trap_state;
	}

	handle_debugger_trap(exception, code, subcode, saved_state);

	enable_preemption();
	ml_set_interrupts_enabled(prev_interrupts_state);

	/* If the instruction single step bit is set, disable kernel preemption
	 */
	if (saved_state->isf.rflags & EFL_TF) {
		disable_preemption();
	}

	return TRUE;
}
454
/*
 * Report the architecture's breakpoint instruction to the debugger: a
 * single INT3 opcode byte (0xCC).
 */
void
kdp_machine_get_breakinsn(
	uint8_t *bytes,
	uint32_t *size
	)
{
	*size = 1;
	bytes[0] = 0xcc;        /* INT3 */
}
464
#define RETURN_OFFSET 4 /* offset of the return address within a 32-bit frame */

/*
 * Walk a 32-bit user thread's frame-pointer chain and record up to
 * 'nframes' return addresses into 'tracepos' (bounded by 'tracebound').
 * Kernel tracing (user_p == FALSE) is unsupported on the 64-bit kernel and
 * panics.  On an unreadable frame, sets kThreadTruncatedBT in
 * *thread_trace_flags (when non-NULL) and stops.  Returns the number of
 * bytes written to the trace buffer.
 */
int
machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    uint32_t * thread_trace_flags)
{
	uint32_t * tracebuf = (uint32_t *)tracepos;
	uint32_t framesize = sizeof(uint32_t);  /* one 32-bit PC per frame */

	uint32_t fence = 0;                     /* frame-chain terminator */
	uint32_t stackptr = 0;
	uint32_t stacklimit = 0xfc000000;
	int framecount = 0;
	uint32_t prev_eip = 0;
	uint32_t prevsp = 0;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map = VM_MAP_NULL;

	/* Clamp nframes to what fits in [tracepos, tracebound). */
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

	if (user_p) {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);
		prev_eip = iss32->eip;
		stackptr = iss32->ebp;

		stacklimit = 0xffffffff;        /* full 32-bit user range */
		bt_vm_map = thread->task->map;
	} else {
		panic("32-bit trace attempted on 64-bit kernel");
	}

	for (framecount = 0; framecount < nframes; framecount++) {
		*tracebuf++ = prev_eip;

		/* Invalid frame, or hit fence */
		if (!stackptr || (stackptr == fence)) {
			break;
		}

		/* Unaligned frame */
		if (stackptr & 0x0000003) {
			break;
		}

		/* Frame pointers must strictly ascend. */
		if (stackptr <= prevsp) {
			break;
		}

		if (stackptr > stacklimit) {
			break;
		}

		/* Map the saved-return-address slot into a kernel VA. */
		kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET, bt_vm_map, thread_trace_flags);

		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prev_eip = *(uint32_t *)kern_virt_addr;

		prevsp = stackptr;

		/* Follow the saved frame pointer to the next frame. */
		kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			stackptr = *(uint32_t *)kern_virt_addr;
		} else {
			stackptr = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}
	}

	machine_trace_thread_clear_validation_cache();

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
552
553
#define RETURN_OFFSET64 8 /* offset of the return address within a 64-bit frame */
/* Routine to encapsulate the 64-bit address read hack*/
/*
 * Read 'len' bytes at 64-bit address 'srcaddr' into 'dstaddr' via the KDP
 * VM read path; returns the number of bytes actually copied.
 */
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
{
	return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}
561
/*
 * Walk a 64-bit thread's frame-pointer chain (user or kernel) and record
 * up to 'nframes' return addresses into 'tracepos' (bounded by
 * 'tracebound').  When 'fp' is 0 the walk starts from the thread's saved
 * RBP; for user threads the saved RSP is also reported via 'sp' when
 * non-NULL.  Kernel return addresses are unslid before being recorded.
 * Sets kThreadTruncatedBT in *thread_trace_flags (when non-NULL) if a
 * frame could not be read.  Returns the number of bytes written.
 */
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    uint32_t * thread_trace_flags,
    uint64_t *sp,
    vm_offset_t fp)
{
	uint64_t * tracebuf = (uint64_t *)tracepos;
	unsigned framesize = sizeof(addr64_t);  /* one 64-bit PC per frame */

	uint32_t fence = 0;                     /* frame-chain terminator */
	addr64_t stackptr = 0;
	int framecount = 0;
	addr64_t prev_rip = 0;
	addr64_t prevsp = 0;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map = VM_MAP_NULL;

	/* Clamp nframes to what fits in [tracepos, tracebound). */
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

	if (user_p) {
		x86_saved_state64_t *iss64;
		iss64 = USER_REGS64(thread);
		prev_rip = iss64->isf.rip;
		if (fp == 0) {
			stackptr = iss64->rbp;
		}
		bt_vm_map = thread->task->map;
		if (sp && user_p) {
			*sp = iss64->isf.rsp;
		}
	} else {
		/* Kernel thread: start from the saved kernel-stack context. */
		if (fp == 0) {
			stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
		}
		prev_rip = STACK_IKS(thread->kernel_stack)->k_rip;
		prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
		bt_vm_map = kernel_map;
	}

	for (framecount = 0; framecount < nframes; framecount++) {
		*tracebuf++ = prev_rip;

		/* Invalid frame, or hit the fence terminator. */
		if (!stackptr || (stackptr == fence)) {
			break;
		}
		/* Frames must be 8-byte aligned. */
		if (stackptr & 0x0000007) {
			break;
		}
		/* Frame pointers must strictly ascend. */
		if (stackptr <= prevsp) {
			break;
		}

		/* Map the saved-return-address slot into a kernel VA. */
		kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET64, bt_vm_map, thread_trace_flags);
		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prev_rip = *(uint64_t *)kern_virt_addr;
		if (!user_p) {
			/* Strip the KASLR slide from kernel return addresses. */
			prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
		}

		prevsp = stackptr;

		/* Follow the saved frame pointer to the next frame. */
		kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			stackptr = *(uint64_t *)kern_virt_addr;
		} else {
			stackptr = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}
	}

	machine_trace_thread_clear_validation_cache();

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
649
/* Trap into the debugger from machine-independent code via INT3. */
void
kdp_ml_enter_debugger(void)
{
	__asm__ __volatile__ ("int3");
}