]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kdp/ml/x86_64/kdp_machdep.c
xnu-6153.61.1.tar.gz
[apple/xnu.git] / osfmk / kdp / ml / x86_64 / kdp_machdep.c
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_kdp.h>
30 #include <mach/mach_types.h>
31 #include <mach/machine.h>
32 #include <mach/exception_types.h>
33 #include <kern/cpu_data.h>
34 #include <i386/trap.h>
35 #include <i386/mp.h>
36 #include <kdp/kdp_internal.h>
37 #include <mach-o/loader.h>
38 #include <mach-o/nlist.h>
39 #include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
40 #include <kern/machine.h> /* for halt_all_cpus */
41 #include <libkern/OSAtomic.h>
42
43 #include <kern/thread.h>
44 #include <i386/thread.h>
45 #include <vm/vm_map.h>
46 #include <i386/pmap.h>
47 #include <kern/kalloc.h>
48
49 #define KDP_TEST_HARNESS 0
50 #if KDP_TEST_HARNESS
51 #define dprintf(x) printf x
52 #else
53 #define dprintf(x)
54 #endif
55
56 extern cpu_type_t cpuid_cputype(void);
57 extern cpu_subtype_t cpuid_cpusubtype(void);
58
59 extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
60 extern void machine_trace_thread_clear_validation_cache(void);
61 extern vm_map_t kernel_map;
62
63 void print_saved_state(void *);
64 void kdp_call(void);
65 int kdp_getc(void);
66 void kdp_getstate(x86_thread_state64_t *);
67 void kdp_setstate(x86_thread_state64_t *);
68 unsigned machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
69
/*
 * Build a KDP_EXCEPTION request packet in place and arm it for
 * transmission to the remote debugger.
 *
 * pkt / len    : packet buffer; *len is set to the number of bytes built.
 * remote_port  : set to the debugger's registered exception port.
 * exception, code, subcode : Mach exception triple to report.
 *
 * NOTE(review): rq aliases pkt, so the bcopy() below is a self-copy kept
 * for symmetry with other packet builders -- confirm before removing.
 */
void
kdp_exception(
	unsigned char *pkt,
	int *len,
	unsigned short *remote_port,
	unsigned int exception,
	unsigned int code,
	unsigned int subcode
	)
{
	kdp_exception_t *rq = (kdp_exception_t *)pkt;

	rq->hdr.request = KDP_EXCEPTION;
	rq->hdr.is_reply = 0;
	rq->hdr.seq = kdp.exception_seq;
	rq->hdr.key = 0;
	rq->hdr.len = sizeof(*rq);

	/* Exactly one exception record per packet. */
	rq->n_exc_info = 1;
	rq->exc_info[0].cpu = 0;
	rq->exc_info[0].exception = exception;
	rq->exc_info[0].code = code;
	rq->exc_info[0].subcode = subcode;

	rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

	bcopy((char *)rq, (char *)pkt, rq->hdr.len);

	/* The remote side must ack this sequence (see kdp_exception_ack). */
	kdp.exception_ack_needed = TRUE;

	*remote_port = kdp.exception_port;
	*len = rq->hdr.len;
}
103
104 boolean_t
105 kdp_exception_ack(
106 unsigned char *pkt,
107 int len
108 )
109 {
110 kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;
111
112 if (((unsigned int) len) < sizeof(*rq)) {
113 return FALSE;
114 }
115
116 if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
117 return FALSE;
118 }
119
120 dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
121
122 if (rq->hdr.seq == kdp.exception_seq) {
123 kdp.exception_ack_needed = FALSE;
124 kdp.exception_seq++;
125 }
126 return TRUE;
127 }
128
/*
 * Copy the debugger-saved CPU state into a Mach x86_thread_state64_t for
 * transmission to the remote debugger.  General-purpose registers come
 * from the saved-state body; rsp/rflags/rip/cs come from the interrupt
 * stack frame (isf); fs/gs from the saved segment slots.
 */
void
kdp_getstate(
	x86_thread_state64_t *state
	)
{
	x86_saved_state64_t *saved_state;

	saved_state = (x86_saved_state64_t *)kdp.saved_state;

	state->rax = saved_state->rax;
	state->rbx = saved_state->rbx;
	state->rcx = saved_state->rcx;
	state->rdx = saved_state->rdx;
	state->rdi = saved_state->rdi;
	state->rsi = saved_state->rsi;
	state->rbp = saved_state->rbp;

	state->r8 = saved_state->r8;
	state->r9 = saved_state->r9;
	state->r10 = saved_state->r10;
	state->r11 = saved_state->r11;
	state->r12 = saved_state->r12;
	state->r13 = saved_state->r13;
	state->r14 = saved_state->r14;
	state->r15 = saved_state->r15;

	/* Frame registers live in the interrupt stack frame. */
	state->rsp = saved_state->isf.rsp;
	state->rflags = saved_state->isf.rflags;
	state->rip = saved_state->isf.rip;

	state->cs = saved_state->isf.cs;
	state->fs = saved_state->fs;
	state->gs = saved_state->gs;
}
163
164
/*
 * Apply register values received from the remote debugger to the saved
 * CPU state that will be restored when execution resumes.  rflags, rsp
 * and rip are written into the interrupt stack frame; fs/gs are
 * truncated to 32 bits to match the saved-state layout.  Note that cs
 * is not written back.
 */
void
kdp_setstate(
	x86_thread_state64_t *state
	)
{
	x86_saved_state64_t *saved_state;

	saved_state = (x86_saved_state64_t *)kdp.saved_state;
	saved_state->rax = state->rax;
	saved_state->rbx = state->rbx;
	saved_state->rcx = state->rcx;
	saved_state->rdx = state->rdx;
	saved_state->rdi = state->rdi;
	saved_state->rsi = state->rsi;
	saved_state->rbp = state->rbp;
	saved_state->r8 = state->r8;
	saved_state->r9 = state->r9;
	saved_state->r10 = state->r10;
	saved_state->r11 = state->r11;
	saved_state->r12 = state->r12;
	saved_state->r13 = state->r13;
	saved_state->r14 = state->r14;
	saved_state->r15 = state->r15;

	saved_state->isf.rflags = state->rflags;
	saved_state->isf.rsp = state->rsp;
	saved_state->isf.rip = state->rip;

	saved_state->fs = (uint32_t)state->fs;
	saved_state->gs = (uint32_t)state->gs;
}
196
197
198 kdp_error_t
199 kdp_machine_read_regs(
200 __unused unsigned int cpu,
201 unsigned int flavor,
202 char *data,
203 int *size
204 )
205 {
206 static x86_float_state64_t null_fpstate;
207
208 switch (flavor) {
209 case x86_THREAD_STATE64:
210 dprintf(("kdp_readregs THREAD_STATE64\n"));
211 kdp_getstate((x86_thread_state64_t *)data);
212 *size = sizeof(x86_thread_state64_t);
213 return KDPERR_NO_ERROR;
214
215 case x86_FLOAT_STATE64:
216 dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
217 *(x86_float_state64_t *)data = null_fpstate;
218 *size = sizeof(x86_float_state64_t);
219 return KDPERR_NO_ERROR;
220
221 default:
222 dprintf(("kdp_readregs bad flavor %d\n", flavor));
223 *size = 0;
224 return KDPERR_BADFLAVOR;
225 }
226 }
227
228 kdp_error_t
229 kdp_machine_write_regs(
230 __unused unsigned int cpu,
231 unsigned int flavor,
232 char *data,
233 __unused int *size
234 )
235 {
236 switch (flavor) {
237 case x86_THREAD_STATE64:
238 dprintf(("kdp_writeregs THREAD_STATE64\n"));
239 kdp_setstate((x86_thread_state64_t *)data);
240 return KDPERR_NO_ERROR;
241
242 case x86_FLOAT_STATE64:
243 dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
244 return KDPERR_NO_ERROR;
245
246 default:
247 dprintf(("kdp_writeregs bad flavor %d\n", flavor));
248 return KDPERR_BADFLAVOR;
249 }
250 }
251
252
253
254 void
255 kdp_machine_hostinfo(
256 kdp_hostinfo_t *hostinfo
257 )
258 {
259 int i;
260
261 hostinfo->cpus_mask = 0;
262
263 for (i = 0; i < machine_info.max_cpus; i++) {
264 if (cpu_data_ptr[i] == NULL) {
265 continue;
266 }
267
268 hostinfo->cpus_mask |= (1 << i);
269 }
270
271 hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
272 hostinfo->cpu_subtype = cpuid_cpusubtype();
273 }
274
/*
 * Last-resort failure path for the debugger itself: log the message and
 * halt this CPU with a HLT instruction.
 */
void
kdp_panic(
	const char *msg
	)
{
	kprintf("kdp panic: %s\n", msg);
	__asm__ volatile ("hlt");
}
283
/*
 * Raise the interrupt priority to block interrupts while the debugger
 * runs; returns the previous level for kdp_intr_enbl() to restore.
 */
int
kdp_intr_disbl(void)
{
	return splhigh();
}
289
/*
 * Restore the interrupt priority level saved by kdp_intr_disbl().
 */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
295
/*
 * Non-blocking console read used by the debugger; returns the character
 * from cnmaygetc(), which does not wait for input.
 */
int
kdp_getc(void)
{
	return cnmaygetc();
}
301
/*
 * Busy-wait used by the KDP transport between polls.
 * NOTE(review): the argument is divided by 100 before being handed to
 * delay(), so the actual spin is usec/100 -- confirm this scaling is
 * intentional rather than a units bug.
 */
void
kdp_us_spin(int usec)
{
	delay(usec / 100);
}
307
/*
 * Debug helper: dump a few fields of a saved register state to the
 * kernel log (pc, faulting address from CR2, and the state's address).
 */
void
print_saved_state(void *state)
{
	x86_saved_state64_t *saved_state;

	saved_state = state;

	kprintf("pc = 0x%llx\n", saved_state->isf.rip);
	kprintf("cr2= 0x%llx\n", saved_state->cr2);
	kprintf("rp = TODO FIXME\n");
	kprintf("sp = %p\n", saved_state);
}
320
/*
 * Cache-synchronization hook required by the machine-independent KDP
 * interface.  Nothing to do on this architecture.
 */
void
kdp_sync_cache(void)
{
	/* Intentionally empty. */
}
326
/*
 * Enter the debugger from software by raising a breakpoint trap; the
 * INT3 handler routes control into kdp_i386_trap().
 */
void
kdp_call(void)
{
	__asm__ volatile ("int $3"); /* Let the processor do the work */
}
332
333
/*
 * Legacy 32-bit stack frame layout used when walking call frames:
 * saved frame pointer, return address, then the callee's arguments.
 * Uses a C99 flexible array member instead of the GNU zero-length
 * array extension (`args[0]`); sizeof and layout are unchanged.
 */
typedef struct _cframe_t {
	struct _cframe_t *prev;  /* caller's frame pointer */
	unsigned caller;         /* return address */
	unsigned args[];         /* arguments, variable length */
} cframe_t;
339
/*
 * Machine-dependent KDP trap entry: classify a kernel trap as a Mach
 * exception triple and hand control to the debugger core.
 *
 * trapno      : x86 trap vector (T_* from i386/trap.h).
 * saved_state : the interrupted context's register state.
 * result      : kern_return_t used as the exception code for page faults.
 * va          : faulting virtual address, used as the page-fault subcode.
 *
 * Returns TRUE if the trap was presented to the debugger; FALSE if an
 * unexpected trap arrived while no debugger connection exists.
 */
boolean_t
kdp_i386_trap(
	unsigned int trapno,
	x86_saved_state64_t *saved_state,
	kern_return_t result,
	vm_offset_t va
	)
{
	unsigned int exception, code, subcode = 0;
	boolean_t prev_interrupts_state;

	if (trapno != T_INT3 && trapno != T_DEBUG) {
		kprintf("Debugger: Unexpected kernel trap number: "
		    "0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
		    trapno, saved_state->isf.rip, saved_state->cr2);
		if (!kdp.is_conn) {
			/* No debugger attached: decline, let normal trap handling run. */
			return FALSE;
		}
	}

	/* Run the debugger with interrupts masked and preemption disabled. */
	prev_interrupts_state = ml_set_interrupts_enabled(FALSE);
	disable_preemption();

	if (saved_state->isf.rflags & EFL_TF) {
		/*
		 * Single-step (TF) entry: drop the extra preemption disable
		 * taken when the step was armed (see the matching
		 * disable_preemption() at the bottom of this function).
		 */
		enable_preemption_no_check();
	}

	/* Map the x86 trap vector onto a Mach exception/code/subcode. */
	switch (trapno) {
	case T_DIVIDE_ERROR:
		exception = EXC_ARITHMETIC;
		code = EXC_I386_DIVERR;
		break;

	case T_OVERFLOW:
		exception = EXC_SOFTWARE;
		code = EXC_I386_INTOFLT;
		break;

	case T_OUT_OF_BOUNDS:
		exception = EXC_ARITHMETIC;
		code = EXC_I386_BOUNDFLT;
		break;

	case T_INVALID_OPCODE:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOPFLT;
		break;

	case T_SEGMENT_NOT_PRESENT:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_STACK_FAULT:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_GENERAL_PROTECTION:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_PAGE_FAULT:
		exception = EXC_BAD_ACCESS;
		code = result;
		subcode = (unsigned int)va;
		break;

	case T_WATCHPOINT:
		exception = EXC_SOFTWARE;
		code = EXC_I386_ALIGNFLT;
		break;

	case T_DEBUG:
	case T_INT3:
		exception = EXC_BREAKPOINT;
		code = EXC_I386_BPTFLT;
		break;

	default:
		exception = EXC_BAD_INSTRUCTION;
		code = trapno;
		break;
	}

	if (current_cpu_datap()->cpu_fatal_trap_state) {
		/*
		 * A fatal trap state was stashed for this CPU: report that
		 * state to the debugger and keep the current one aside.
		 */
		current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
		saved_state = current_cpu_datap()->cpu_fatal_trap_state;
	}

	handle_debugger_trap(exception, code, subcode, saved_state);

	enable_preemption();
	ml_set_interrupts_enabled(prev_interrupts_state);

	/*
	 * If the instruction single-step bit is set, disable kernel
	 * preemption so the step completes on this CPU (balanced by the
	 * enable_preemption_no_check() on the next entry above).
	 */
	if (saved_state->isf.rflags & EFL_TF) {
		disable_preemption();
	}

	return TRUE;
}
447
/*
 * Report this architecture's breakpoint instruction so the debugger
 * can plant breakpoints: the single-byte x86 INT3 opcode, 0xCC.
 */
void
kdp_machine_get_breakinsn(
	uint8_t *bytes,
	uint32_t *size
	)
{
	*size = 1;
	bytes[0] = 0xcc;
}
457
458 #define RETURN_OFFSET 4
459
/*
 * Walk a 32-bit (user) thread's frame-pointer chain, writing up to
 * nframes records into [tracepos, tracebound): the return address and,
 * when trace_fp is set, the frame pointer for each frame.  Returns the
 * number of bytes written.  On this 64-bit kernel only user threads can
 * be 32-bit, so a kernel-mode request panics.
 *
 * thread_trace_flags, if non-NULL, has kThreadTruncatedBT ORed in when
 * a frame could not be mapped and the walk stopped early.
 */
int
machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags)
{
	uint32_t * tracebuf = (uint32_t *)tracepos;
	/* Each record is one word for the PC, plus one for the FP if requested. */
	uint32_t framesize = (trace_fp ? 2 : 1) * sizeof(uint32_t);

	uint32_t fence = 0;
	uint32_t stackptr = 0;
	uint32_t stacklimit = 0xfc000000;
	int framecount = 0;
	uint32_t prev_eip = 0;
	uint32_t prevsp = 0;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map = VM_MAP_NULL;

	/* Clamp nframes to what fits in the output buffer. */
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

	if (user_p) {
		x86_saved_state32_t *iss32;

		iss32 = USER_REGS32(thread);
		prev_eip = iss32->eip;
		stackptr = iss32->ebp;

		stacklimit = 0xffffffff;
		bt_vm_map = thread->task->map;
	} else {
		panic("32-bit trace attempted on 64-bit kernel");
	}

	for (framecount = 0; framecount < nframes; framecount++) {
		*tracebuf++ = prev_eip;
		if (trace_fp) {
			*tracebuf++ = stackptr;
		}

		/* Invalid frame, or hit fence */
		if (!stackptr || (stackptr == fence)) {
			break;
		}

		/* Unaligned frame */
		if (stackptr & 0x0000003) {
			break;
		}

		/* Frame pointers must strictly increase toward the stack base. */
		if (stackptr <= prevsp) {
			break;
		}

		if (stackptr > stacklimit) {
			break;
		}

		/* Map and read the return address stored just above the saved EBP. */
		kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET, bt_vm_map, thread_trace_flags);

		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prev_eip = *(uint32_t *)kern_virt_addr;

		prevsp = stackptr;

		/* Follow the saved frame pointer to the caller's frame. */
		kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			stackptr = *(uint32_t *)kern_virt_addr;
		} else {
			stackptr = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}
	}

	machine_trace_thread_clear_validation_cache();

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
549
550
551 #define RETURN_OFFSET64 8
/*
 * Routine to encapsulate the 64-bit address read hack: forwards to
 * kdp_machine_vm_read() and narrows its result to unsigned.
 */
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
{
	return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}
558
/*
 * Walk a 64-bit thread's frame-pointer chain (user or kernel), writing
 * up to nframes records into [tracepos, tracebound): the return address
 * and, when trace_fp is set, the frame pointer for each frame.  Kernel
 * return addresses are unslid before being recorded.  Returns the
 * number of bytes written.
 *
 * sp, if non-NULL, receives the user stack pointer (user threads only).
 * thread_trace_flags, if non-NULL, has kThreadTruncatedBT ORed in when
 * a frame could not be mapped and the walk stopped early.
 */
int
machine_trace_thread64(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t * thread_trace_flags,
    uint64_t *sp)
{
	uint64_t * tracebuf = (uint64_t *)tracepos;
	/* Each record is one word for the PC, plus one for the FP if requested. */
	unsigned framesize = (trace_fp ? 2 : 1) * sizeof(addr64_t);

	uint32_t fence = 0;
	addr64_t stackptr = 0;
	int framecount = 0;
	addr64_t prev_rip = 0;
	addr64_t prevsp = 0;
	vm_offset_t kern_virt_addr = 0;
	vm_map_t bt_vm_map = VM_MAP_NULL;

	/* Clamp nframes to what fits in the output buffer. */
	nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

	if (user_p) {
		x86_saved_state64_t *iss64;
		iss64 = USER_REGS64(thread);
		prev_rip = iss64->isf.rip;
		stackptr = iss64->rbp;
		bt_vm_map = thread->task->map;
		if (sp && user_p) { /* user_p is redundant here; kept as-is */
			*sp = iss64->isf.rsp;
		}
	} else {
		/* Kernel thread: start from its saved kernel-stack context. */
		stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
		prev_rip = STACK_IKS(thread->kernel_stack)->k_rip;
		prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
		bt_vm_map = kernel_map;
	}

	for (framecount = 0; framecount < nframes; framecount++) {
		*tracebuf++ = prev_rip;
		if (trace_fp) {
			*tracebuf++ = stackptr;
		}

		/* Invalid frame, or hit fence. */
		if (!stackptr || (stackptr == fence)) {
			break;
		}
		/* Unaligned frame. */
		if (stackptr & 0x0000007) {
			break;
		}
		/* Frame pointers must strictly increase toward the stack base. */
		if (stackptr <= prevsp) {
			break;
		}

		/* Map and read the return address stored just above the saved RBP. */
		kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET64, bt_vm_map, thread_trace_flags);
		if (!kern_virt_addr) {
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
			break;
		}

		prev_rip = *(uint64_t *)kern_virt_addr;
		if (!user_p) {
			prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
		}

		prevsp = stackptr;

		/* Follow the saved frame pointer to the caller's frame. */
		kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

		if (kern_virt_addr) {
			stackptr = *(uint64_t *)kern_virt_addr;
		} else {
			stackptr = 0;
			if (thread_trace_flags) {
				*thread_trace_flags |= kThreadTruncatedBT;
			}
		}
	}

	machine_trace_thread_clear_validation_cache();

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
645
/*
 * Machine-level debugger entry point: raise a breakpoint trap with the
 * one-byte INT3 encoding.
 */
void
kdp_ml_enter_debugger(void)
{
	__asm__ __volatile__ ("int3");
}