/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_kdp.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/exception_types.h>
#include <kern/cpu_data.h>
#include <i386/trap.h>
#include <i386/mp.h>
#include <kdp/kdp_internal.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
#include <kern/machine.h> /* for halt_all_cpus */
#include <libkern/OSAtomic.h>

#include <kern/thread.h>
#include <i386/thread.h>
#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <kern/kalloc.h>

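/*
 * With KDP_TEST_HARNESS enabled, dprintf() forwards to printf; otherwise it
 * expands to nothing, so the debug traces below cost nothing in production
 * kernels.
 */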
#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) printf x
#else
#define dprintf(x)
#endif

extern cpu_type_t cpuid_cputype(void);
extern cpu_subtype_t cpuid_cpusubtype(void);

extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
extern void machine_trace_thread_clear_validation_cache(void);
extern vm_map_t kernel_map;

void print_saved_state(void *);
void kdp_call(void);
int kdp_getc(void);
void kdp_getstate(x86_thread_state64_t *);
void kdp_setstate(x86_thread_state64_t *);
void kdp_print_phys(int);
unsigned machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);

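/*
 * Build a KDP_EXCEPTION notification in the supplied packet buffer and
 * address it to the remote debugger's exception port. A single exception
 * record (always cpu 0) is attached, and exception_ack_needed is set so the
 * caller knows an acknowledgement is outstanding.
 */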
void
kdp_exception(
    unsigned char *pkt,
    int *len,
    unsigned short *remote_port,
    unsigned int exception,
    unsigned int code,
    unsigned int subcode
)
{
    kdp_exception_t *rq = (kdp_exception_t *)pkt;

    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof (*rq);

    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);

    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    kdp.exception_ack_needed = TRUE;

    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}

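/*
 * Validate an incoming KDP_EXCEPTION acknowledgement. If the sequence
 * number matches the outstanding notification, clear the pending-ack flag
 * and advance the exception sequence.
 */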
boolean_t
kdp_exception_ack(
    unsigned char *pkt,
    int len
)
{
    kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;

    if (((unsigned int) len) < sizeof (*rq))
        return(FALSE);

    if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
        return(FALSE);

    dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

    if (rq->hdr.seq == kdp.exception_seq) {
        kdp.exception_ack_needed = FALSE;
        kdp.exception_seq++;
    }
    return(TRUE);
}

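/*
 * Copy the interrupted thread's register file out of the saved interrupt
 * state (kdp.saved_state) into the x86_thread_state64_t layout that the
 * remote debugger expects.
 */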
void
kdp_getstate(
    x86_thread_state64_t *state
)
{
    x86_saved_state64_t *saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;

    state->rax = saved_state->rax;
    state->rbx = saved_state->rbx;
    state->rcx = saved_state->rcx;
    state->rdx = saved_state->rdx;
    state->rdi = saved_state->rdi;
    state->rsi = saved_state->rsi;
    state->rbp = saved_state->rbp;

    state->r8 = saved_state->r8;
    state->r9 = saved_state->r9;
    state->r10 = saved_state->r10;
    state->r11 = saved_state->r11;
    state->r12 = saved_state->r12;
    state->r13 = saved_state->r13;
    state->r14 = saved_state->r14;
    state->r15 = saved_state->r15;

    state->rsp = saved_state->isf.rsp;
    state->rflags = saved_state->isf.rflags;
    state->rip = saved_state->isf.rip;

    state->cs = saved_state->isf.cs;
    state->fs = saved_state->fs;
    state->gs = saved_state->gs;
}

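/*
 * Inverse of kdp_getstate(): write a debugger-supplied register file back
 * into the saved interrupt state so it takes effect on resume. (cs is not
 * written back; fs and gs are narrowed to 32 bits.)
 */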
void
kdp_setstate(
    x86_thread_state64_t *state
)
{
    x86_saved_state64_t *saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;
    saved_state->rax = state->rax;
    saved_state->rbx = state->rbx;
    saved_state->rcx = state->rcx;
    saved_state->rdx = state->rdx;
    saved_state->rdi = state->rdi;
    saved_state->rsi = state->rsi;
    saved_state->rbp = state->rbp;
    saved_state->r8 = state->r8;
    saved_state->r9 = state->r9;
    saved_state->r10 = state->r10;
    saved_state->r11 = state->r11;
    saved_state->r12 = state->r12;
    saved_state->r13 = state->r13;
    saved_state->r14 = state->r14;
    saved_state->r15 = state->r15;

    saved_state->isf.rflags = state->rflags;
    saved_state->isf.rsp = state->rsp;
    saved_state->isf.rip = state->rip;

    saved_state->fs = (uint32_t)state->fs;
    saved_state->gs = (uint32_t)state->gs;
}

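/*
 * KDP register-read entry point. Only the 64-bit thread flavor is backed by
 * live state; float-state reads are answered with a zeroed placeholder
 * (null_fpstate).
 */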
kdp_error_t
kdp_machine_read_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    int *size
)
{
    static x86_float_state64_t null_fpstate;

    switch (flavor) {

    case x86_THREAD_STATE64:
        dprintf(("kdp_readregs THREAD_STATE64\n"));
        kdp_getstate((x86_thread_state64_t *)data);
        *size = sizeof (x86_thread_state64_t);
        return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE64:
        dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
        *(x86_float_state64_t *)data = null_fpstate;
        *size = sizeof (x86_float_state64_t);
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_readregs bad flavor %d\n", flavor));
        *size = 0;
        return KDPERR_BADFLAVOR;
    }
}

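/*
 * KDP register-write entry point. Thread state is pushed back through
 * kdp_setstate(); float-state writes are accepted but ignored.
 */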
kdp_error_t
kdp_machine_write_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    __unused int *size
)
{
    switch (flavor) {

    case x86_THREAD_STATE64:
        dprintf(("kdp_writeregs THREAD_STATE64\n"));
        kdp_setstate((x86_thread_state64_t *)data);
        return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE64:
        dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_writeregs bad flavor %d\n", flavor));
        return KDPERR_BADFLAVOR;
    }
}

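/*
 * Report host details to the debugger: a mask with one bit per populated
 * CPU slot, plus the 64-bit CPU type and subtype.
 */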
void
kdp_machine_hostinfo(
    kdp_hostinfo_t *hostinfo
)
{
    int i;

    hostinfo->cpus_mask = 0;

    for (i = 0; i < machine_info.max_cpus; i++) {
        if (cpu_data_ptr[i] == NULL)
            continue;

        hostinfo->cpus_mask |= (1 << i);
    }

    hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
    hostinfo->cpu_subtype = cpuid_cpusubtype();
}

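/*
 * Last-resort failure path for the debugger itself: log the message via
 * kprintf() and halt this CPU.
 */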
void
kdp_panic(
    const char *msg
)
{
    kprintf("kdp panic: %s\n", msg);
    __asm__ volatile("hlt");
}

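/*
 * Interrupt gating around debugger entry: kdp_intr_disbl() raises spl to
 * the highest level and returns the previous level, which kdp_intr_enbl()
 * restores.
 */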
int
kdp_intr_disbl(void)
{
    return splhigh();
}

void
kdp_intr_enbl(int s)
{
    splx(s);
}

int
kdp_getc(void)
{
    return cnmaygetc();
}

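/*
 * Coarse busy-wait. Note that the requested microsecond count is scaled
 * down by a factor of 100 before being handed to delay().
 */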
void
kdp_us_spin(int usec)
{
    delay(usec/100);
}

void
print_saved_state(void *state)
{
    x86_saved_state64_t *saved_state;

    saved_state = state;

    kprintf("pc = 0x%llx\n", saved_state->isf.rip);
    kprintf("cr2= 0x%llx\n", saved_state->cr2);
    kprintf("rp = TODO FIXME\n");
    kprintf("sp = %p\n", saved_state);
}

void
kdp_sync_cache(void)
{
    return; /* No op here. */
}

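/*
 * Enter the debugger by executing a breakpoint trap; the INT3 handler
 * routes control into KDP.
 */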
void
kdp_call(void)
{
    __asm__ volatile ("int $3"); /* Let the processor do the work */
}

typedef struct _cframe_t {
    struct _cframe_t *prev;
    unsigned caller;
    unsigned args[0];
} cframe_t;

extern pt_entry_t *DMAP2;
extern caddr_t DADDR2;

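/*
 * Debug helper: temporarily map the physical page containing 'src' through
 * the reserved DMAP2 PTE (0x63 = present | writable | accessed | dirty),
 * dump 100 words starting at DADDR2, then tear the mapping down.
 */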
void
kdp_print_phys(int src)
{
    unsigned int *iptr;
    int i;

    *(int *) DMAP2 = 0x63 | (src & 0xfffff000);
    invlpg((uintptr_t) DADDR2);
    iptr = (unsigned int *) DADDR2;
    for (i = 0; i < 100; i++) {
        kprintf("0x%x ", *iptr++);
        if ((i % 8) == 0)
            kprintf("\n");
    }
    kprintf("\n");
    *(int *) DMAP2 = 0;
}

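/*
 * Main KDP trap hook. Translate the x86 trap number into a Mach exception
 * code triple, quiesce interrupts and preemption, and hand control to
 * handle_debugger_trap(). Unexpected traps are logged and, if no debugger
 * is connected, passed back to the normal trap path (return FALSE).
 */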
boolean_t
kdp_i386_trap(
    unsigned int trapno,
    x86_saved_state64_t *saved_state,
    kern_return_t result,
    vm_offset_t va
)
{
    unsigned int exception, code, subcode = 0;
    boolean_t prev_interrupts_state;

    if (trapno != T_INT3 && trapno != T_DEBUG) {
        kprintf("Debugger: Unexpected kernel trap number: "
            "0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
            trapno, saved_state->isf.rip, saved_state->cr2);
        if (!kdp.is_conn)
            return FALSE;
    }

    prev_interrupts_state = ml_set_interrupts_enabled(FALSE);
    disable_preemption();

    if (saved_state->isf.rflags & EFL_TF) {
        enable_preemption_no_check();
    }

    switch (trapno) {

    case T_DIVIDE_ERROR:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_DIVERR;
        break;

    case T_OVERFLOW:
        exception = EXC_SOFTWARE;
        code = EXC_I386_INTOFLT;
        break;

    case T_OUT_OF_BOUNDS:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_BOUNDFLT;
        break;

    case T_INVALID_OPCODE:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOPFLT;
        break;

    case T_SEGMENT_NOT_PRESENT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_STACK_FAULT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_GENERAL_PROTECTION:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_GPFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_PAGE_FAULT:
        exception = EXC_BAD_ACCESS;
        code = result;
        subcode = (unsigned int)va;
        break;

    case T_WATCHPOINT:
        exception = EXC_SOFTWARE;
        code = EXC_I386_ALIGNFLT;
        break;

    case T_DEBUG:
    case T_INT3:
        exception = EXC_BREAKPOINT;
        code = EXC_I386_BPTFLT;
        break;

    default:
        exception = EXC_BAD_INSTRUCTION;
        code = trapno;
        break;
    }

    if (current_cpu_datap()->cpu_fatal_trap_state) {
        current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
        saved_state = current_cpu_datap()->cpu_fatal_trap_state;
    }

    handle_debugger_trap(exception, code, subcode, saved_state);

    enable_preemption();
    ml_set_interrupts_enabled(prev_interrupts_state);

    /* If the instruction single step bit is set, disable kernel preemption */
    if (saved_state->isf.rflags & EFL_TF) {
        disable_preemption();
    }

    return TRUE;
}

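/*
 * The x86 breakpoint instruction is the single byte 0xCC (INT3).
 */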
void
kdp_machine_get_breakinsn(
    uint8_t *bytes,
    uint32_t *size
)
{
    bytes[0] = 0xcc;
    *size = 1;
}

#define RETURN_OFFSET 4

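/*
 * Walk a 32-bit user thread's frame-pointer chain, recording one return
 * address (and, if trace_fp, the frame pointer) per frame into tracebuf.
 * The walk stops after nframes frames, when the buffer is exhausted, or
 * when a sanity check fails: a NULL, unaligned, non-ascending, or
 * out-of-range frame pointer, or an address that cannot be mapped (which
 * also sets kThreadTruncatedBT). Only user threads can be traced this way;
 * a 32-bit kernel trace on a 64-bit kernel is a panic.
 */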
int
machine_trace_thread(thread_t thread,
    char *tracepos,
    char *tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t *thread_trace_flags)
{
    uint32_t *tracebuf = (uint32_t *)tracepos;
    uint32_t framesize = (trace_fp ? 2 : 1) * sizeof(uint32_t);

    uint32_t fence = 0;
    uint32_t stackptr = 0;
    uint32_t stacklimit = 0xfc000000;
    int framecount = 0;
    uint32_t prev_eip = 0;
    uint32_t prevsp = 0;
    vm_offset_t kern_virt_addr = 0;
    vm_map_t bt_vm_map = VM_MAP_NULL;

    nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

    if (user_p) {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);
        prev_eip = iss32->eip;
        stackptr = iss32->ebp;

        stacklimit = 0xffffffff;
        bt_vm_map = thread->task->map;
    } else {
        panic("32-bit trace attempted on 64-bit kernel");
    }

    for (framecount = 0; framecount < nframes; framecount++) {

        *tracebuf++ = prev_eip;
        if (trace_fp) {
            *tracebuf++ = stackptr;
        }

        /* Invalid frame, or hit fence */
        if (!stackptr || (stackptr == fence)) {
            break;
        }

        /* Unaligned frame */
        if (stackptr & 0x0000003) {
            break;
        }

        if (stackptr <= prevsp) {
            break;
        }

        if (stackptr > stacklimit) {
            break;
        }

        kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET, bt_vm_map, thread_trace_flags);

        if (!kern_virt_addr) {
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
            break;
        }

        prev_eip = *(uint32_t *)kern_virt_addr;

        prevsp = stackptr;

        kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

        if (kern_virt_addr) {
            stackptr = *(uint32_t *)kern_virt_addr;
        } else {
            stackptr = 0;
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
        }
    }

    machine_trace_thread_clear_validation_cache();

    return (uint32_t) (((char *) tracebuf) - tracepos);
}

#define RETURN_OFFSET64 8

/* Routine to encapsulate the 64-bit address read hack */
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
{
    return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}

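/*
 * 64-bit counterpart of machine_trace_thread(): walk the rbp chain of a
 * user thread (from its saved user registers) or a kernel thread (from its
 * kernel stack), unsliding kernel return addresses as they are recorded.
 * The same sanity checks end the walk, minus the explicit stack limit.
 */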
int
machine_trace_thread64(thread_t thread,
    char *tracepos,
    char *tracebound,
    int nframes,
    boolean_t user_p,
    boolean_t trace_fp,
    uint32_t *thread_trace_flags)
{
    uint64_t *tracebuf = (uint64_t *)tracepos;
    unsigned framesize = (trace_fp ? 2 : 1) * sizeof(addr64_t);

    uint32_t fence = 0;
    addr64_t stackptr = 0;
    int framecount = 0;
    addr64_t prev_rip = 0;
    addr64_t prevsp = 0;
    vm_offset_t kern_virt_addr = 0;
    vm_map_t bt_vm_map = VM_MAP_NULL;

    nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

    if (user_p) {
        x86_saved_state64_t *iss64;
        iss64 = USER_REGS64(thread);
        prev_rip = iss64->isf.rip;
        stackptr = iss64->rbp;
        bt_vm_map = thread->task->map;
    } else {
        stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
        prev_rip = STACK_IKS(thread->kernel_stack)->k_rip;
        prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
        bt_vm_map = kernel_map;
    }

    for (framecount = 0; framecount < nframes; framecount++) {

        *tracebuf++ = prev_rip;
        if (trace_fp) {
            *tracebuf++ = stackptr;
        }

        if (!stackptr || (stackptr == fence)) {
            break;
        }
        if (stackptr & 0x0000007) {
            break;
        }
        if (stackptr <= prevsp) {
            break;
        }

        kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET64, bt_vm_map, thread_trace_flags);
        if (!kern_virt_addr) {
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
            break;
        }

        prev_rip = *(uint64_t *)kern_virt_addr;
        if (!user_p) {
            prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
        }

        prevsp = stackptr;

        kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

        if (kern_virt_addr) {
            stackptr = *(uint64_t *)kern_virt_addr;
        } else {
            stackptr = 0;
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
        }
    }

    machine_trace_thread_clear_validation_cache();

    return (uint32_t) (((char *) tracebuf) - tracepos);
}

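/*
 * Programmatic debugger entry: the same INT3 breakpoint trap as kdp_call().
 */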
void
kdp_ml_enter_debugger(void)
{
    __asm__ __volatile__("int3");
}