]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kdp/ml/x86_64/kdp_machdep.c
xnu-1699.26.8.tar.gz
[apple/xnu.git] / osfmk / kdp / ml / x86_64 / kdp_machdep.c
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_kdp.h>
30 #include <mach/mach_types.h>
31 #include <mach/machine.h>
32 #include <mach/exception_types.h>
33 #include <kern/cpu_data.h>
34 #include <i386/trap.h>
35 #include <i386/mp.h>
36 #include <kdp/kdp_internal.h>
37 #include <kdp/kdp_callout.h>
38 #include <mach-o/loader.h>
39 #include <mach-o/nlist.h>
40 #include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
41 #include <kern/machine.h> /* for halt_all_cpus */
42 #include <libkern/OSAtomic.h>
43
44 #include <kern/thread.h>
45 #include <i386/thread.h>
46 #include <vm/vm_map.h>
47 #include <i386/pmap.h>
48 #include <kern/kalloc.h>
49
/* dprintf() is compiled away unless the KDP test harness is enabled. */
#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) printf x
#else
#define dprintf(x)
#endif

/* CPU identification, provided by the cpuid layer. */
extern cpu_type_t cpuid_cputype(void);
extern cpu_subtype_t cpuid_cpusubtype(void);

/* Forward declarations for this file. */
void print_saved_state(void *);
void kdp_call(void);
int kdp_getc(void);
boolean_t kdp_call_kdb(void);
void kdp_getstate(x86_thread_state64_t *);
void kdp_setstate(x86_thread_state64_t *);
void kdp_print_phys(int);

/* Backtrace helpers used by KDP/stackshot to walk thread stacks. */
int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);

int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);

unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);

/* Invoke the registered debugger entry/exit callouts (see bottom of file). */
static void kdp_callouts(kdp_event_t event);
78
79 void
80 kdp_exception(
81 unsigned char *pkt,
82 int *len,
83 unsigned short *remote_port,
84 unsigned int exception,
85 unsigned int code,
86 unsigned int subcode
87 )
88 {
89 kdp_exception_t *rq = (kdp_exception_t *)pkt;
90
91 rq->hdr.request = KDP_EXCEPTION;
92 rq->hdr.is_reply = 0;
93 rq->hdr.seq = kdp.exception_seq;
94 rq->hdr.key = 0;
95 rq->hdr.len = sizeof (*rq);
96
97 rq->n_exc_info = 1;
98 rq->exc_info[0].cpu = 0;
99 rq->exc_info[0].exception = exception;
100 rq->exc_info[0].code = code;
101 rq->exc_info[0].subcode = subcode;
102
103 rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);
104
105 bcopy((char *)rq, (char *)pkt, rq->hdr.len);
106
107 kdp.exception_ack_needed = TRUE;
108
109 *remote_port = kdp.exception_port;
110 *len = rq->hdr.len;
111 }
112
113 boolean_t
114 kdp_exception_ack(
115 unsigned char *pkt,
116 int len
117 )
118 {
119 kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;
120
121 if (((unsigned int) len) < sizeof (*rq))
122 return(FALSE);
123
124 if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
125 return(FALSE);
126
127 dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
128
129 if (rq->hdr.seq == kdp.exception_seq) {
130 kdp.exception_ack_needed = FALSE;
131 kdp.exception_seq++;
132 }
133 return(TRUE);
134 }
135
136 void
137 kdp_getstate(
138 x86_thread_state64_t *state
139 )
140 {
141 x86_saved_state64_t *saved_state;
142
143 saved_state = (x86_saved_state64_t *)kdp.saved_state;
144
145 state->rax = saved_state->rax;
146 state->rbx = saved_state->rbx;
147 state->rcx = saved_state->rcx;
148 state->rdx = saved_state->rdx;
149 state->rdi = saved_state->rdi;
150 state->rsi = saved_state->rsi;
151 state->rbp = saved_state->rbp;
152
153 state->r8 = saved_state->r8;
154 state->r9 = saved_state->r9;
155 state->r10 = saved_state->r10;
156 state->r11 = saved_state->r11;
157 state->r12 = saved_state->r12;
158 state->r13 = saved_state->r13;
159 state->r14 = saved_state->r14;
160 state->r15 = saved_state->r15;
161
162 state->rsp = saved_state->isf.rsp;
163 state->rflags = saved_state->isf.rflags;
164 state->rip = saved_state->isf.rip;
165
166 state->cs = saved_state->isf.cs;
167 state->fs = saved_state->fs;
168 state->gs = saved_state->gs;
169 }
170
171
172 void
173 kdp_setstate(
174 x86_thread_state64_t *state
175 )
176 {
177 x86_saved_state64_t *saved_state;
178
179 saved_state = (x86_saved_state64_t *)kdp.saved_state;
180 saved_state->rax = state->rax;
181 saved_state->rbx = state->rbx;
182 saved_state->rcx = state->rcx;
183 saved_state->rdx = state->rdx;
184 saved_state->rdi = state->rdi;
185 saved_state->rsi = state->rsi;
186 saved_state->rbp = state->rbp;
187 saved_state->r8 = state->r8;
188 saved_state->r9 = state->r9;
189 saved_state->r10 = state->r10;
190 saved_state->r11 = state->r11;
191 saved_state->r12 = state->r12;
192 saved_state->r13 = state->r13;
193 saved_state->r14 = state->r14;
194 saved_state->r15 = state->r15;
195
196 saved_state->isf.rflags = state->rflags;
197 saved_state->isf.rsp = state->rsp;
198 saved_state->isf.rip = state->rip;
199
200 saved_state->fs = (uint32_t)state->fs;
201 saved_state->gs = (uint32_t)state->gs;
202 }
203
204
205 kdp_error_t
206 kdp_machine_read_regs(
207 __unused unsigned int cpu,
208 unsigned int flavor,
209 char *data,
210 int *size
211 )
212 {
213 static x86_float_state64_t null_fpstate;
214
215 switch (flavor) {
216
217 case x86_THREAD_STATE64:
218 dprintf(("kdp_readregs THREAD_STATE64\n"));
219 kdp_getstate((x86_thread_state64_t *)data);
220 *size = sizeof (x86_thread_state64_t);
221 return KDPERR_NO_ERROR;
222
223 case x86_FLOAT_STATE64:
224 dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
225 *(x86_float_state64_t *)data = null_fpstate;
226 *size = sizeof (x86_float_state64_t);
227 return KDPERR_NO_ERROR;
228
229 default:
230 dprintf(("kdp_readregs bad flavor %d\n", flavor));
231 *size = 0;
232 return KDPERR_BADFLAVOR;
233 }
234 }
235
236 kdp_error_t
237 kdp_machine_write_regs(
238 __unused unsigned int cpu,
239 unsigned int flavor,
240 char *data,
241 __unused int *size
242 )
243 {
244 switch (flavor) {
245
246 case x86_THREAD_STATE64:
247 dprintf(("kdp_writeregs THREAD_STATE64\n"));
248 kdp_setstate((x86_thread_state64_t *)data);
249 return KDPERR_NO_ERROR;
250
251 case x86_FLOAT_STATE64:
252 dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
253 return KDPERR_NO_ERROR;
254
255 default:
256 dprintf(("kdp_writeregs bad flavor %d\n", flavor));
257 return KDPERR_BADFLAVOR;
258 }
259 }
260
261
262
263 void
264 kdp_machine_hostinfo(
265 kdp_hostinfo_t *hostinfo
266 )
267 {
268 int i;
269
270 hostinfo->cpus_mask = 0;
271
272 for (i = 0; i < machine_info.max_cpus; i++) {
273 if (cpu_data_ptr[i] == NULL)
274 continue;
275
276 hostinfo->cpus_mask |= (1 << i);
277 }
278
279 hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
280 hostinfo->cpu_subtype = cpuid_cpusubtype();
281 }
282
/*
 * Fatal error inside the debugger itself: report the message on the
 * kernel console and halt this CPU. There is no recovery path.
 */
void
kdp_panic(
    const char		*msg
)
{
	kprintf("kdp panic: %s\n", msg);
	__asm__ volatile("hlt");
}
291
292
293 void
294 kdp_machine_reboot(void)
295 {
296 printf("Attempting system restart...");
297 /* Call the platform specific restart*/
298 if (PE_halt_restart)
299 (*PE_halt_restart)(kPERestartCPU);
300 /* If we do reach this, give up */
301 halt_all_cpus(TRUE);
302 }
303
/*
 * Block interrupts for the debugger; returns the previous interrupt
 * level so kdp_intr_enbl() can restore it.
 */
int
kdp_intr_disbl(void)
{
	return splhigh();
}
309
/* Restore the interrupt level previously returned by kdp_intr_disbl(). */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
315
/*
 * Poll the console for an input character without blocking
 * (returns cnmaygetc()'s no-character sentinel when none is pending).
 */
int
kdp_getc(void)
{
	return cnmaygetc();
}
321
/*
 * Busy-wait on behalf of the debugger.
 * NOTE(review): delay() is handed usec/100 — as written the wait is
 * 1/100th of 'usec' microseconds; confirm the units delay() expects.
 */
void
kdp_us_spin(int usec)
{
	delay(usec/100);
}
327
/*
 * Dump a few key fields of an interrupt saved-state frame to the console.
 * 'state' must point at an x86_saved_state64_t.
 */
void print_saved_state(void *state)
{
	x86_saved_state64_t	*saved_state;

	saved_state = state;

	kprintf("pc = 0x%llx\n", saved_state->isf.rip);
	kprintf("cr2= 0x%llx\n", saved_state->cr2);
	kprintf("rp = TODO FIXME\n");
	/* Prints the address of the frame itself, not the saved rsp. */
	kprintf("sp = %p\n", saved_state);

}
340
/*
 * Cache synchronization hook for the debugger. x86 caches are coherent
 * with respect to instruction fetch after a breakpoint write, so there
 * is nothing to do on this platform.
 */
void
kdp_sync_cache(void)
{
}
346
/* Enter the debugger by raising a breakpoint trap (INT3). */
void
kdp_call(void)
{
	__asm__ volatile ("int $3"); /* Let the processor do the work */
}
352
353
/*
 * Minimal 32-bit call-frame layout: saved frame pointer, return address,
 * then the caller's arguments.
 */
typedef struct _cframe_t {
	struct _cframe_t	*prev;
	unsigned		caller;
	unsigned		args[0];
} cframe_t;

/* Debug-only PTE slot and virtual window used by kdp_print_phys(). */
extern pt_entry_t *DMAP2;
extern caddr_t DADDR2;
362
/*
 * Debug helper: dump 100 words of physical memory starting at the page
 * containing 'src', via the DMAP2/DADDR2 temporary mapping window.
 */
void
kdp_print_phys(int src)
{
	unsigned int *iptr;
	int i;

	/* Map src's physical page at DADDR2. 0x63 supplies the PTE control
	 * bits (valid/writable/accessed/dirty pattern — NOTE(review):
	 * confirm against the pmap PTE bit definitions). */
	*(int *) DMAP2 = 0x63 | (src & 0xfffff000);
	invlpg((uintptr_t) DADDR2);
	iptr = (unsigned int *) DADDR2;
	for (i = 0; i < 100; i++) {
		kprintf("0x%x ", *iptr++);
		if ((i % 8) == 0)
			kprintf("\n");
	}
	kprintf("\n");
	/* Tear down the temporary mapping. */
	*(int *) DMAP2 = 0;

}
381
/*
 * Kernel trap entry into the KDP debugger.
 *
 *   trapno      - x86 trap number that got us here
 *   saved_state - register context captured at the trap
 *   result      - kern_return_t from fault handling (page faults)
 *   va          - faulting virtual address (page faults)
 *
 * Returns TRUE if the debugger handled the trap, FALSE if an unexpected
 * trap arrived with no remote debugger connected.
 */
boolean_t
kdp_i386_trap(
    unsigned int	trapno,
    x86_saved_state64_t	*saved_state,
    kern_return_t	result,
    vm_offset_t		va
)
{
	unsigned int exception, subcode = 0, code;

	/* Only breakpoint (INT3) and single-step traps are expected entry
	 * paths; report anything else and bail unless a debugger is
	 * already connected. */
	if (trapno != T_INT3 && trapno != T_DEBUG) {
		kprintf("Debugger: Unexpected kernel trap number: "
			"0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
			trapno, saved_state->isf.rip, saved_state->cr2);
		if (!kdp.is_conn)
			return FALSE;
	}

	/* Rendezvous the other CPUs and run debugger-entry callouts. */
	mp_kdp_enter();
	kdp_callouts(KDP_EVENT_ENTER);

	/* Single-stepping (trap flag set): re-enable the preemption that
	 * was disabled when the step began (see the matching
	 * disable_preemption() below). */
	if (saved_state->isf.rflags & EFL_TF) {
		enable_preemption_no_check();
	}

	/* Translate the x86 trap into a Mach exception/code/subcode triple
	 * for the remote debugger. */
	switch (trapno) {

	case T_DIVIDE_ERROR:
		exception = EXC_ARITHMETIC;
		code = EXC_I386_DIVERR;
		break;

	case T_OVERFLOW:
		exception = EXC_SOFTWARE;
		code = EXC_I386_INTOFLT;
		break;

	case T_OUT_OF_BOUNDS:
		exception = EXC_ARITHMETIC;
		code = EXC_I386_BOUNDFLT;
		break;

	case T_INVALID_OPCODE:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOPFLT;
		break;

	/* For segment faults, the hardware error code becomes the subcode. */
	case T_SEGMENT_NOT_PRESENT:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_STACK_FAULT:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	case T_GENERAL_PROTECTION:
		exception = EXC_BAD_INSTRUCTION;
		code = EXC_I386_GPFLT;
		subcode = (unsigned int)saved_state->isf.err;
		break;

	/* Page faults carry the VM fault result and faulting address. */
	case T_PAGE_FAULT:
		exception = EXC_BAD_ACCESS;
		code = result;
		subcode = (unsigned int)va;
		break;

	case T_WATCHPOINT:
		exception = EXC_SOFTWARE;
		code = EXC_I386_ALIGNFLT;
		break;

	/* Both debug (single-step) and INT3 report as breakpoints. */
	case T_DEBUG:
	case T_INT3:
		exception = EXC_BREAKPOINT;
		code = EXC_I386_BPTFLT;
		break;

	default:
		exception = EXC_BAD_INSTRUCTION;
		code = trapno;
		break;
	}

	/* If a fatal trap state was captured earlier on this CPU, present
	 * that to the debugger instead of the current (post-fatal) state. */
	if (current_cpu_datap()->cpu_fatal_trap_state) {
		current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
		saved_state = current_cpu_datap()->cpu_fatal_trap_state;
	}

	kdp_raise_exception(exception, code, subcode, saved_state);

	/* If the single-step bit is still set on resume, disable kernel
	 * preemption again (mirrors the enable_preemption_no_check above). */
	if (saved_state->isf.rflags & EFL_TF) {
		disable_preemption();
	}

	kdp_callouts(KDP_EVENT_EXIT);
	mp_kdp_exit();

	return TRUE;
}
487
/* No kernel-resident debugger (KDB) exists on this platform. */
boolean_t
kdp_call_kdb(
	void)
{
	return(FALSE);
}
494
/*
 * Return the machine breakpoint instruction for this architecture:
 * the single-byte x86 INT3 opcode (0xCC).
 */
void
kdp_machine_get_breakinsn(
	uint8_t *bytes,
	uint32_t *size
)
{
	*size = 1;
	bytes[0] = 0xcc;
}
504
/* pmap used by kdp_machine_vm_read() when reading a user task's memory. */
extern pmap_t kdp_pmap;

/* Offset of the saved return address within a 32-bit stack frame. */
#define RETURN_OFFSET 4

/*
 * Walk a thread's 32-bit frame-pointer chain, writing the initial PC
 * followed by (frame pointer, return address) pairs into the buffer at
 * 'tracepos' (bounded by 'tracebound', at most 'nframes' frames).
 * Returns the number of bytes written. Only user threads are supported;
 * a 32-bit kernel trace panics on this 64-bit kernel.
 */
int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint32_t *tracebuf = (uint32_t *)tracepos;
	uint32_t fence = 0;		/* frame-chain terminator value */
	uint32_t stackptr = 0;
	uint32_t stacklimit = 0xfc000000;	/* upper bound on plausible frame addresses */
	int framecount = 0;
	uint32_t init_eip = 0;
	uint32_t prevsp = 0;
	uint32_t framesize = 2 * sizeof(vm_offset_t);

	if (user_p) {
		x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);
		init_eip = iss32->eip;
		stackptr = iss32->ebp;

		/* User stacks may live anywhere in the 32-bit space. */
		stacklimit = 0xffffffff;
		/* Route subsequent reads through the target task's pmap. */
		kdp_pmap = thread->task->map->pmap;
	}
	else
		panic("32-bit trace attempted on 64-bit kernel");

	*tracebuf++ = init_eip;

	for (framecount = 0; framecount < nframes; framecount++) {

		/* Stop if fewer than four frames' worth of space remains;
		 * back off the speculative frame-pointer slot. */
		if ((tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;
		/* Invalid frame, or hit fence */
		if (!stackptr || (stackptr == fence)) {
			break;
		}

		/* Unaligned frame */
		if (stackptr & 0x0000003) {
			break;
		}

		/* Frame pointers must strictly ascend. */
		if (stackptr <= prevsp) {
			break;
		}

		if (stackptr > stacklimit) {
			break;
		}

		/* Fetch the return address stored just above the frame pointer. */
		if (kdp_machine_vm_read((mach_vm_address_t)(stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(*tracebuf)) != sizeof(*tracebuf)) {
			break;
		}
		tracebuf++;

		/* Follow the saved frame pointer to the caller's frame. */
		prevsp = stackptr;
		if (kdp_machine_vm_read((mach_vm_address_t)stackptr, (caddr_t) &stackptr, sizeof(stackptr)) != sizeof(stackptr)) {
			*tracebuf++ = 0;
			break;
		}
	}

	/* Revert to kernel address-space reads. */
	kdp_pmap = 0;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
578
579
/* Offset of the saved return address within a 64-bit stack frame. */
#define RETURN_OFFSET64 8

/* Routine to encapsulate the 64-bit address read hack:
 * thin wrapper over kdp_machine_vm_read() with a narrowed return type. */
unsigned
machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
{
	return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
}
587
/*
 * Walk a thread's 64-bit frame-pointer chain, writing the initial RIP
 * followed by (frame pointer, return address) pairs into the buffer at
 * 'tracepos' (bounded by 'tracebound', at most 'nframes' frames).
 * User threads are read through their task's pmap; kernel threads start
 * from the saved kernel-stack rbp/rip. Returns the byte count written.
 */
int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint64_t *tracebuf = (uint64_t *)tracepos;
	uint32_t fence = 0;		/* frame-chain terminator value */
	addr64_t stackptr = 0;
	int framecount = 0;
	addr64_t init_rip = 0;
	addr64_t prevsp = 0;
	unsigned framesize = 2 * sizeof(addr64_t);

	if (user_p) {
		x86_saved_state64_t	*iss64;
		iss64 = USER_REGS64(thread);
		init_rip = iss64->isf.rip;
		stackptr = iss64->rbp;
		/* Route reads through the target task's pmap. */
		kdp_pmap = thread->task->map->pmap;
	}
	else {
		/* Kernel thread: start from the saved kernel-stack state. */
		stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
		init_rip = STACK_IKS(thread->kernel_stack)->k_rip;
		kdp_pmap = 0;
	}

	*tracebuf++ = init_rip;

	for (framecount = 0; framecount < nframes; framecount++) {

		/* Stop if fewer than four frames' worth of space remains;
		 * back off the speculative frame-pointer slot. */
		if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;

		/* Invalid frame, or hit fence. */
		if (!stackptr || (stackptr == fence)){
			break;
		}

		/* Unaligned frame.
		 * NOTE(review): only the low 2 bits are checked, matching the
		 * 32-bit walker; 64-bit frames could arguably check 0x7. */
		if (stackptr & 0x0000003) {
			break;
		}

		/* Frame pointers must strictly ascend. */
		if (stackptr <= prevsp) {
			break;
		}

		/* Fetch the return address stored just above the frame pointer. */
		if (machine_read64(stackptr + RETURN_OFFSET64, (caddr_t) tracebuf, sizeof(addr64_t)) != sizeof(addr64_t)) {
			break;
		}
		tracebuf++;

		/* Follow the saved frame pointer to the caller's frame. */
		prevsp = stackptr;
		if (machine_read64(stackptr, (caddr_t) &stackptr, sizeof(addr64_t)) != sizeof(addr64_t)) {
			*tracebuf++ = 0;
			break;
		}
	}

	/* Revert to kernel address-space reads. */
	kdp_pmap = NULL;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
651
/*
 * Singly-linked list of registered debugger entry/exit callouts.
 * Entries are prepended lock-free (see kdp_register_callout) and are
 * never removed.
 */
static struct kdp_callout {
	struct kdp_callout	*callout_next;
	kdp_callout_fn_t	callout_fn;
	void			*callout_arg;
} *kdp_callout_list = NULL;
657
658
/*
 * Called from kernel context to register a kdp event callout.
 * 'fn' is invoked with 'arg' and the event on every debugger entry/exit.
 * The allocated list node is intentionally never freed.
 */
void
kdp_register_callout(
	kdp_callout_fn_t fn,
	void *arg)
{
	struct kdp_callout	*kcp;
	struct kdp_callout	*list_head;

	kcp = kalloc(sizeof(*kcp));
	if (kcp == NULL)
		panic("kdp_register_callout() kalloc failed");

	kcp->callout_fn = fn;
	kcp->callout_arg = arg;

	/* Lock-less list insertion using compare and exchange:
	 * retry until the new node is atomically prepended. */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while (!OSCompareAndSwapPtr(list_head, kcp, (void * volatile *)&kdp_callout_list));
}
683
684 /*
 * Called at exception/panic time when entering or exiting kdp.
686 * We are single-threaded at this time and so we don't use locks.
687 */
688 static void
689 kdp_callouts(kdp_event_t event)
690 {
691 struct kdp_callout *kcp = kdp_callout_list;
692
693 while (kcp) {
694 kcp->callout_fn(kcp->callout_arg, event);
695 kcp = kcp->callout_next;
696 }
697 }
698
/* Enter the debugger via a breakpoint trap (handled by kdp_i386_trap). */
void
kdp_ml_enter_debugger(void)
{
	__asm__ __volatile__("int3");
}