]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kdp/ml/i386/kdp_machdep.c
xnu-1228.tar.gz
[apple/xnu.git] / osfmk / kdp / ml / i386 / kdp_machdep.c
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_kdp.h>
30 #include <mach/mach_types.h>
31 #include <mach/machine.h>
32 #include <mach/exception_types.h>
33 #include <kern/cpu_data.h>
34 #include <i386/trap.h>
35 #include <i386/mp.h>
36 #include <kdp/kdp_internal.h>
37 #include <kdp/kdp_callout.h>
38 #include <mach-o/loader.h>
39 #include <mach-o/nlist.h>
40 #include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
41 #include <kern/machine.h> /* for halt_all_cpus */
42
43 #include <kern/thread.h>
44 #include <i386/thread.h>
45 #include <vm/vm_map.h>
46 #include <i386/pmap.h>
47 #include <kern/kalloc.h>
48
49 #define KDP_TEST_HARNESS 0
50 #if KDP_TEST_HARNESS
51 #define dprintf(x) printf x
52 #else
53 #define dprintf(x)
54 #endif
55
56 extern cpu_type_t cpuid_cputype(void);
57 extern cpu_subtype_t cpuid_cpusubtype(void);
58
59 void print_saved_state(void *);
60 void kdp_call(void);
61 int kdp_getc(void);
62 boolean_t kdp_call_kdb(void);
63 void kdp_getstate(i386_thread_state_t *);
64 void kdp_setstate(i386_thread_state_t *);
65 void kdp_print_phys(int);
66
67 int
68 machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
69
70 int
71 machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
72
73 unsigned
74 machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
75
76 extern unsigned kdp_vm_read(caddr_t src, caddr_t dst, unsigned len);
77
78 static void kdp_callouts(kdp_event_t event);
79
/*
 * Build a KDP_EXCEPTION request packet in place in 'pkt', announcing a
 * single exception (exception/code/subcode triple, always reported as
 * CPU 0) to the remote debugger.  On return *len holds the packet length
 * and *remote_port the debugger's exception port.
 */
void
kdp_exception(
    unsigned char	*pkt,
    int	*len,
    unsigned short	*remote_port,
    unsigned int	exception,
    unsigned int	code,
    unsigned int	subcode
)
{
    kdp_exception_t	*rq = (kdp_exception_t *)pkt;

    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof (*rq);

    /* Exactly one exception record is carried per packet. */
    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);

    /* NOTE(review): rq aliases pkt exactly, so this bcopy is a self-copy
     * no-op — presumably left over from an earlier packet layout; confirm
     * before removing. */
    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    /* The debugger must ack this sequence number (see kdp_exception_ack). */
    kdp.exception_ack_needed = TRUE;

    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}
113
114 boolean_t
115 kdp_exception_ack(
116 unsigned char *pkt,
117 int len
118 )
119 {
120 kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;
121
122 if (((unsigned int) len) < sizeof (*rq))
123 return(FALSE);
124
125 if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
126 return(FALSE);
127
128 dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
129
130 if (rq->hdr.seq == kdp.exception_seq) {
131 kdp.exception_ack_needed = FALSE;
132 kdp.exception_seq++;
133 }
134 return(TRUE);
135 }
136
/*
 * Copy the interrupted CPU state captured at debugger entry
 * (kdp.saved_state) into a 32-bit thread-state structure for
 * transmission to the remote debugger.
 */
void
kdp_getstate(
    x86_thread_state32_t	*state
)
{
    /* Zero template so any field not explicitly set below reads as 0. */
    static x86_thread_state32_t	null_state;
    x86_saved_state32_t *saved_state;

    saved_state = (x86_saved_state32_t *)kdp.saved_state;

    *state = null_state;
    state->eax = saved_state->eax;
    state->ebx = saved_state->ebx;
    state->ecx = saved_state->ecx;
    state->edx = saved_state->edx;
    state->edi = saved_state->edi;
    state->esi = saved_state->esi;
    state->ebp = saved_state->ebp;

    if ((saved_state->cs & 0x3) == 0){	/* Kernel State */
	/* Trapped at CPL 0: no stack switch occurred and uesp/ss were not
	 * pushed, so the pre-trap ESP is the address of the uesp slot. */
    	state->esp = (unsigned int) &saved_state->uesp;
        state->ss = KERNEL_DS;
    } else {
    	state->esp = saved_state->uesp;
        state->ss = saved_state->ss;
    }

    state->eflags = saved_state->efl;
    state->eip = saved_state->eip;
    state->cs = saved_state->cs;
    state->ds = saved_state->ds;
    state->es = saved_state->es;
    state->fs = saved_state->fs;
    state->gs = saved_state->gs;
}
172
173
/*
 * Write debugger-supplied register values back into the saved trap
 * state; they take effect when the interrupted context resumes.
 * Note: cs/ds/es/ss/esp are deliberately not written back — only the
 * general registers, eflags, eip, fs and gs.
 */
void
kdp_setstate(
    x86_thread_state32_t	*state
)
{
    x86_saved_state32_t		*saved_state;

    saved_state = (x86_saved_state32_t *)kdp.saved_state;

    saved_state->eax = state->eax;
    saved_state->ebx = state->ebx;
    saved_state->ecx = state->ecx;
    saved_state->edx = state->edx;
    saved_state->edi = state->edi;
    saved_state->esi = state->esi;
    saved_state->ebp = state->ebp;
    saved_state->efl = state->eflags;
#if 0
    /* Disabled: would sanitize EFLAGS instead of trusting the debugger. */
    saved_state->frame.eflags &= ~( EFL_VM | EFL_NT | EFL_IOPL | EFL_CLR );
    saved_state->frame.eflags |= ( EFL_IF | EFL_SET );
#endif
    saved_state->eip = state->eip;
    saved_state->fs = state->fs;
    saved_state->gs = state->gs;
}
199
200
201 kdp_error_t
202 kdp_machine_read_regs(
203 __unused unsigned int cpu,
204 __unused unsigned int flavor,
205 char *data,
206 __unused int *size
207 )
208 {
209 static x86_float_state32_t null_fpstate;
210
211 switch (flavor) {
212
213 case x86_THREAD_STATE32:
214 dprintf(("kdp_readregs THREAD_STATE\n"));
215 kdp_getstate((x86_thread_state32_t *)data);
216 *size = sizeof (x86_thread_state32_t);
217 return KDPERR_NO_ERROR;
218
219 case x86_FLOAT_STATE32:
220 dprintf(("kdp_readregs THREAD_FPSTATE\n"));
221 *(x86_float_state32_t *)data = null_fpstate;
222 *size = sizeof (x86_float_state32_t);
223 return KDPERR_NO_ERROR;
224
225 default:
226 dprintf(("kdp_readregs bad flavor %d\n", flavor));
227 *size = 0;
228 return KDPERR_BADFLAVOR;
229 }
230 }
231
232 kdp_error_t
233 kdp_machine_write_regs(
234 __unused unsigned int cpu,
235 unsigned int flavor,
236 char *data,
237 __unused int *size
238 )
239 {
240 switch (flavor) {
241
242 case x86_THREAD_STATE32:
243 dprintf(("kdp_writeregs THREAD_STATE\n"));
244 kdp_setstate((x86_thread_state32_t *)data);
245 return KDPERR_NO_ERROR;
246
247 case x86_FLOAT_STATE32:
248 dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
249 return KDPERR_NO_ERROR;
250
251 default:
252 dprintf(("kdp_writeregs bad flavor %d\n"));
253 return KDPERR_BADFLAVOR;
254 }
255 }
256
257
258
259 void
260 kdp_machine_hostinfo(
261 kdp_hostinfo_t *hostinfo
262 )
263 {
264 int i;
265
266 hostinfo->cpus_mask = 0;
267
268 for (i = 0; i < machine_info.max_cpus; i++) {
269 if (cpu_data_ptr[i] == NULL)
270 continue;
271
272 hostinfo->cpus_mask |= (1 << i);
273 }
274
275 hostinfo->cpu_type = cpuid_cputype();
276 hostinfo->cpu_subtype = cpuid_cpusubtype();
277 }
278
/*
 * Last-resort failure path for the debugger itself: log the message via
 * kprintf and halt this CPU with HLT.  Does not return.
 */
void
kdp_panic(
#if CONFIG_NO_KPRINTF_STRINGS
    /* Format strings are compiled out in this configuration. */
    __unused const char		*msg
#else
    const char		*msg
#endif
)
{
    kprintf("kdp panic: %s\n", msg);
    __asm__ volatile("hlt");
}
291
292
/*
 * Restart the system on behalf of the debugger.  Tries the platform
 * expert's restart hook first; if that hook is absent or returns,
 * gives up and halts all CPUs instead.
 */
void
kdp_reboot(void)
{
	printf("Attempting system restart...");
	/* Call the platform specific restart*/
	if (PE_halt_restart)
		(*PE_halt_restart)(kPERestartCPU);
	/* If we do reach this, give up */
	halt_all_cpus(TRUE);
}
303
/*
 * Block interrupts while the debugger runs by raising the IPL to its
 * maximum; returns the previous level for kdp_intr_enbl() to restore.
 */
int
kdp_intr_disbl(void)
{
	int previous_level = splhigh();

	return previous_level;
}
309
/*
 * Restore the interrupt level 's' previously returned by
 * kdp_intr_disbl().
 */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
315
/*
 * Non-blocking console read for the debugger: returns whatever
 * cnmaygetc() reports (the next character, if one is pending).
 */
int
kdp_getc(void)
{
	int ch = cnmaygetc();

	return ch;
}
321
/*
 * Busy-wait on behalf of the debugger.
 * NOTE(review): the argument is divided by 100 before being handed to
 * delay(), so the actual wait is 1/100 of the nominal microsecond count
 * — confirm the units delay() expects here.
 */
void
kdp_us_spin(int usec)
{
    delay(usec/100);
}
327
/*
 * Debug helper: dump selected fields of a saved 32-bit trap state
 * (eip, cr2, and the address of the save area itself) to the kernel log.
 */
void print_saved_state(void *state)
{
    x86_saved_state32_t		*saved_state;

    saved_state = state;

    kprintf("pc = 0x%x\n", saved_state->eip);
    kprintf("cr2= 0x%x\n", saved_state->cr2);
    kprintf("rp = TODO FIXME\n");
    kprintf("sp = %p\n", saved_state);

}
340
/*
 * Cache-synchronization hook required by the KDP core; nothing is
 * needed on this platform.
 */
void
kdp_sync_cache(void)
{
	/* Intentionally empty. */
}
346
/*
 * Enter the debugger from kernel code by raising a breakpoint trap
 * (INT 3), which arrives at kdp_i386_trap() as T_INT3.
 */
void
kdp_call(void)
{
    __asm__ volatile ("int $3");	/* Let the processor do the work */
}
352
353
/*
 * Minimal view of a C stack frame: saved frame pointer, return address,
 * then the caller-pushed arguments.  'args' is a GNU zero-length array
 * marking the variable-length tail.
 */
typedef struct _cframe_t {
    struct _cframe_t	*prev;		/* saved caller frame pointer */
    unsigned		caller;		/* return address */
    unsigned		args[0];	/* arguments follow the frame */
} cframe_t;
359
360 #include <i386/pmap.h>
361 extern pt_entry_t *DMAP2;
362 extern caddr_t DADDR2;
363
/*
 * Debug helper: temporarily map the physical page containing 'src'
 * through the DMAP2/DADDR2 debug window and dump 100 words from it to
 * the kernel log, then tear the mapping down again.
 */
void
kdp_print_phys(int src)
{
	unsigned int   *iptr;
	int             i;

	/* Point the DMAP2 PTE at src's page frame with flags 0x63.
	 * NOTE(review): 0x63 presumably encodes present/writable/
	 * accessed/dirty — confirm against the i386 PTE bit layout. */
	*(int *) DMAP2 = 0x63 | (src & 0xfffff000);
	invlpg((u_int) DADDR2);		/* flush any stale TLB entry for the window */
	iptr = (unsigned int *) DADDR2;
	for (i = 0; i < 100; i++) {
		kprintf("0x%x ", *iptr++);
		if ((i % 8) == 0)
			kprintf("\n");
	}
	kprintf("\n");
	*(int *) DMAP2 = 0;		/* unmap the debug window */

}
382
/*
 * Machine-dependent debugger entry for kernel traps.  Translates the
 * x86 trap number into a Mach exception triple (exception/code/subcode)
 * and hands control to the KDP core via kdp_raise_exception().
 *
 * result/va carry the page-fault outcome and faulting address and are
 * only consumed for T_PAGE_FAULT.
 *
 * Returns TRUE when the trap was consumed by the debugger; FALSE for an
 * unexpected trap while no debugger is connected (caller falls back to
 * normal trap handling).
 */
boolean_t
kdp_i386_trap(
    unsigned int		trapno,
    x86_saved_state32_t	*saved_state,
    kern_return_t	result,
    vm_offset_t		va
)
{
    unsigned int exception, subcode = 0, code;

    if (trapno != T_INT3 && trapno != T_DEBUG) {
    	kprintf("Debugger: Unexpected kernel trap number: "
		"0x%x, EIP: 0x%x, CR2: 0x%x\n",
		trapno, saved_state->eip, saved_state->cr2);
	if (!kdp.is_conn)
	    return FALSE;	/* no debugger attached */
    }

    /* Quiesce other CPUs and tell registered callouts we're entering. */
    mp_kdp_enter();
    kdp_callouts(KDP_EVENT_ENTER);

    /* NOTE(review): TF set means we arrived via single-step; this
     * presumably balances preemption disabled when the step was armed —
     * see the matching disable_preemption() after the exception below. */
    if (saved_state->efl & EFL_TF) {
	    enable_preemption_no_check();
    }

    /* Map the hardware trap number onto a Mach exception triple. */
    switch (trapno) {

    case T_DIVIDE_ERROR:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_DIVERR;
	break;

    case T_OVERFLOW:
	exception = EXC_SOFTWARE;
	code = EXC_I386_INTOFLT;
	break;

    case T_OUT_OF_BOUNDS:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_BOUNDFLT;
	break;

    case T_INVALID_OPCODE:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_INVOPFLT;
	break;

    /* The segment faults report the hardware error code as subcode. */
    case T_SEGMENT_NOT_PRESENT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_SEGNPFLT;
	subcode	= saved_state->err;
	break;

    case T_STACK_FAULT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_STKFLT;
	subcode	= saved_state->err;
	break;

    case T_GENERAL_PROTECTION:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_GPFLT;
	subcode	= saved_state->err;
	break;

    /* Page faults report the VM result and the faulting address. */
    case T_PAGE_FAULT:
    	exception = EXC_BAD_ACCESS;
	code = result;
	subcode = va;
	break;

    case T_WATCHPOINT:
	exception = EXC_SOFTWARE;
	code = EXC_I386_ALIGNFLT;
	break;

    /* Breakpoint and single-step: the normal debugger entry paths. */
    case T_DEBUG:
    case T_INT3:
	exception = EXC_BREAKPOINT;
	code = EXC_I386_BPTFLT;
	break;

    default:
	exception = EXC_BAD_INSTRUCTION;
	code = trapno;
	break;
    }

    /* Blocks here talking to the remote debugger until it resumes us. */
    kdp_raise_exception(exception, code, subcode, saved_state);
    /* If the instruction single step bit is set, disable kernel preemption
     */
    if (saved_state->efl & EFL_TF) {
	    disable_preemption();
    }

    kdp_callouts(KDP_EVENT_EXIT);
    mp_kdp_exit();

    return TRUE;
}
483
484 boolean_t
485 kdp_call_kdb(
486 void)
487 {
488 return(FALSE);
489 }
490
/*
 * Return the breakpoint instruction the debugger plants in memory:
 * 0xcc, the one-byte x86 INT3 opcode.
 */
unsigned int
kdp_ml_get_breakinsn(void)
{
	unsigned int breakpoint_opcode = 0xcc;	/* INT3 */

	return breakpoint_opcode;
}
496 extern pmap_t kdp_pmap;
497 extern uint32_t kdp_src_high32;
498
499 #define RETURN_OFFSET 4
/*
 * Walk the EBP frame-pointer chain of 'thread' and record a backtrace
 * into 'tracepos' as 32-bit words: first the initial EIP, then for each
 * frame the frame pointer followed by the return address.  The walk
 * stops after 'nframes' frames, on a null/fenced/unaligned/non-rising
 * frame pointer, when the frame pointer exceeds 'stacklimit', or when
 * fewer than 4 frames' worth of space remain before 'tracebound'.
 *
 * For user threads the walk reads through the task's pmap (handed to
 * kdp_vm_read via the kdp_pmap global); kernel threads start from the
 * saved state at the base of the kernel stack.
 *
 * Returns the number of bytes written to the trace buffer.
 */
int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint32_t *tracebuf = (uint32_t *)tracepos;
	uint32_t fence = 0;
	uint32_t stackptr = 0;
	uint32_t stacklimit = 0xfc000000;	/* upper bound for kernel frame pointers */
	int framecount = 0;
	uint32_t init_eip = 0;
	uint32_t prevsp = 0;
	uint32_t framesize = 2 * sizeof(vm_offset_t);

	if (user_p) {
	        x86_saved_state32_t	*iss32;

		iss32 = USER_REGS32(thread);

		init_eip = iss32->eip;
		stackptr = iss32->ebp;

		/* This bound isn't useful, but it doesn't hinder us*/
		stacklimit = 0xffffffff;
		/* Route subsequent kdp_vm_reads through the task's pmap. */
		kdp_pmap = thread->task->map->pmap;
	}
	else {
		/*Examine the i386_saved_state at the base of the kernel stack*/
		stackptr = STACK_IKS(thread->kernel_stack)->k_ebp;
		init_eip = STACK_IKS(thread->kernel_stack)->k_eip;
	}

	*tracebuf++ = init_eip;

	for (framecount = 0; framecount < nframes; framecount++) {
		/* Stop early if the output buffer is nearly full. */
		if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;
		/* Invalid frame, or hit fence */
		if (!stackptr || (stackptr == fence)) {
			break;
		}
		/* Stack grows downward */
		if (stackptr < prevsp) {
			break;
		}
		/* Unaligned frame */
		if (stackptr & 0x0000003) {
			break;
		}
		if (stackptr > stacklimit) {
			break;
		}

		/* Return address lives just above the saved frame pointer. */
		if (kdp_vm_read((caddr_t) (stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
			break;
		}
		tracebuf++;

		prevsp = stackptr;
		/* Follow the saved frame pointer to the caller's frame. */
		if (kdp_vm_read((caddr_t) stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
			*tracebuf++ = 0;
			break;
		}
	}

	/* Reset so later kdp_vm_reads use the kernel's own map again. */
	kdp_pmap = 0;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
572
573 #define RETURN_OFFSET64 8
574 /* Routine to encapsulate the 64-bit address read hack*/
575 unsigned
576 machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
577 {
578 uint32_t kdp_vm_read_low32;
579 unsigned retval;
580
581 kdp_src_high32 = srcaddr >> 32;
582 kdp_vm_read_low32 = srcaddr & 0x00000000FFFFFFFFUL;
583 retval = kdp_vm_read((caddr_t)kdp_vm_read_low32, dstaddr, len);
584 kdp_src_high32 = 0;
585 return retval;
586 }
587
/*
 * 64-bit counterpart of machine_trace_thread(): walk 'thread's RBP
 * chain recording 64-bit words — the initial RIP, then (frame pointer,
 * return address) pairs — into 'tracepos', via machine_read64().  Same
 * termination conditions as the 32-bit walker.  Returns the number of
 * bytes written.
 */
int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint64_t *tracebuf = (uint64_t *)tracepos;
	uint32_t fence = 0;
	addr64_t stackptr = 0;
	uint64_t stacklimit = 0xfc000000;
	int framecount = 0;
	addr64_t init_rip = 0;
	addr64_t prevsp = 0;
	unsigned framesize = 2 * sizeof(addr64_t);

	if (user_p) {
		x86_saved_state64_t	*iss64;
		iss64 = USER_REGS64(thread);
		init_rip = iss64->isf.rip;
		stackptr = iss64->rbp;
		stacklimit = 0xffffffffffffffffULL;
		/* Route subsequent reads through the task's pmap. */
		kdp_pmap = thread->task->map->pmap;
	}
	else {
		/* DRK: This would need to adapt for a 64-bit kernel, if any */
		stackptr = STACK_IKS(thread->kernel_stack)->k_ebp;
		init_rip = STACK_IKS(thread->kernel_stack)->k_eip;
	}

	*tracebuf++ = init_rip;

	for (framecount = 0; framecount < nframes; framecount++) {
		/* Stop early if the output buffer is nearly full. */
		if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;

		/* Invalid frame pointer, or hit the (zero) fence. */
		if (!stackptr || (stackptr == fence)){
			break;
		}
		/* Stack grows downward: frame pointers must rise. */
		if (stackptr < prevsp) {
			break;
		}
		/* Unaligned frame. */
		if (stackptr & 0x0000003) {
			break;
		}
		if (stackptr > stacklimit) {
			break;
		}

		/* Return address sits just above the saved frame pointer. */
		if (machine_read64(stackptr + RETURN_OFFSET64, (caddr_t) tracebuf, sizeof(addr64_t)) != sizeof(addr64_t)) {
			break;
		}
		tracebuf++;

		prevsp = stackptr;
		/* Follow the saved frame pointer to the caller's frame. */
		if (machine_read64(stackptr, (caddr_t) &stackptr, sizeof(addr64_t)) != sizeof(addr64_t)) {
			*tracebuf++ = 0;
			break;
		}
	}

	/* Reset so later reads use the kernel's own map again. */
	kdp_pmap = NULL;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
654
/*
 * Singly-linked list of (function, argument) pairs to invoke on every
 * debugger entry/exit.  Built lock-free by kdp_register_callout() and
 * walked without locks by kdp_callouts().
 */
static struct kdp_callout {
	struct kdp_callout	*callout_next;	/* next registration, or NULL */
	kdp_callout_fn_t	callout_fn;	/* function to invoke */
	void			*callout_arg;	/* opaque argument passed to fn */
} *kdp_callout_list = NULL;
660
661
662 /*
663 * Called from kernel context to register a kdp event callout.
664 */
665 void
666 kdp_register_callout(
667 kdp_callout_fn_t fn,
668 void *arg)
669 {
670 struct kdp_callout *kcp;
671 struct kdp_callout *list_head;
672
673 kcp = kalloc(sizeof(*kcp));
674 if (kcp == NULL)
675 panic("kdp_register_callout() kalloc failed");
676
677 kcp->callout_fn = fn;
678 kcp->callout_arg = arg;
679
680 /* Lock-less list insertion using compare and exchange. */
681 do {
682 list_head = kdp_callout_list;
683 kcp->callout_next = list_head;
684 } while(!atomic_cmpxchg((uint32_t *) &kdp_callout_list,
685 (uint32_t) list_head,
686 (uint32_t) kcp));
687 }
688
689 /*
690 * Called at exception/panic time when extering or exiting kdp.
691 * We are single-threaded at this time and so we don't use locks.
692 */
693 static void
694 kdp_callouts(kdp_event_t event)
695 {
696 struct kdp_callout *kcp = kdp_callout_list;
697
698 while (kcp) {
699 kcp->callout_fn(kcp->callout_arg, event);
700 kcp = kcp->callout_next;
701 }
702 }
703
/*
 * Trap into the debugger from machine-layer code by executing a
 * breakpoint instruction.
 */
void
kdp_ml_enter_debugger(void)
{
	__asm__ __volatile__("int3");
}