]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kdp/ml/i386/kdp_machdep.c
xnu-1228.7.58.tar.gz
[apple/xnu.git] / osfmk / kdp / ml / i386 / kdp_machdep.c
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <mach_kdp.h>
30 #include <mach/mach_types.h>
31 #include <mach/machine.h>
32 #include <mach/exception_types.h>
33 #include <kern/cpu_data.h>
34 #include <i386/trap.h>
35 #include <i386/mp.h>
36 #include <kdp/kdp_internal.h>
37 #include <kdp/kdp_callout.h>
38 #include <mach-o/loader.h>
39 #include <mach-o/nlist.h>
40 #include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
41 #include <kern/machine.h> /* for halt_all_cpus */
42
43 #include <kern/thread.h>
44 #include <i386/thread.h>
45 #include <vm/vm_map.h>
46 #include <i386/pmap.h>
47 #include <kern/kalloc.h>
48
49 #define KDP_TEST_HARNESS 0
50 #if KDP_TEST_HARNESS
51 #define dprintf(x) printf x
52 #else
53 #define dprintf(x)
54 #endif
55
56 extern cpu_type_t cpuid_cputype(void);
57 extern cpu_subtype_t cpuid_cpusubtype(void);
58
59 void print_saved_state(void *);
60 void kdp_call(void);
61 int kdp_getc(void);
62 boolean_t kdp_call_kdb(void);
63 void kdp_getstate(i386_thread_state_t *);
64 void kdp_setstate(i386_thread_state_t *);
65 void kdp_print_phys(int);
66
67 int
68 machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
69
70 int
71 machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
72
73 unsigned
74 machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
75
76 extern unsigned kdp_vm_read(caddr_t src, caddr_t dst, unsigned len);
77
78 static void kdp_callouts(kdp_event_t event);
79
/*
 * Build a KDP_EXCEPTION notification packet in place in 'pkt' and direct it
 * at the remote debugger's exception port.  The caller transmits the packet;
 * kdp.exception_ack_needed is set so the notification is considered
 * outstanding until kdp_exception_ack() sees a matching sequence number.
 */
void
kdp_exception(
    unsigned char	*pkt,
    int	*len,
    unsigned short	*remote_port,
    unsigned int	exception,
    unsigned int	code,
    unsigned int	subcode
)
{
    kdp_exception_t	*rq = (kdp_exception_t *)pkt;

    /* Request header; seq pairs this notification with its future ack. */
    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof (*rq);

    /* Exactly one exception record: CPU 0 plus the Mach exception triple. */
    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);

    /* NOTE(review): rq aliases pkt, so this bcopy is effectively a no-op. */
    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    kdp.exception_ack_needed = TRUE;

    /* Output parameters for the transport layer. */
    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}
113
114 boolean_t
115 kdp_exception_ack(
116 unsigned char *pkt,
117 int len
118 )
119 {
120 kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;
121
122 if (((unsigned int) len) < sizeof (*rq))
123 return(FALSE);
124
125 if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
126 return(FALSE);
127
128 dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
129
130 if (rq->hdr.seq == kdp.exception_seq) {
131 kdp.exception_ack_needed = FALSE;
132 kdp.exception_seq++;
133 }
134 return(TRUE);
135 }
136
/*
 * Capture the 32-bit thread register state for the debugger from the saved
 * trap state recorded at kdp entry (kdp.saved_state).
 * (The forward declaration above uses i386_thread_state_t, a typedef of
 * x86_thread_state32_t.)
 */
void
kdp_getstate(
    x86_thread_state32_t	*state
)
{
    static x86_thread_state32_t	null_state;
    x86_saved_state32_t *saved_state;

    saved_state = (x86_saved_state32_t *)kdp.saved_state;

    /* Start from a zeroed state so any field not set below reads as 0. */
    *state = null_state;
    state->eax = saved_state->eax;
    state->ebx = saved_state->ebx;
    state->ecx = saved_state->ecx;
    state->edx = saved_state->edx;
    state->edi = saved_state->edi;
    state->esi = saved_state->esi;
    state->ebp = saved_state->ebp;

    if ((saved_state->cs & SEL_PL) == SEL_PL_K) { /* Kernel state? */
        if (cpu_mode_is64bit())
            /* 64-bit capable CPU: a real esp was saved in uesp. */
            state->esp = (uint32_t) saved_state->uesp;
        else
            /* Legacy mode: a same-privilege trap pushes no esp/ss, so the
             * trap-time stack pointer is reconstructed as the address just
             * past the saved 32-bit frame. */
            state->esp = ((uint32_t)saved_state) + offsetof(x86_saved_state_t, ss_32) + sizeof(x86_saved_state32_t);
        state->ss = KERNEL_DS;
    } else {
        /* Trap from user mode: esp/ss were pushed by hardware. */
        state->esp = saved_state->uesp;
        state->ss = saved_state->ss;
    }

    state->eflags = saved_state->efl;
    state->eip = saved_state->eip;
    state->cs = saved_state->cs;
    state->ds = saved_state->ds;
    state->es = saved_state->es;
    state->fs = saved_state->fs;
    state->gs = saved_state->gs;
}
175
176
/*
 * Write debugger-supplied 32-bit register state back into the saved trap
 * state, so the values take effect when the stopped context resumes.
 * Only general-purpose registers, eflags, and eip are written; segment
 * registers and esp are deliberately not modified.
 */
void
kdp_setstate(
    x86_thread_state32_t	*state
)
{
    x86_saved_state32_t		*saved_state;

    saved_state = (x86_saved_state32_t *)kdp.saved_state;

    saved_state->eax = state->eax;
    saved_state->ebx = state->ebx;
    saved_state->ecx = state->ecx;
    saved_state->edx = state->edx;
    saved_state->edi = state->edi;
    saved_state->esi = state->esi;
    saved_state->ebp = state->ebp;
    saved_state->efl = state->eflags;
#if 0
    /* Disabled: would sanitize dangerous eflags bits on write-back. */
    saved_state->frame.eflags &= ~( EFL_VM | EFL_NT | EFL_IOPL | EFL_CLR );
    saved_state->frame.eflags |=  ( EFL_IF | EFL_SET );
#endif
    saved_state->eip = state->eip;
}
200
201
202 kdp_error_t
203 kdp_machine_read_regs(
204 __unused unsigned int cpu,
205 __unused unsigned int flavor,
206 char *data,
207 __unused int *size
208 )
209 {
210 static x86_float_state32_t null_fpstate;
211
212 switch (flavor) {
213
214 case x86_THREAD_STATE32:
215 dprintf(("kdp_readregs THREAD_STATE\n"));
216 kdp_getstate((x86_thread_state32_t *)data);
217 *size = sizeof (x86_thread_state32_t);
218 return KDPERR_NO_ERROR;
219
220 case x86_FLOAT_STATE32:
221 dprintf(("kdp_readregs THREAD_FPSTATE\n"));
222 *(x86_float_state32_t *)data = null_fpstate;
223 *size = sizeof (x86_float_state32_t);
224 return KDPERR_NO_ERROR;
225
226 default:
227 dprintf(("kdp_readregs bad flavor %d\n", flavor));
228 *size = 0;
229 return KDPERR_BADFLAVOR;
230 }
231 }
232
233 kdp_error_t
234 kdp_machine_write_regs(
235 __unused unsigned int cpu,
236 unsigned int flavor,
237 char *data,
238 __unused int *size
239 )
240 {
241 switch (flavor) {
242
243 case x86_THREAD_STATE32:
244 dprintf(("kdp_writeregs THREAD_STATE\n"));
245 kdp_setstate((x86_thread_state32_t *)data);
246 return KDPERR_NO_ERROR;
247
248 case x86_FLOAT_STATE32:
249 dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
250 return KDPERR_NO_ERROR;
251
252 default:
253 dprintf(("kdp_writeregs bad flavor %d\n"));
254 return KDPERR_BADFLAVOR;
255 }
256 }
257
258
259
260 void
261 kdp_machine_hostinfo(
262 kdp_hostinfo_t *hostinfo
263 )
264 {
265 int i;
266
267 hostinfo->cpus_mask = 0;
268
269 for (i = 0; i < machine_info.max_cpus; i++) {
270 if (cpu_data_ptr[i] == NULL)
271 continue;
272
273 hostinfo->cpus_mask |= (1 << i);
274 }
275
276 hostinfo->cpu_type = cpuid_cputype();
277 hostinfo->cpu_subtype = cpuid_cpusubtype();
278 }
279
/*
 * Last-resort failure path inside the debugger stub: log the message and
 * halt this CPU permanently (hlt, no recovery).
 */
void
kdp_panic(
#if CONFIG_NO_KPRINTF_STRINGS
    __unused const char		*msg
#else
    const char			*msg
#endif
)
{
    kprintf("kdp panic: %s\n", msg);
    __asm__ volatile("hlt");
}
292
293
/*
 * Restart the machine on the debugger's behalf.  Tries the platform
 * expert's restart hook first; if that returns (or is absent), falls back
 * to halting all CPUs.
 */
void
kdp_reboot(void)
{
    printf("Attempting system restart...");
    /* Call the platform specific restart */
    if (PE_halt_restart)
        (*PE_halt_restart)(kPERestartCPU);
    /* If we do reach this, give up */
    halt_all_cpus(TRUE);
}
304
/*
 * Raise interrupt priority to the maximum and return the previous level,
 * for restoration by kdp_intr_enbl().
 */
int
kdp_intr_disbl(void)
{
   return splhigh();
}
310
/*
 * Restore the interrupt priority level previously returned by
 * kdp_intr_disbl().
 */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
316
/* Non-blocking console read for the debugger (cnmaygetc polls). */
int
kdp_getc(void)
{
	return	cnmaygetc();
}
322
/*
 * Busy-wait for approximately 'usec' microseconds.
 * NOTE(review): the argument is divided by 100 before being handed to
 * delay() — presumably to shorten debugger polling delays; confirm the
 * units delay() expects before relying on the exact duration.
 */
void
kdp_us_spin(int usec)
{
    delay(usec/100);
}
328
329 void print_saved_state(void *state)
330 {
331 x86_saved_state32_t *saved_state;
332
333 saved_state = state;
334
335 kprintf("pc = 0x%x\n", saved_state->eip);
336 kprintf("cr2= 0x%x\n", saved_state->cr2);
337 kprintf("rp = TODO FIXME\n");
338 kprintf("sp = %p\n", saved_state);
339
340 }
341
/* Cache synchronization hook: nothing to do on x86 (coherent I-cache). */
void
kdp_sync_cache(void)
{
	return;	/* No op here. */
}
347
/* Enter the debugger by raising a breakpoint trap (vector 3) on this CPU. */
void
kdp_call(void)
{
	__asm__ volatile ("int	$3");	/* Let the processor do the work */
}
353
354
/*
 * Classic i386 call-frame layout: saved ebp link, return address, then the
 * callee's arguments.  NOTE(review): appears unreferenced in this file.
 */
typedef struct _cframe_t {
    struct _cframe_t	*prev;   /* caller's frame pointer */
    unsigned		caller;  /* return address */
    unsigned		args[0]; /* arguments follow the return address */
} cframe_t;
360
361 #include <i386/pmap.h>
362 extern pt_entry_t *DMAP2;
363 extern caddr_t DADDR2;
364
365 void
366 kdp_print_phys(int src)
367 {
368 unsigned int *iptr;
369 int i;
370
371 *(int *) DMAP2 = 0x63 | (src & 0xfffff000);
372 invlpg((u_int) DADDR2);
373 iptr = (unsigned int *) DADDR2;
374 for (i = 0; i < 100; i++) {
375 kprintf("0x%x ", *iptr++);
376 if ((i % 8) == 0)
377 kprintf("\n");
378 }
379 kprintf("\n");
380 *(int *) DMAP2 = 0;
381
382 }
383
/*
 * Kernel trap entry into KDP.  Translates the i386 trap number (plus the
 * page-fault result/va where relevant) into a Mach exception triple, stops
 * the other CPUs, runs registered callouts, and raises the exception to the
 * remote debugger.  Returns TRUE when the trap was consumed; FALSE only for
 * an unexpected trap while no debugger is connected.
 *
 * Parameters:
 *   trapno      - hardware trap vector (T_* from i386/trap.h)
 *   saved_state - register state captured at trap time
 *   result      - vm_fault result, meaningful for T_PAGE_FAULT
 *   va          - faulting virtual address, meaningful for T_PAGE_FAULT
 */
boolean_t
kdp_i386_trap(
    unsigned int		trapno,
    x86_saved_state32_t	*saved_state,
    kern_return_t	result,
    vm_offset_t		va
)
{
    unsigned int exception, subcode = 0, code;

    /* Only breakpoint and debug traps are expected entry paths. */
    if (trapno != T_INT3 && trapno != T_DEBUG) {
    	kprintf("Debugger: Unexpected kernel trap number: "
		"0x%x, EIP: 0x%x, CR2: 0x%x\n",
		trapno, saved_state->eip, saved_state->cr2);
	if (!kdp.is_conn)
	    return FALSE;	/* no debugger attached: let the caller panic */
    }

    /* Quiesce the other processors before touching shared debugger state. */
    mp_kdp_enter();
    kdp_callouts(KDP_EVENT_ENTER);

    /* Single-stepping (TF set): re-enable preemption that was disabled when
     * the step was armed; the matching disable is below after the exception
     * is handled. */
    if (saved_state->efl & EFL_TF) {
	    enable_preemption_no_check();
    }

    /* Map the hardware trap to a Mach exception/code/subcode triple. */
    switch (trapno) {
    
    case T_DIVIDE_ERROR:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_DIVERR;
	break;
    
    case T_OVERFLOW:
	exception = EXC_SOFTWARE;
	code = EXC_I386_INTOFLT;
	break;
    
    case T_OUT_OF_BOUNDS:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_BOUNDFLT;
	break;
    
    case T_INVALID_OPCODE:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_INVOPFLT;
	break;
    
    case T_SEGMENT_NOT_PRESENT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_SEGNPFLT;
	subcode	= saved_state->err;	/* selector error code */
	break;
    
    case T_STACK_FAULT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_STKFLT;
	subcode	= saved_state->err;
	break;
    
    case T_GENERAL_PROTECTION:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_GPFLT;
	subcode	= saved_state->err;
	break;
	
    case T_PAGE_FAULT:
	exception = EXC_BAD_ACCESS;
	code = result;			/* vm_fault return value */
	subcode = va;			/* faulting address */
	break;
    
    case T_WATCHPOINT:
	exception = EXC_SOFTWARE;
	code = EXC_I386_ALIGNFLT;
	break;
    
    case T_DEBUG:
    case T_INT3:
	exception = EXC_BREAKPOINT;
	code = EXC_I386_BPTFLT;
	break;

    default:
	exception = EXC_BAD_INSTRUCTION;
	code = trapno;
	break;
    }

    /* Hand control to the debugger; returns when the session resumes. */
    kdp_raise_exception(exception, code, subcode, saved_state);
    /* If the instruction single step bit is set, disable kernel preemption
     */
    if (saved_state->efl & EFL_TF) {
	    disable_preemption();
    }

    kdp_callouts(KDP_EVENT_EXIT);
    mp_kdp_exit();

    return TRUE;
}
484
/* Transfer to the in-kernel debugger (KDB): not supported on this port. */
boolean_t
kdp_call_kdb(
        void)
{
        return(FALSE);
}
491
/* Return the breakpoint instruction for this architecture: 0xCC (INT3). */
unsigned int
kdp_ml_get_breakinsn(void)
{
  return 0xcc;
}
497
498 extern pmap_t kdp_pmap;
499 extern uint32_t kdp_src_high32;
500
501 #define RETURN_OFFSET 4
/*
 * Walk a thread's 32-bit frame-pointer chain and record a backtrace into
 * the buffer at 'tracepos' (bounded by 'tracebound'), capturing at most
 * 'nframes' frames.  The output is a stream of uint32_t values: the initial
 * eip followed by (frame pointer, return address) pairs.  For user threads
 * the walk reads through the task's pmap via the kdp_pmap global.
 * Returns the number of bytes written to the trace buffer.
 */
int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint32_t *tracebuf = (uint32_t *)tracepos;
	uint32_t fence = 0;
	uint32_t stackptr = 0;
	uint32_t stacklimit = 0xfc000000;	/* kernel stacks live below this */
	int framecount = 0;
	uint32_t init_eip = 0;
	uint32_t prevsp = 0;
	uint32_t framesize = 2 * sizeof(vm_offset_t);
	
	if (user_p) {
	        x86_saved_state32_t	*iss32;
		
		/* Start from the user-mode register state. */
		iss32 = USER_REGS32(thread);

		init_eip = iss32->eip;
		stackptr = iss32->ebp;

		/* This bound isn't useful, but it doesn't hinder us */
		stacklimit = 0xffffffff;
		/* Route kdp_vm_read through the target task's address space. */
		kdp_pmap = thread->task->map->pmap;
	}
	else {
		/* Examine the i386_saved_state at the base of the kernel stack */
		stackptr = STACK_IKS(thread->kernel_stack)->k_ebp;
		init_eip = STACK_IKS(thread->kernel_stack)->k_eip;
	}

	*tracebuf++ = init_eip;

	for (framecount = 0; framecount < nframes; framecount++) {

		/* Stop early if fewer than 4 frames' worth of space remain. */
		if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;	/* drop the initial eip slot overrun guard */
			break;
		}

		*tracebuf++ = stackptr;
/* Invalid frame, or hit fence */
		if (!stackptr || (stackptr == fence)) {
			break;
		}
		/* Stack grows downward */
		if (stackptr < prevsp) {
			break;
		}
		/* Unaligned frame */
		if (stackptr & 0x0000003) {
			break;
		}
		if (stackptr > stacklimit) {
			break;
		}

		/* Read the return address stored just above the saved ebp. */
		if (kdp_vm_read((caddr_t) (stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
			break;
		}
		tracebuf++;
		
		prevsp = stackptr;
		/* Follow the frame-pointer link to the caller's frame. */
		if (kdp_vm_read((caddr_t) stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
			*tracebuf++ = 0;	/* mark the truncated link */
			break;
		}
	}

	/* Restore default (kernel) address space for subsequent reads. */
	kdp_pmap = 0;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
574
575 #define RETURN_OFFSET64 8
576 /* Routine to encapsulate the 64-bit address read hack*/
577 unsigned
578 machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
579 {
580 uint32_t kdp_vm_read_low32;
581 unsigned retval;
582
583 kdp_src_high32 = srcaddr >> 32;
584 kdp_vm_read_low32 = srcaddr & 0x00000000FFFFFFFFUL;
585 retval = kdp_vm_read((caddr_t)kdp_vm_read_low32, dstaddr, len);
586 kdp_src_high32 = 0;
587 return retval;
588 }
589
/*
 * 64-bit variant of machine_trace_thread(): walk the rbp chain of a 64-bit
 * user thread (or the 32-bit kernel state of its kernel stack) and record
 * up to 'nframes' frames as uint64_t values into the buffer at 'tracepos'.
 * Memory reads go through machine_read64() to reach addresses above 4GB.
 * Returns the number of bytes written.
 */
int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint64_t *tracebuf = (uint64_t *)tracepos;
	uint32_t fence = 0;
	addr64_t stackptr = 0;
	uint64_t stacklimit = 0xfc000000;
	int framecount = 0;
	addr64_t init_rip = 0;
	addr64_t prevsp = 0;
	unsigned framesize = 2 * sizeof(addr64_t);

	if (user_p) {
		x86_saved_state64_t	*iss64;
		iss64 = USER_REGS64(thread);
		init_rip = iss64->isf.rip;
		stackptr = iss64->rbp;
		/* User stacks may live anywhere in the 64-bit space. */
		stacklimit = 0xffffffffffffffffULL;
		kdp_pmap = thread->task->map->pmap;
	}
	else {
		/* DRK: This would need to adapt for a 64-bit kernel, if any */
		stackptr = STACK_IKS(thread->kernel_stack)->k_ebp;
		init_rip = STACK_IKS(thread->kernel_stack)->k_eip;
	}

	*tracebuf++ = init_rip;

	for (framecount = 0; framecount < nframes; framecount++) {

		/* Stop early if fewer than 4 frames' worth of space remain. */
		if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;

		/* Invalid frame pointer, or hit the fence value. */
		if (!stackptr || (stackptr == fence)){
			break;
		}
		/* Stack grows downward; a smaller sp means a corrupt chain. */
		if (stackptr < prevsp) {
			break;
		}
		/* Unaligned frame. */
		if (stackptr & 0x0000003) {
			break;
		}
		if (stackptr > stacklimit) {
			break;
		}

		/* Read the return address stored just above the saved rbp. */
		if (machine_read64(stackptr + RETURN_OFFSET64, (caddr_t) tracebuf, sizeof(addr64_t)) != sizeof(addr64_t)) {
			break;
		}
		tracebuf++;

		prevsp = stackptr;
		/* Follow the frame-pointer link to the caller's frame. */
		if (machine_read64(stackptr, (caddr_t) &stackptr, sizeof(addr64_t)) != sizeof(addr64_t)) {
			*tracebuf++ = 0;	/* mark the truncated link */
			break;
		}
	}

	/* Restore default (kernel) address space for subsequent reads. */
	kdp_pmap = NULL;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
656
657 static struct kdp_callout {
658 struct kdp_callout *callout_next;
659 kdp_callout_fn_t callout_fn;
660 void *callout_arg;
661 } *kdp_callout_list = NULL;
662
663
664 /*
665 * Called from kernel context to register a kdp event callout.
666 */
/*
 * Called from kernel context to register a kdp event callout.
 * The new entry is pushed onto the head of the singly linked
 * kdp_callout_list without a lock, using compare-and-exchange.
 * NOTE(review): the CAS operates on uint32_t casts of pointers, so this
 * assumes 32-bit pointers — correct for this i386 file only.
 * The allocation is never freed; callouts are registered for the lifetime
 * of the kernel.
 */
void
kdp_register_callout(
	kdp_callout_fn_t	fn,
	void			*arg)
{
	struct kdp_callout	*kcp;
	struct kdp_callout	*list_head;

	kcp = kalloc(sizeof(*kcp));
	if (kcp == NULL)
		panic("kdp_register_callout() kalloc failed");

	kcp->callout_fn  = fn;
	kcp->callout_arg = arg;

	/* Lock-less list insertion using compare and exchange. */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while(!atomic_cmpxchg((uint32_t *) &kdp_callout_list,
				(uint32_t) list_head,
				(uint32_t) kcp));
}
690
691 /*
692  * Called at exception/panic time when entering or exiting kdp.
693 * We are single-threaded at this time and so we don't use locks.
694 */
695 static void
696 kdp_callouts(kdp_event_t event)
697 {
698 struct kdp_callout *kcp = kdp_callout_list;
699
700 while (kcp) {
701 kcp->callout_fn(kcp->callout_arg, event);
702 kcp = kcp->callout_next;
703 }
704 }
705
/* Enter the debugger from machine-level code via a breakpoint trap. */
void
kdp_ml_enter_debugger(void)
{
	__asm__ __volatile__("int3");
}