/* osfmk/kdp/ml/i386/kdp_machdep.c */
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach_kdp.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/exception_types.h>
#include <kern/cpu_data.h>
#include <i386/trap.h>
#include <i386/mp.h>
#include <kdp/kdp_internal.h>
#include <mach-o/loader.h>
#include <mach-o/nlist.h>
#include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
#include <kern/machine.h> /* for halt_all_cpus */

#include <kern/thread.h>
#include <i386/thread.h>
#include <vm/vm_map.h>
#include <i386/pmap.h>

#define KDP_TEST_HARNESS 0
#if KDP_TEST_HARNESS
#define dprintf(x) printf x
#else
#define dprintf(x)
#endif

extern cpu_type_t cpuid_cputype(void);
extern cpu_subtype_t cpuid_cpusubtype(void);

void print_saved_state(void *);
void kdp_call(void);
int kdp_getc(void);
boolean_t kdp_call_kdb(void);
void kdp_getstate(i386_thread_state_t *);
void kdp_setstate(i386_thread_state_t *);
void kdp_print_phys(int);

int
machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p);

int
machine_trace_thread64(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p);

extern unsigned kdp_vm_read(caddr_t src, caddr_t dst, unsigned len);

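/*
 * Build a KDP_EXCEPTION request in place in the supplied packet buffer so
 * the remote debugger can be told which Mach exception (and code/subcode)
 * stopped the kernel.  The acknowledgement is matched against
 * kdp.exception_seq by kdp_exception_ack() below.
 */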
void
kdp_exception(
    unsigned char *pkt,
    int *len,
    unsigned short *remote_port,
    unsigned int exception,
    unsigned int code,
    unsigned int subcode
)
{
    kdp_exception_t *rq = (kdp_exception_t *)pkt;

    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof (*rq);

    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);

    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    kdp.exception_ack_needed = TRUE;

    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}

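/*
 * Validate the debugger's acknowledgement of an outstanding KDP_EXCEPTION
 * notification; on a sequence-number match, clear exception_ack_needed and
 * advance the exception sequence.
 */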
boolean_t
kdp_exception_ack(
    unsigned char *pkt,
    int len
)
{
    kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;

    if (((unsigned int) len) < sizeof (*rq))
        return(FALSE);

    if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
        return(FALSE);

    dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));

    if (rq->hdr.seq == kdp.exception_seq) {
        kdp.exception_ack_needed = FALSE;
        kdp.exception_seq++;
    }
    return(TRUE);
}

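/*
 * Convert the trap-time register file (the x86_saved_state32_t captured on
 * entry to the debugger) into the Mach x86_THREAD_STATE32 layout reported to
 * the remote debugger.  For traps taken in kernel mode the hardware does not
 * push ESP/SS, so the address of the uesp slot stands in for the kernel stack
 * pointer and SS is reported as KERNEL_DS.
 */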
void
kdp_getstate(
    x86_thread_state32_t *state
)
{
    static x86_thread_state32_t null_state;
    x86_saved_state32_t *saved_state;

    saved_state = (x86_saved_state32_t *)kdp.saved_state;

    *state = null_state;
    state->eax = saved_state->eax;
    state->ebx = saved_state->ebx;
    state->ecx = saved_state->ecx;
    state->edx = saved_state->edx;
    state->edi = saved_state->edi;
    state->esi = saved_state->esi;
    state->ebp = saved_state->ebp;

    if ((saved_state->cs & 0x3) == 0) {    /* Kernel state */
        state->esp = (unsigned int) &saved_state->uesp;
        state->ss = KERNEL_DS;
    } else {
        state->esp = saved_state->uesp;
        state->ss = saved_state->ss;
    }

    state->eflags = saved_state->efl;
    state->eip = saved_state->eip;
    state->cs = saved_state->cs;
    state->ds = saved_state->ds;
    state->es = saved_state->es;
    state->fs = saved_state->fs;
    state->gs = saved_state->gs;
}


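/*
 * Apply register values supplied by the remote debugger back into the saved
 * trap state, so they take effect when the kernel resumes.  Note that the
 * segment registers other than FS/GS and the stack pointer are deliberately
 * left untouched.
 */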
void
kdp_setstate(
    x86_thread_state32_t *state
)
{
    x86_saved_state32_t *saved_state;

    saved_state = (x86_saved_state32_t *)kdp.saved_state;

    saved_state->eax = state->eax;
    saved_state->ebx = state->ebx;
    saved_state->ecx = state->ecx;
    saved_state->edx = state->edx;
    saved_state->edi = state->edi;
    saved_state->esi = state->esi;
    saved_state->ebp = state->ebp;
    saved_state->efl = state->eflags;
#if 0
    saved_state->frame.eflags &= ~( EFL_VM | EFL_NT | EFL_IOPL | EFL_CLR );
    saved_state->frame.eflags |= ( EFL_IF | EFL_SET );
#endif
    saved_state->eip = state->eip;
    saved_state->fs = state->fs;
    saved_state->gs = state->gs;
}


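/*
 * KDP register-read handler: dispatch on the requested thread-state flavor.
 * Only the 32-bit general-purpose flavor is backed by real data; the
 * floating-point flavor is answered with a zeroed state.
 */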
kdp_error_t
kdp_machine_read_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    int *size
)
{
    static struct i386_float_state null_fpstate;

    switch (flavor) {

    case OLD_i386_THREAD_STATE:
    case x86_THREAD_STATE32:
        dprintf(("kdp_readregs THREAD_STATE\n"));
        kdp_getstate((x86_thread_state32_t *)data);
        *size = sizeof (x86_thread_state32_t);
        return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE32:
        dprintf(("kdp_readregs THREAD_FPSTATE\n"));
        *(x86_float_state32_t *)data = null_fpstate;
        *size = sizeof (x86_float_state32_t);
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_readregs bad flavor %d\n", flavor));
        *size = 0;
        return KDPERR_BADFLAVOR;
    }
}

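/*
 * KDP register-write handler: general-purpose state is pushed back through
 * kdp_setstate(); floating-point writes are accepted but ignored.
 */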
kdp_error_t
kdp_machine_write_regs(
    __unused unsigned int cpu,
    unsigned int flavor,
    char *data,
    __unused int *size
)
{
    switch (flavor) {

    case OLD_i386_THREAD_STATE:
    case x86_THREAD_STATE32:
        dprintf(("kdp_writeregs THREAD_STATE\n"));
        kdp_setstate((x86_thread_state32_t *)data);
        return KDPERR_NO_ERROR;

    case x86_FLOAT_STATE32:
        dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
        return KDPERR_NO_ERROR;

    default:
        dprintf(("kdp_writeregs bad flavor %d\n", flavor));
        return KDPERR_BADFLAVOR;
    }
}


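/*
 * Report a bitmask of the CPUs that have been brought up, plus the CPU
 * type/subtype, for the KDP host-information request.
 */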
void
kdp_machine_hostinfo(
    kdp_hostinfo_t *hostinfo
)
{
    int i;

    hostinfo->cpus_mask = 0;

    for (i = 0; i < machine_info.max_cpus; i++) {
        if (cpu_data_ptr[i] == NULL)
            continue;

        hostinfo->cpus_mask |= (1 << i);
    }

    hostinfo->cpu_type = cpuid_cputype();
    hostinfo->cpu_subtype = cpuid_cpusubtype();
}

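/* Last-resort failure path for the debugger itself: log and halt this CPU. */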
void
kdp_panic(
    const char *msg
)
{
    kprintf("kdp panic: %s\n", msg);
    __asm__ volatile("hlt");
}


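/*
 * Restart requested by the remote debugger: try the platform expert's
 * restart hook first, and halt all CPUs if that returns.
 */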
void
kdp_reboot(void)
{
    printf("Attempting system restart...");
    /* Call the platform-specific restart */
    if (PE_halt_restart)
        (*PE_halt_restart)(kPERestartCPU);
    /* If we do reach this, give up */
    halt_all_cpus(TRUE);
}

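/*
 * Interrupt gating used around debugger activity: raise to splhigh() and
 * later restore the previous level.
 */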
int
kdp_intr_disbl(void)
{
    return splhigh();
}

void
kdp_intr_enbl(int s)
{
    splx(s);
}

int
kdp_getc(void)
{
    return cnmaygetc();
}

void
kdp_us_spin(int usec)
{
    delay(usec / 100);
}

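/* Debug aid: dump a few interesting fields of a 32-bit saved trap state. */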
void print_saved_state(void *state)
{
    x86_saved_state32_t *saved_state;

    saved_state = state;

    kprintf("pc = 0x%x\n", saved_state->eip);
    kprintf("cr2= 0x%x\n", saved_state->cr2);
    kprintf("rp = TODO FIXME\n");
    kprintf("sp = 0x%x\n", saved_state);
}

void
kdp_sync_cache(void)
{
    return;    /* No op here. */
}

void
kdp_call(void)
{
    __asm__ volatile ("int $3");    /* Let the processor do the work */
}


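/*
 * Shape of an IA-32 C stack frame as laid down by the usual
 * push %ebp / mov %esp,%ebp prologue:
 *
 *      prev   -- saved frame pointer of the caller
 *      caller -- return address
 *      args[] -- arguments pushed by the caller
 *
 * machine_trace_thread() below walks frames of this shape.
 */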
typedef struct _cframe_t {
    struct _cframe_t *prev;
    unsigned caller;
    unsigned args[0];
} cframe_t;

extern pt_entry_t *DMAP2;
extern caddr_t DADDR2;
void
kdp_print_phys(int src)
{
    unsigned int *iptr;
    int i;

    *(int *) DMAP2 = 0x63 | (src & 0xfffff000);
    invlpg((u_int) DADDR2);
    iptr = (unsigned int *) DADDR2;
    for (i = 0; i < 100; i++) {
        kprintf("0x%x ", *iptr++);
        if ((i % 8) == 0)
            kprintf("\n");
    }
    kprintf("\n");
    *(int *) DMAP2 = 0;
}

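/*
 * Kernel trap hook for the debugger: translate an i386 trap number into a
 * Mach exception/code/subcode triple, quiesce the other CPUs via
 * mp_kdp_enter(), and hand control to kdp_raise_exception().  Returns FALSE
 * only for an unexpected trap taken while no debugger is connected.
 */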
boolean_t
kdp_i386_trap(
    unsigned int trapno,
    x86_saved_state32_t *saved_state,
    kern_return_t result,
    vm_offset_t va
)
{
    unsigned int exception, subcode = 0, code;

    if (trapno != T_INT3 && trapno != T_DEBUG) {
        kprintf("unexpected kernel trap 0x%x eip 0x%x cr2 0x%x\n",
            trapno, saved_state->eip, saved_state->cr2);
        if (!kdp.is_conn)
            return FALSE;
    }

    mp_kdp_enter();

    switch (trapno) {

    case T_DIVIDE_ERROR:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_DIVERR;
        break;

    case T_OVERFLOW:
        exception = EXC_SOFTWARE;
        code = EXC_I386_INTOFLT;
        break;

    case T_OUT_OF_BOUNDS:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_BOUNDFLT;
        break;

    case T_INVALID_OPCODE:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOPFLT;
        break;

    case T_SEGMENT_NOT_PRESENT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = saved_state->err;
        break;

    case T_STACK_FAULT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = saved_state->err;
        break;

    case T_GENERAL_PROTECTION:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_GPFLT;
        subcode = saved_state->err;
        break;

    case T_PAGE_FAULT:
        exception = EXC_BAD_ACCESS;
        code = result;
        subcode = va;
        break;

    case T_WATCHPOINT:
        exception = EXC_SOFTWARE;
        code = EXC_I386_ALIGNFLT;
        break;

    case T_DEBUG:
    case T_INT3:
        exception = EXC_BREAKPOINT;
        code = EXC_I386_BPTFLT;
        break;

    default:
        exception = EXC_BAD_INSTRUCTION;
        code = trapno;
        break;
    }

    kdp_raise_exception(exception, code, subcode, saved_state);

    mp_kdp_exit();

    return TRUE;
}

boolean_t
kdp_call_kdb(void)
{
    return(FALSE);
}

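/* 0xcc is the single-byte x86 INT3 breakpoint opcode. */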
unsigned int
kdp_ml_get_breakinsn(void)
{
    return 0xcc;
}

extern pmap_t kdp_pmap;

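/*
 * RETURN_OFFSET is the offset of the return address above a saved frame
 * pointer (see cframe_t above).
 *
 * machine_trace_thread() records a backtrace for the given thread into the
 * buffer at 'tracepos' as an initial EIP followed by (frame pointer, return
 * address) pairs.  It stops after 'nframes' frames, when the buffer nears
 * 'tracebound', or at the first frame pointer that looks invalid (NULL,
 * misaligned, above the stack limit, or not advancing to a higher address).
 * All stack reads go through kdp_vm_read() so that bad pointers fault
 * safely; for user threads the reads are performed against the task's pmap
 * via the kdp_pmap global.
 */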
#define RETURN_OFFSET 4

int
machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p)
{
    uint32_t *tracebuf = (uint32_t *)tracepos;
    uint32_t fence = 0;
    uint32_t stackptr = 0;
    uint32_t stacklimit = 0xfc000000;
    int framecount = 0;
    uint32_t init_eip = 0;
    uint32_t prevsp = 0;
    uint32_t framesize = 2 * sizeof(vm_offset_t);

    if (user_p) {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);

        init_eip = iss32->eip;
        stackptr = iss32->ebp;

        /* This bound isn't useful, but it doesn't hinder us */
        stacklimit = 0xffffffff;
        kdp_pmap = thread->task->map->pmap;
    } else {
        /* Examine the i386_saved_state at the base of the kernel stack */
        stackptr = STACK_IKS(thread->kernel_stack)->k_ebp;
        init_eip = STACK_IKS(thread->kernel_stack)->k_eip;
    }

    *tracebuf++ = init_eip;

    for (framecount = 0; framecount < nframes; framecount++) {

        if ((tracebound - ((uint32_t) tracebuf)) < (4 * framesize)) {
            tracebuf--;
            break;
        }

        *tracebuf++ = stackptr;
        /* Invalid frame, or hit fence */
        if (!stackptr || (stackptr == fence)) {
            break;
        }
        /* Stack grows downward */
        if (stackptr < prevsp) {
            break;
        }
        /* Unaligned frame */
        if (stackptr & 0x0000003) {
            break;
        }
        if (stackptr > stacklimit) {
            break;
        }

        if (kdp_vm_read((caddr_t) (stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
            break;
        }
        tracebuf++;

        prevsp = stackptr;
        if (kdp_vm_read((caddr_t) stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
            *tracebuf++ = 0;
            break;
        }
    }

    kdp_pmap = 0;

    return ((uint32_t) tracebuf - tracepos);
}

/* This is a stub until the x86 64-bit model becomes clear */
int
machine_trace_thread64(__unused thread_t thread, __unused uint32_t tracepos, __unused uint32_t tracebound, __unused int nframes, __unused boolean_t user_p)
{
    return 0;
}