]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kdp/ml/i386/kdp_machdep.c
xnu-792.13.8.tar.gz
[apple/xnu.git] / osfmk / kdp / ml / i386 / kdp_machdep.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <mach_kdp.h>
32 #include <mach/mach_types.h>
33 #include <mach/machine.h>
34 #include <mach/exception_types.h>
35 #include <kern/cpu_data.h>
36 #include <i386/trap.h>
37 #include <i386/mp.h>
38 #include <kdp/kdp_internal.h>
39 #include <mach-o/loader.h>
40 #include <mach-o/nlist.h>
41 #include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
42 #include <kern/machine.h> /* for halt_all_cpus */
43
44 #include <kern/thread.h>
45 #include <i386/thread.h>
46 #include <vm/vm_map.h>
47 #include <i386/pmap.h>
48
49 #define KDP_TEST_HARNESS 0
50 #if KDP_TEST_HARNESS
51 #define dprintf(x) printf x
52 #else
53 #define dprintf(x)
54 #endif
55
/* CPU identification helpers implemented in the i386 cpuid code. */
extern cpu_type_t cpuid_cputype(void);
extern cpu_subtype_t cpuid_cpusubtype(void);

/* Forward declarations for the KDP machine-dependent routines below. */
void print_saved_state(void *);
void kdp_call(void);
int kdp_getc(void);
boolean_t kdp_call_kdb(void);
/*
 * NOTE(review): declared here with i386_thread_state_t, but defined below
 * with x86_thread_state32_t -- presumably the same underlying type in this
 * release; confirm against <mach/i386/thread_status.h>.
 */
void kdp_getstate(i386_thread_state_t *);
void kdp_setstate(i386_thread_state_t *);
void kdp_print_phys(int);

int
machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p);

int
machine_trace_thread64(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p);

/* Debugger-safe virtual memory read; returns the number of bytes copied. */
extern unsigned kdp_vm_read(caddr_t src, caddr_t dst, unsigned len);
74
75 void
76 kdp_exception(
77 unsigned char *pkt,
78 int *len,
79 unsigned short *remote_port,
80 unsigned int exception,
81 unsigned int code,
82 unsigned int subcode
83 )
84 {
85 kdp_exception_t *rq = (kdp_exception_t *)pkt;
86
87 rq->hdr.request = KDP_EXCEPTION;
88 rq->hdr.is_reply = 0;
89 rq->hdr.seq = kdp.exception_seq;
90 rq->hdr.key = 0;
91 rq->hdr.len = sizeof (*rq);
92
93 rq->n_exc_info = 1;
94 rq->exc_info[0].cpu = 0;
95 rq->exc_info[0].exception = exception;
96 rq->exc_info[0].code = code;
97 rq->exc_info[0].subcode = subcode;
98
99 rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);
100
101 bcopy((char *)rq, (char *)pkt, rq->hdr.len);
102
103 kdp.exception_ack_needed = TRUE;
104
105 *remote_port = kdp.exception_port;
106 *len = rq->hdr.len;
107 }
108
109 boolean_t
110 kdp_exception_ack(
111 unsigned char *pkt,
112 int len
113 )
114 {
115 kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;
116
117 if (((unsigned int) len) < sizeof (*rq))
118 return(FALSE);
119
120 if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
121 return(FALSE);
122
123 dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
124
125 if (rq->hdr.seq == kdp.exception_seq) {
126 kdp.exception_ack_needed = FALSE;
127 kdp.exception_seq++;
128 }
129 return(TRUE);
130 }
131
132 void
133 kdp_getstate(
134 x86_thread_state32_t *state
135 )
136 {
137 static x86_thread_state32_t null_state;
138 x86_saved_state32_t *saved_state;
139
140 saved_state = (x86_saved_state32_t *)kdp.saved_state;
141
142 *state = null_state;
143 state->eax = saved_state->eax;
144 state->ebx = saved_state->ebx;
145 state->ecx = saved_state->ecx;
146 state->edx = saved_state->edx;
147 state->edi = saved_state->edi;
148 state->esi = saved_state->esi;
149 state->ebp = saved_state->ebp;
150
151 if ((saved_state->cs & 0x3) == 0){ /* Kernel State */
152 state->esp = (unsigned int) &saved_state->uesp;
153 state->ss = KERNEL_DS;
154 } else {
155 state->esp = saved_state->uesp;
156 state->ss = saved_state->ss;
157 }
158
159 state->eflags = saved_state->efl;
160 state->eip = saved_state->eip;
161 state->cs = saved_state->cs;
162 state->ds = saved_state->ds;
163 state->es = saved_state->es;
164 state->fs = saved_state->fs;
165 state->gs = saved_state->gs;
166 }
167
168
169 void
170 kdp_setstate(
171 x86_thread_state32_t *state
172 )
173 {
174 x86_saved_state32_t *saved_state;
175
176 saved_state = (x86_saved_state32_t *)kdp.saved_state;
177
178 saved_state->eax = state->eax;
179 saved_state->ebx = state->ebx;
180 saved_state->ecx = state->ecx;
181 saved_state->edx = state->edx;
182 saved_state->edi = state->edi;
183 saved_state->esi = state->esi;
184 saved_state->ebp = state->ebp;
185 saved_state->efl = state->eflags;
186 #if 0
187 saved_state->frame.eflags &= ~( EFL_VM | EFL_NT | EFL_IOPL | EFL_CLR );
188 saved_state->frame.eflags |= ( EFL_IF | EFL_SET );
189 #endif
190 saved_state->eip = state->eip;
191 saved_state->fs = state->fs;
192 saved_state->gs = state->gs;
193 }
194
195
196 kdp_error_t
197 kdp_machine_read_regs(
198 __unused unsigned int cpu,
199 __unused unsigned int flavor,
200 char *data,
201 __unused int *size
202 )
203 {
204 static struct i386_float_state null_fpstate;
205
206 switch (flavor) {
207
208 case OLD_i386_THREAD_STATE:
209 case x86_THREAD_STATE32:
210 dprintf(("kdp_readregs THREAD_STATE\n"));
211 kdp_getstate((x86_thread_state32_t *)data);
212 *size = sizeof (x86_thread_state32_t);
213 return KDPERR_NO_ERROR;
214
215 case x86_FLOAT_STATE32:
216 dprintf(("kdp_readregs THREAD_FPSTATE\n"));
217 *(x86_float_state32_t *)data = null_fpstate;
218 *size = sizeof (x86_float_state32_t);
219 return KDPERR_NO_ERROR;
220
221 default:
222 dprintf(("kdp_readregs bad flavor %d\n", flavor));
223 *size = 0;
224 return KDPERR_BADFLAVOR;
225 }
226 }
227
228 kdp_error_t
229 kdp_machine_write_regs(
230 __unused unsigned int cpu,
231 unsigned int flavor,
232 char *data,
233 __unused int *size
234 )
235 {
236 switch (flavor) {
237
238 case OLD_i386_THREAD_STATE:
239 case x86_THREAD_STATE32:
240 dprintf(("kdp_writeregs THREAD_STATE\n"));
241 kdp_setstate((x86_thread_state32_t *)data);
242 return KDPERR_NO_ERROR;
243
244 case x86_FLOAT_STATE32:
245 dprintf(("kdp_writeregs THREAD_FPSTATE\n"));
246 return KDPERR_NO_ERROR;
247
248 default:
249 dprintf(("kdp_writeregs bad flavor %d\n"));
250 return KDPERR_BADFLAVOR;
251 }
252 }
253
254
255
256 void
257 kdp_machine_hostinfo(
258 kdp_hostinfo_t *hostinfo
259 )
260 {
261 int i;
262
263 hostinfo->cpus_mask = 0;
264
265 for (i = 0; i < machine_info.max_cpus; i++) {
266 if (cpu_data_ptr[i] == NULL)
267 continue;
268
269 hostinfo->cpus_mask |= (1 << i);
270 }
271
272 hostinfo->cpu_type = cpuid_cputype();
273 hostinfo->cpu_subtype = cpuid_cpusubtype();
274 }
275
/*
 * Failure path for faults inside the debugger itself: log the message to
 * the kernel console and halt this CPU with hlt.
 * NOTE(review): hlt resumes on the next interrupt, so by itself this does
 * not spin forever -- confirm callers expect that.
 */
void
kdp_panic(
    const char		*msg
)
{
    kprintf("kdp panic: %s\n", msg);
    __asm__ volatile("hlt");
}
284
285
286 void
287 kdp_reboot(void)
288 {
289 printf("Attempting system restart...");
290 /* Call the platform specific restart*/
291 if (PE_halt_restart)
292 (*PE_halt_restart)(kPERestartCPU);
293 /* If we do reach this, give up */
294 halt_all_cpus(TRUE);
295 }
296
/*
 * Mask interrupts while the debugger is active; returns the previous spl
 * so kdp_intr_enbl() can restore it.
 */
int
kdp_intr_disbl(void)
{
	return splhigh();
}
302
/* Restore the interrupt level previously saved by kdp_intr_disbl(). */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
308
/*
 * Non-blocking console input for the debugger: returns the next character
 * from cnmaygetc(), or its "no character available" value.
 *
 * Fix: the definition used an empty (unspecified) parameter list while the
 * prototype above declares (void); made consistent.
 */
int
kdp_getc(void)
{
	return cnmaygetc();
}
314
/*
 * Short busy-wait used by the KDP transport.
 * NOTE(review): despite the microsecond-suggesting name, this passes
 * usec/100 to delay() -- confirm delay()'s time unit before relying on
 * the exact duration.
 */
void
kdp_us_spin(int usec)
{
	delay(usec/100);
}
320
321 void print_saved_state(void *state)
322 {
323 x86_saved_state32_t *saved_state;
324
325 saved_state = state;
326
327 kprintf("pc = 0x%x\n", saved_state->eip);
328 kprintf("cr2= 0x%x\n", saved_state->cr2);
329 kprintf("rp = TODO FIXME\n");
330 kprintf("sp = 0x%x\n", saved_state);
331
332 }
333
/*
 * Cache synchronization hook invoked after breakpoint bytes are written.
 * Nothing to do on this architecture.
 *
 * Fix: empty (unspecified) parameter list replaced with (void).
 */
void
kdp_sync_cache(void)
{
	return;	/* No op here. */
}
339
/*
 * Enter the debugger by executing a breakpoint instruction; the resulting
 * T_INT3 trap is routed into kdp_i386_trap().
 *
 * Fix: empty (unspecified) parameter list replaced with (void).
 */
void
kdp_call(void)
{
	__asm__ volatile ("int $3"); /* Let the processor do the work */
}
345
346
/*
 * Layout of an i386 stack frame as created by the standard function
 * prologue: saved frame pointer, return address, then the caller-pushed
 * arguments.  args[0] is a GNU zero-length array used as a variable-length
 * tail.
 */
typedef struct _cframe_t {
	struct _cframe_t	*prev;		/* caller's saved ebp */
	unsigned		caller;		/* return address */
	unsigned		args[0];	/* arguments pushed by the caller */
} cframe_t;
352
353 #include <i386/pmap.h>
354 extern pt_entry_t *DMAP2;
355 extern caddr_t DADDR2;
356
/*
 * Debug aid: map the physical page containing 'src' through the reserved
 * DMAP2/DADDR2 debugger window and dump 100 words from it to the console.
 *
 * NOTE(review): 0x63 presumably encodes PTE flags (present/writable/
 * accessed/dirty) -- confirm against the i386 pmap definitions.  Note also
 * that no invlpg is issued after the mapping is cleared at the end, and
 * that the (i % 8) == 0 test fires on i == 0, so the first word is printed
 * on a line by itself.
 */
void
kdp_print_phys(int src)
{
	unsigned int   *iptr;
	int             i;

	/* Install a temporary mapping of the page holding 'src'. */
	*(int *) DMAP2 = 0x63 | (src & 0xfffff000);
	invlpg((u_int) DADDR2);
	iptr = (unsigned int *) DADDR2;
	for (i = 0; i < 100; i++) {
		kprintf("0x%x ", *iptr++);
		if ((i % 8) == 0)
			kprintf("\n");
	}
	kprintf("\n");
	/* Tear down the temporary mapping. */
	*(int *) DMAP2 = 0;

}
375
376 boolean_t
377 kdp_i386_trap(
378 unsigned int trapno,
379 x86_saved_state32_t *saved_state,
380 kern_return_t result,
381 vm_offset_t va
382 )
383 {
384 unsigned int exception, subcode = 0, code;
385
386 if (trapno != T_INT3 && trapno != T_DEBUG) {
387 kprintf("unexpected kernel trap 0x%x eip 0x%x cr2 0x%x \n",
388 trapno, saved_state->eip, saved_state->cr2);
389 if (!kdp.is_conn)
390 return FALSE;
391 }
392
393 mp_kdp_enter();
394
395 switch (trapno) {
396
397 case T_DIVIDE_ERROR:
398 exception = EXC_ARITHMETIC;
399 code = EXC_I386_DIVERR;
400 break;
401
402 case T_OVERFLOW:
403 exception = EXC_SOFTWARE;
404 code = EXC_I386_INTOFLT;
405 break;
406
407 case T_OUT_OF_BOUNDS:
408 exception = EXC_ARITHMETIC;
409 code = EXC_I386_BOUNDFLT;
410 break;
411
412 case T_INVALID_OPCODE:
413 exception = EXC_BAD_INSTRUCTION;
414 code = EXC_I386_INVOPFLT;
415 break;
416
417 case T_SEGMENT_NOT_PRESENT:
418 exception = EXC_BAD_INSTRUCTION;
419 code = EXC_I386_SEGNPFLT;
420 subcode = saved_state->err;
421 break;
422
423 case T_STACK_FAULT:
424 exception = EXC_BAD_INSTRUCTION;
425 code = EXC_I386_STKFLT;
426 subcode = saved_state->err;
427 break;
428
429 case T_GENERAL_PROTECTION:
430 exception = EXC_BAD_INSTRUCTION;
431 code = EXC_I386_GPFLT;
432 subcode = saved_state->err;
433 break;
434
435 case T_PAGE_FAULT:
436 exception = EXC_BAD_ACCESS;
437 code = result;
438 subcode = va;
439 break;
440
441 case T_WATCHPOINT:
442 exception = EXC_SOFTWARE;
443 code = EXC_I386_ALIGNFLT;
444 break;
445
446 case T_DEBUG:
447 case T_INT3:
448 exception = EXC_BREAKPOINT;
449 code = EXC_I386_BPTFLT;
450 break;
451
452 default:
453 exception = EXC_BAD_INSTRUCTION;
454 code = trapno;
455 break;
456 }
457
458 kdp_raise_exception(exception, code, subcode, saved_state);
459
460 mp_kdp_exit();
461
462 return TRUE;
463 }
464
465 boolean_t
466 kdp_call_kdb(
467 void)
468 {
469 return(FALSE);
470 }
471
/*
 * Return this architecture's breakpoint instruction encoding: 0xcc is the
 * one-byte i386 INT3 opcode.
 */
unsigned int
kdp_ml_get_breakinsn(void)
{
	return 0xcc;
}
477 extern pmap_t kdp_pmap;
478
479 #define RETURN_OFFSET 4
/*
 * Walk a thread's frame-pointer chain and record a backtrace into the
 * buffer at 'tracepos'.
 *
 * thread     - thread to trace
 * tracepos   - destination buffer, passed as a 32-bit address
 * tracebound - one past the end of the destination buffer
 * nframes    - maximum number of frames to record
 * user_p     - TRUE: trace the user-mode state (and switch kdp_pmap to the
 *              thread's address space); FALSE: trace the kernel stack
 *
 * Output layout: the initial eip, then for each frame the frame pointer
 * followed by the return address read from that frame.  Returns the number
 * of bytes written to the buffer.
 */
int
machine_trace_thread(thread_t thread, uint32_t tracepos, uint32_t tracebound, int nframes, boolean_t user_p)
{
	uint32_t *tracebuf = (uint32_t *)tracepos;
	uint32_t fence = 0;
	uint32_t stackptr = 0;
	uint32_t stacklimit = 0xfc000000;	/* kernel-trace upper bound */
	int framecount = 0;
	uint32_t init_eip = 0;
	uint32_t prevsp = 0;
	uint32_t framesize = 2 * sizeof(vm_offset_t);

	if (user_p) {
	        x86_saved_state32_t	*iss32;

		/* Start from the user-mode registers saved at kernel entry. */
		iss32 = USER_REGS32(thread);

		init_eip = iss32->eip;
		stackptr = iss32->ebp;

		/* This bound isn't useful, but it doesn't hinder us*/
		stacklimit = 0xffffffff;
		/* Route kdp_vm_read through the target task's pmap. */
		kdp_pmap = thread->task->map->pmap;
	}
	else {
		/*Examine the i386_saved_state at the base of the kernel stack*/
		stackptr = STACK_IKS(thread->kernel_stack)->k_ebp;
		init_eip = STACK_IKS(thread->kernel_stack)->k_eip;
	}

	*tracebuf++ = init_eip;

	for (framecount = 0; framecount < nframes; framecount++) {

		/* Stop (and back out the slot) if the buffer is nearly full. */
		if ((tracebound - ((uint32_t) tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;
		/* Invalid frame, or hit fence */
		if (!stackptr || (stackptr == fence)) {
			break;
		}
		/* Stack grows downward */
		if (stackptr < prevsp) {
			break;
		}
		/* Unaligned frame */
		if (stackptr & 0x0000003) {
			break;
		}
		if (stackptr > stacklimit) {
			break;
		}

		/* Read the return address stored just above the saved ebp. */
		if (kdp_vm_read((caddr_t) (stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(caddr_t)) != sizeof(caddr_t)) {
			break;
		}
		tracebuf++;

		/* Follow the saved ebp to the caller's frame. */
		prevsp = stackptr;
		if (kdp_vm_read((caddr_t) stackptr, (caddr_t) &stackptr, sizeof(caddr_t)) != sizeof(caddr_t)) {
			*tracebuf++ = 0;
			break;
		}
	}

	/* Always restore kdp_vm_read to the kernel address space. */
	kdp_pmap = 0;

	return ((uint32_t) tracebuf - tracepos);
}
552
553 /* This is a stub until the x86 64-bit model becomes clear */
554 int
555 machine_trace_thread64(__unused thread_t thread, __unused uint32_t tracepos, __unused uint32_t tracebound, __unused int nframes, __unused boolean_t user_p) {
556 return 0;
557 }