]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kdp/ml/x86_64/kdp_machdep.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / kdp / ml / x86_64 / kdp_machdep.c
CommitLineData
b0d623f7
A
1/*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
b0d623f7
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
b0d623f7
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
b0d623f7
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
b0d623f7
A
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
0a7de745 28
b0d623f7
A
29#include <mach_kdp.h>
30#include <mach/mach_types.h>
31#include <mach/machine.h>
32#include <mach/exception_types.h>
33#include <kern/cpu_data.h>
34#include <i386/trap.h>
35#include <i386/mp.h>
36#include <kdp/kdp_internal.h>
b0d623f7
A
37#include <mach-o/loader.h>
38#include <mach-o/nlist.h>
39#include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
40#include <kern/machine.h> /* for halt_all_cpus */
41#include <libkern/OSAtomic.h>
42
43#include <kern/thread.h>
44#include <i386/thread.h>
45#include <vm/vm_map.h>
46#include <i386/pmap.h>
b0d623f7
A
47
48#define KDP_TEST_HARNESS 0
49#if KDP_TEST_HARNESS
50#define dprintf(x) printf x
51#else
52#define dprintf(x)
53#endif
54
55extern cpu_type_t cpuid_cputype(void);
56extern cpu_subtype_t cpuid_cpusubtype(void);
57
3e170ce0 58extern vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);
fe8ab488 59extern void machine_trace_thread_clear_validation_cache(void);
39037602 60extern vm_map_t kernel_map;
fe8ab488 61
0a7de745
A
62void print_saved_state(void *);
63void kdp_call(void);
64int kdp_getc(void);
65void kdp_getstate(x86_thread_state64_t *);
66void kdp_setstate(x86_thread_state64_t *);
39037602 67unsigned machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
b0d623f7 68
b0d623f7
A
/*
 * Build a KDP_EXCEPTION request packet in place, notifying the remote
 * debugger of the exception triple (exception/code/subcode) on cpu 0.
 * On return *len holds the packet length and *remote_port the peer's
 * exception port; kdp.exception_ack_needed stays set until the peer
 * acknowledges (see kdp_exception_ack()).
 */
void
kdp_exception(
    unsigned char   *pkt,
    int             *len,
    unsigned short  *remote_port,
    unsigned int    exception,
    unsigned int    code,
    unsigned int    subcode
    )
{
    kdp_exception_t *rq = (kdp_exception_t *)pkt;

    rq->hdr.request = KDP_EXCEPTION;
    rq->hdr.is_reply = 0;
    rq->hdr.seq = kdp.exception_seq;
    rq->hdr.key = 0;
    rq->hdr.len = sizeof(*rq);

    /* Exactly one exception record is reported, always attributed to cpu 0. */
    rq->n_exc_info = 1;
    rq->exc_info[0].cpu = 0;
    rq->exc_info[0].exception = exception;
    rq->exc_info[0].code = code;
    rq->exc_info[0].subcode = subcode;

    rq->hdr.len += rq->n_exc_info * sizeof(kdp_exc_info_t);

    /* NOTE(review): rq aliases pkt, so this bcopy is a self-copy no-op;
     * preserved as-is from the original code. */
    bcopy((char *)rq, (char *)pkt, rq->hdr.len);

    kdp.exception_ack_needed = TRUE;

    *remote_port = kdp.exception_port;
    *len = rq->hdr.len;
}
102
103boolean_t
104kdp_exception_ack(
0a7de745
A
105 unsigned char *pkt,
106 int len
107 )
b0d623f7 108{
0a7de745
A
109 kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;
110
111 if (((unsigned int) len) < sizeof(*rq)) {
112 return FALSE;
113 }
114
115 if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION) {
116 return FALSE;
117 }
118
119 dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
120
121 if (rq->hdr.seq == kdp.exception_seq) {
122 kdp.exception_ack_needed = FALSE;
123 kdp.exception_seq++;
124 }
125 return TRUE;
b0d623f7
A
126}
127
/*
 * Copy the interrupted thread's register file (captured in
 * kdp.saved_state at debugger entry) into the exported
 * x86_thread_state64_t layout consumed by the remote debugger.
 */
void
kdp_getstate(
    x86_thread_state64_t    *state
    )
{
    x86_saved_state64_t *saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;

    /* General-purpose registers. */
    state->rax = saved_state->rax;
    state->rbx = saved_state->rbx;
    state->rcx = saved_state->rcx;
    state->rdx = saved_state->rdx;
    state->rdi = saved_state->rdi;
    state->rsi = saved_state->rsi;
    state->rbp = saved_state->rbp;

    state->r8 = saved_state->r8;
    state->r9 = saved_state->r9;
    state->r10 = saved_state->r10;
    state->r11 = saved_state->r11;
    state->r12 = saved_state->r12;
    state->r13 = saved_state->r13;
    state->r14 = saved_state->r14;
    state->r15 = saved_state->r15;

    /* rsp/rflags/rip live in the interrupt stack frame (isf). */
    state->rsp = saved_state->isf.rsp;
    state->rflags = saved_state->isf.rflags;
    state->rip = saved_state->isf.rip;

    /* Segment selectors: only cs/fs/gs are tracked in the saved state. */
    state->cs = saved_state->isf.cs;
    state->fs = saved_state->fs;
    state->gs = saved_state->gs;
}
162
163
/*
 * Install a register image supplied by the remote debugger into the
 * saved state that will be restored when the debugged thread resumes.
 * Inverse of kdp_getstate(), except that cs is NOT written back (the
 * code segment selector is left untouched).
 */
void
kdp_setstate(
    x86_thread_state64_t    *state
    )
{
    x86_saved_state64_t *saved_state;

    saved_state = (x86_saved_state64_t *)kdp.saved_state;
    /* General-purpose registers. */
    saved_state->rax = state->rax;
    saved_state->rbx = state->rbx;
    saved_state->rcx = state->rcx;
    saved_state->rdx = state->rdx;
    saved_state->rdi = state->rdi;
    saved_state->rsi = state->rsi;
    saved_state->rbp = state->rbp;
    saved_state->r8 = state->r8;
    saved_state->r9 = state->r9;
    saved_state->r10 = state->r10;
    saved_state->r11 = state->r11;
    saved_state->r12 = state->r12;
    saved_state->r13 = state->r13;
    saved_state->r14 = state->r14;
    saved_state->r15 = state->r15;

    /* rflags here includes TF if the debugger requests single-stepping;
     * see the EFL_TF handling in kdp_i386_trap(). */
    saved_state->isf.rflags = state->rflags;
    saved_state->isf.rsp = state->rsp;
    saved_state->isf.rip = state->rip;

    saved_state->fs = (uint32_t)state->fs;
    saved_state->gs = (uint32_t)state->gs;
}
195
196
197kdp_error_t
198kdp_machine_read_regs(
0a7de745
A
199 __unused unsigned int cpu,
200 unsigned int flavor,
201 char *data,
202 int *size
203 )
b0d623f7 204{
0a7de745
A
205 static x86_float_state64_t null_fpstate;
206
207 switch (flavor) {
208 case x86_THREAD_STATE64:
209 dprintf(("kdp_readregs THREAD_STATE64\n"));
210 kdp_getstate((x86_thread_state64_t *)data);
211 *size = sizeof(x86_thread_state64_t);
212 return KDPERR_NO_ERROR;
213
214 case x86_FLOAT_STATE64:
215 dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
216 *(x86_float_state64_t *)data = null_fpstate;
217 *size = sizeof(x86_float_state64_t);
218 return KDPERR_NO_ERROR;
219
220 default:
221 dprintf(("kdp_readregs bad flavor %d\n", flavor));
222 *size = 0;
223 return KDPERR_BADFLAVOR;
224 }
b0d623f7
A
225}
226
227kdp_error_t
228kdp_machine_write_regs(
0a7de745
A
229 __unused unsigned int cpu,
230 unsigned int flavor,
231 char *data,
232 __unused int *size
233 )
b0d623f7 234{
0a7de745
A
235 switch (flavor) {
236 case x86_THREAD_STATE64:
237 dprintf(("kdp_writeregs THREAD_STATE64\n"));
238 kdp_setstate((x86_thread_state64_t *)data);
239 return KDPERR_NO_ERROR;
240
241 case x86_FLOAT_STATE64:
242 dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
243 return KDPERR_NO_ERROR;
244
245 default:
246 dprintf(("kdp_writeregs bad flavor %d\n", flavor));
247 return KDPERR_BADFLAVOR;
248 }
b0d623f7
A
249}
250
251
252
/*
 * Describe this host to the remote debugger: a bitmask of the populated
 * CPU slots plus the CPU type (with the 64-bit ABI flag) and subtype.
 */
void
kdp_machine_hostinfo(
    kdp_hostinfo_t *hostinfo
    )
{
    int i;

    hostinfo->cpus_mask = 0;

    for (i = 0; i < machine_info.max_cpus; i++) {
        /* Skip slots with no per-cpu data (CPU never brought up). */
        if (cpu_data_ptr[i] == NULL) {
            continue;
        }

        /* NOTE(review): (1 << i) is a signed 32-bit shift, which is UB
         * for i >= 31; presumably max_cpus is bounded well below that —
         * TODO confirm, or widen to 1ULL if cpus_mask allows. */
        hostinfo->cpus_mask |= (1 << i);
    }

    hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
    hostinfo->cpu_subtype = cpuid_cpusubtype();
}
273
/*
 * Emit a "kdp panic: "-prefixed, printf-style message to the console
 * and halt this CPU.  Does not return.
 */
void
kdp_panic(
    const char      *fmt,
    ...
    )
{
    char prefixed_fmt[256];
    va_list ap;

    va_start(ap, fmt);
    /* Prepend the fixed tag, then format the caller's arguments. */
    (void) snprintf(prefixed_fmt, sizeof(prefixed_fmt), "kdp panic: %s", fmt);
    vprintf(prefixed_fmt, ap);
    va_end(ap);

    __asm__ volatile ("hlt");
}
290
b0d623f7
A
/*
 * Block interrupts around debugger work.  Returns the previous
 * interrupt priority level for a later kdp_intr_enbl().
 */
int
kdp_intr_disbl(void)
{
    int previous_level = splhigh();

    return previous_level;
}
296
/* Restore the interrupt priority level saved by kdp_intr_disbl(). */
void
kdp_intr_enbl(int s)
{
    splx(s);
}
302
/*
 * Poll the console for a character; returns whatever cnmaygetc()
 * reports (non-blocking).
 */
int
kdp_getc(void)
{
    int c = cnmaygetc();

    return c;
}
308
/* Busy-wait for approximately `usec` microseconds. */
void
kdp_us_spin(int usec)
{
    /* NOTE(review): delay() is handed usec/100 — the units look scaled
     * by 100x; preserved as-is, TODO confirm delay()'s expected unit. */
    delay(usec / 100);
}
314
0a7de745
A
/*
 * Dump key fields of a saved trap state to the kernel log: faulting
 * rip, cr2 (page-fault linear address), and the address of the saved
 * state structure itself.
 */
void
print_saved_state(void *state)
{
    x86_saved_state64_t *saved_state;

    saved_state = state;

    kprintf("pc = 0x%llx\n", saved_state->isf.rip);
    kprintf("cr2= 0x%llx\n", saved_state->cr2);
    /* Return pointer was never wired up upstream; message kept verbatim. */
    kprintf("rp = TODO FIXME\n");
    kprintf("sp = %p\n", saved_state);
}
327
/* Cache synchronization hook: intentionally a no-op on this platform. */
void
kdp_sync_cache(void)
{
}
333
/*
 * Enter the debugger by raising a breakpoint (#BP) trap; the resulting
 * T_INT3 is handled by kdp_i386_trap().
 */
void
kdp_call(void)
{
    __asm__ volatile ("int $3"); /* Let the processor do the work */
}
339
340
/*
 * Image of a 32-bit x86 call frame as laid out on the stack:
 * saved frame pointer, 32-bit return address, then any stacked
 * arguments.  The trailing member is a C99 flexible array member
 * (was the non-standard zero-length `args[0]`); sizeof and member
 * offsets are unchanged.
 */
typedef struct _cframe_t {
    struct _cframe_t    *prev;      /* caller's saved frame pointer */
    unsigned            caller;     /* 32-bit return address */
    unsigned            args[];     /* stacked arguments, if any */
} cframe_t;
346
0a7de745
A
/*
 * Debugger trap entry: translate an x86 kernel trap into a Mach
 * exception triple and hand it to handle_debugger_trap().
 *
 * trapno       hardware trap number (T_INT3/T_DEBUG expected)
 * saved_state  register state captured at the trap
 * result       kern_return_t from fault handling (used for page faults)
 * va           faulting virtual address (used for page faults)
 *
 * Returns TRUE when the trap was consumed by the debugger, FALSE for an
 * unexpected trap when no debugger is connected.
 */
boolean_t
kdp_i386_trap(
    unsigned int        trapno,
    x86_saved_state64_t *saved_state,
    kern_return_t       result,
    vm_offset_t         va
    )
{
    unsigned int exception, code, subcode = 0;
    boolean_t prev_interrupts_state;

    /* Anything other than a breakpoint or single-step is unexpected
     * here; log it, and bail unless a debugger is already attached. */
    if (trapno != T_INT3 && trapno != T_DEBUG) {
        kprintf("Debugger: Unexpected kernel trap number: "
            "0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
            trapno, saved_state->isf.rip, saved_state->cr2);
        if (!kdp.is_conn) {
            return FALSE;
        }
    }

    prev_interrupts_state = ml_set_interrupts_enabled(FALSE);
    disable_preemption();

    if (saved_state->isf.rflags & EFL_TF) {
        /* Single-step trap: preemption was left disabled when the step
         * was armed (see the matching disable_preemption() at the end),
         * so drop that extra reference without a preemption check. */
        enable_preemption_no_check();
    }

    /* Map the hardware trap number to a Mach (exception, code, subcode). */
    switch (trapno) {
    case T_DIVIDE_ERROR:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_DIVERR;
        break;

    case T_OVERFLOW:
        exception = EXC_SOFTWARE;
        code = EXC_I386_INTOFLT;
        break;

    case T_OUT_OF_BOUNDS:
        exception = EXC_ARITHMETIC;
        code = EXC_I386_BOUNDFLT;
        break;

    case T_INVALID_OPCODE:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOPFLT;
        break;

    case T_SEGMENT_NOT_PRESENT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_STACK_FAULT:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_GENERAL_PROTECTION:
        exception = EXC_BAD_INSTRUCTION;
        code = EXC_I386_GPFLT;
        subcode = (unsigned int)saved_state->isf.err;
        break;

    case T_PAGE_FAULT:
        /* For page faults the code is the VM fault result and the
         * subcode is the (truncated) faulting address. */
        exception = EXC_BAD_ACCESS;
        code = result;
        subcode = (unsigned int)va;
        break;

    case T_WATCHPOINT:
        exception = EXC_SOFTWARE;
        code = EXC_I386_ALIGNFLT;
        break;

    case T_DEBUG:
    case T_INT3:
        exception = EXC_BREAKPOINT;
        code = EXC_I386_BPTFLT;
        break;

    default:
        exception = EXC_BAD_INSTRUCTION;
        code = trapno;
        break;
    }

    /* If this CPU already recorded a fatal trap state, report that
     * original state to the debugger instead of this (nested) one, and
     * stash the nested state for post-mortem inspection. */
    if (current_cpu_datap()->cpu_fatal_trap_state) {
        current_cpu_datap()->cpu_post_fatal_trap_state = saved_state;
        saved_state = current_cpu_datap()->cpu_fatal_trap_state;
    }

    handle_debugger_trap(exception, code, subcode, saved_state);

    enable_preemption();
    ml_set_interrupts_enabled(prev_interrupts_state);

    /* If the instruction single step bit is set, disable kernel preemption
     */
    if (saved_state->isf.rflags & EFL_TF) {
        disable_preemption();
    }

    return TRUE;
}
454
b0d623f7
A
/*
 * Report the breakpoint instruction the debugger should plant: the
 * single-byte x86 INT3 opcode (0xCC).  Writes the opcode to bytes[0]
 * and its length to *size.
 */
void
kdp_machine_get_breakinsn(
    uint8_t *bytes,
    uint32_t *size
    )
{
    const uint8_t int3_opcode = 0xcc;

    bytes[0] = int3_opcode;
    *size = sizeof(int3_opcode);
}
464
b0d623f7
A
465#define RETURN_OFFSET 4
466
/*
 * Walk a 32-bit user thread's frame-pointer chain, depositing one
 * 32-bit return address per frame into tracepos.  The walk stops after
 * nframes frames, at an invalid/unaligned/non-ascending frame pointer,
 * or at an unmappable address (which also sets kThreadTruncatedBT in
 * *thread_trace_flags when that pointer is non-NULL).
 *
 * Only user_p == TRUE is supported: this is a 64-bit kernel, so there
 * is no 32-bit kernel stack to trace (panics otherwise).
 *
 * Returns the number of bytes written to tracepos.
 */
int
machine_trace_thread(thread_t thread,
    char * tracepos,
    char * tracebound,
    int nframes,
    boolean_t user_p,
    uint32_t * thread_trace_flags)
{
    uint32_t * tracebuf = (uint32_t *)tracepos;
    uint32_t framesize = sizeof(uint32_t);

    uint32_t fence = 0;
    uint32_t stackptr = 0;
    uint32_t stacklimit = 0xfc000000;
    int framecount = 0;
    uint32_t prev_eip = 0;
    uint32_t prevsp = 0;
    vm_offset_t kern_virt_addr = 0;
    vm_map_t bt_vm_map = VM_MAP_NULL;

    /* Clamp nframes to what fits between tracepos and tracebound. */
    nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;

    if (user_p) {
        x86_saved_state32_t *iss32;

        iss32 = USER_REGS32(thread);
        prev_eip = iss32->eip;
        stackptr = iss32->ebp;

        /* User stacks may live anywhere in the 32-bit address space. */
        stacklimit = 0xffffffff;
        bt_vm_map = thread->task->map;
    } else {
        panic("32-bit trace attempted on 64-bit kernel");
    }

    for (framecount = 0; framecount < nframes; framecount++) {
        *tracebuf++ = prev_eip;

        /* Invalid frame, or hit fence */
        if (!stackptr || (stackptr == fence)) {
            break;
        }

        /* Unaligned frame */
        if (stackptr & 0x0000003) {
            break;
        }

        /* Frame pointers must strictly ascend up the stack. */
        if (stackptr <= prevsp) {
            break;
        }

        if (stackptr > stacklimit) {
            break;
        }

        /* Read the saved return address at (ebp + 4). */
        kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET, bt_vm_map, thread_trace_flags);

        if (!kern_virt_addr) {
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
            break;
        }

        prev_eip = *(uint32_t *)kern_virt_addr;

        prevsp = stackptr;

        /* Follow the saved frame pointer to the caller's frame. */
        kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);

        if (kern_virt_addr) {
            stackptr = *(uint32_t *)kern_virt_addr;
        } else {
            stackptr = 0;
            if (thread_trace_flags) {
                *thread_trace_flags |= kThreadTruncatedBT;
            }
        }
    }

    machine_trace_thread_clear_validation_cache();

    return (uint32_t) (((char *) tracebuf) - tracepos);
}
552
553
0a7de745 554#define RETURN_OFFSET64 8
b0d623f7
A
555/* Routine to encapsulate the 64-bit address read hack*/
556unsigned
557machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
558{
559 return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
560}
561
562int
39037602 563machine_trace_thread64(thread_t thread,
0a7de745
A
564 char * tracepos,
565 char * tracebound,
566 int nframes,
567 boolean_t user_p,
0a7de745 568 uint32_t * thread_trace_flags,
f427ee49
A
569 uint64_t *sp,
570 vm_offset_t fp)
b0d623f7 571{
39037602 572 uint64_t * tracebuf = (uint64_t *)tracepos;
f427ee49 573 unsigned framesize = sizeof(addr64_t);
39037602
A
574
575 uint32_t fence = 0;
576 addr64_t stackptr = 0;
577 int framecount = 0;
578 addr64_t prev_rip = 0;
579 addr64_t prevsp = 0;
fe8ab488 580 vm_offset_t kern_virt_addr = 0;
39037602
A
581 vm_map_t bt_vm_map = VM_MAP_NULL;
582
39037602 583 nframes = (tracebound > tracepos) ? MIN(nframes, (int)((tracebound - tracepos) / framesize)) : 0;
b0d623f7
A
584
585 if (user_p) {
0a7de745 586 x86_saved_state64_t *iss64;
b0d623f7 587 iss64 = USER_REGS64(thread);
39037602 588 prev_rip = iss64->isf.rip;
f427ee49
A
589 if (fp == 0) {
590 stackptr = iss64->rbp;
591 }
39037602 592 bt_vm_map = thread->task->map;
0a7de745
A
593 if (sp && user_p) {
594 *sp = iss64->isf.rsp;
595 }
596 } else {
f427ee49
A
597 if (fp == 0) {
598 stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
599 }
39037602
A
600 prev_rip = STACK_IKS(thread->kernel_stack)->k_rip;
601 prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
602 bt_vm_map = kernel_map;
b0d623f7
A
603 }
604
b0d623f7 605 for (framecount = 0; framecount < nframes; framecount++) {
39037602 606 *tracebuf++ = prev_rip;
b0d623f7 607
39037602 608 if (!stackptr || (stackptr == fence)) {
b0d623f7
A
609 break;
610 }
fe8ab488 611 if (stackptr & 0x0000007) {
b0d623f7
A
612 break;
613 }
b0d623f7
A
614 if (stackptr <= prevsp) {
615 break;
616 }
617
39037602 618 kern_virt_addr = machine_trace_thread_get_kva(stackptr + RETURN_OFFSET64, bt_vm_map, thread_trace_flags);
fe8ab488 619 if (!kern_virt_addr) {
3e170ce0
A
620 if (thread_trace_flags) {
621 *thread_trace_flags |= kThreadTruncatedBT;
622 }
b0d623f7
A
623 break;
624 }
fe8ab488 625
39037602
A
626 prev_rip = *(uint64_t *)kern_virt_addr;
627 if (!user_p) {
628 prev_rip = VM_KERNEL_UNSLIDE(prev_rip);
629 }
b0d623f7
A
630
631 prevsp = stackptr;
fe8ab488 632
39037602
A
633 kern_virt_addr = machine_trace_thread_get_kva(stackptr, bt_vm_map, thread_trace_flags);
634
635 if (kern_virt_addr) {
636 stackptr = *(uint64_t *)kern_virt_addr;
637 } else {
638 stackptr = 0;
3e170ce0
A
639 if (thread_trace_flags) {
640 *thread_trace_flags |= kThreadTruncatedBT;
641 }
b0d623f7
A
642 }
643 }
644
fe8ab488 645 machine_trace_thread_clear_validation_cache();
b0d623f7
A
646
647 return (uint32_t) (((char *) tracebuf) - tracepos);
648}
649
b0d623f7
A
650void
651kdp_ml_enter_debugger(void)
652{
0a7de745 653 __asm__ __volatile__ ("int3");
b0d623f7 654}