]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kdp/ml/x86_64/kdp_machdep.c
xnu-1504.15.3.tar.gz
[apple/xnu.git] / osfmk / kdp / ml / x86_64 / kdp_machdep.c
CommitLineData
b0d623f7
A
1/*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <mach_kdp.h>
30#include <mach/mach_types.h>
31#include <mach/machine.h>
32#include <mach/exception_types.h>
33#include <kern/cpu_data.h>
34#include <i386/trap.h>
35#include <i386/mp.h>
36#include <kdp/kdp_internal.h>
37#include <kdp/kdp_callout.h>
38#include <mach-o/loader.h>
39#include <mach-o/nlist.h>
40#include <IOKit/IOPlatformExpert.h> /* for PE_halt_restart */
41#include <kern/machine.h> /* for halt_all_cpus */
42#include <libkern/OSAtomic.h>
43
44#include <kern/thread.h>
45#include <i386/thread.h>
46#include <vm/vm_map.h>
47#include <i386/pmap.h>
48#include <kern/kalloc.h>
49
50#define KDP_TEST_HARNESS 0
51#if KDP_TEST_HARNESS
52#define dprintf(x) printf x
53#else
54#define dprintf(x)
55#endif
56
57extern cpu_type_t cpuid_cputype(void);
58extern cpu_subtype_t cpuid_cpusubtype(void);
59
60void print_saved_state(void *);
61void kdp_call(void);
62int kdp_getc(void);
63boolean_t kdp_call_kdb(void);
64void kdp_getstate(x86_thread_state64_t *);
65void kdp_setstate(x86_thread_state64_t *);
66void kdp_print_phys(int);
67
68int
69machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
70
71int
72machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p);
73
74unsigned
75machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len);
76
77static void kdp_callouts(kdp_event_t event);
78
79void
80kdp_exception(
81 unsigned char *pkt,
82 int *len,
83 unsigned short *remote_port,
84 unsigned int exception,
85 unsigned int code,
86 unsigned int subcode
87)
88{
89 kdp_exception_t *rq = (kdp_exception_t *)pkt;
90
91 rq->hdr.request = KDP_EXCEPTION;
92 rq->hdr.is_reply = 0;
93 rq->hdr.seq = kdp.exception_seq;
94 rq->hdr.key = 0;
95 rq->hdr.len = sizeof (*rq);
96
97 rq->n_exc_info = 1;
98 rq->exc_info[0].cpu = 0;
99 rq->exc_info[0].exception = exception;
100 rq->exc_info[0].code = code;
101 rq->exc_info[0].subcode = subcode;
102
103 rq->hdr.len += rq->n_exc_info * sizeof (kdp_exc_info_t);
104
105 bcopy((char *)rq, (char *)pkt, rq->hdr.len);
106
107 kdp.exception_ack_needed = TRUE;
108
109 *remote_port = kdp.exception_port;
110 *len = rq->hdr.len;
111}
112
113boolean_t
114kdp_exception_ack(
115 unsigned char *pkt,
116 int len
117)
118{
119 kdp_exception_ack_t *rq = (kdp_exception_ack_t *)pkt;
120
121 if (((unsigned int) len) < sizeof (*rq))
122 return(FALSE);
123
124 if (!rq->hdr.is_reply || rq->hdr.request != KDP_EXCEPTION)
125 return(FALSE);
126
127 dprintf(("kdp_exception_ack seq %x %x\n", rq->hdr.seq, kdp.exception_seq));
128
129 if (rq->hdr.seq == kdp.exception_seq) {
130 kdp.exception_ack_needed = FALSE;
131 kdp.exception_seq++;
132 }
133 return(TRUE);
134}
135
136void
137kdp_getstate(
138 x86_thread_state64_t *state
139)
140{
141 x86_saved_state64_t *saved_state;
142
143 saved_state = (x86_saved_state64_t *)kdp.saved_state;
144
145 state->rax = saved_state->rax;
146 state->rbx = saved_state->rbx;
147 state->rcx = saved_state->rcx;
148 state->rdx = saved_state->rdx;
149 state->rdi = saved_state->rdi;
150 state->rsi = saved_state->rsi;
151 state->rbp = saved_state->rbp;
152
153 state->r8 = saved_state->r8;
154 state->r9 = saved_state->r9;
155 state->r10 = saved_state->r10;
156 state->r11 = saved_state->r11;
157 state->r12 = saved_state->r12;
158 state->r13 = saved_state->r13;
159 state->r14 = saved_state->r14;
160 state->r15 = saved_state->r15;
161
162 state->rsp = saved_state->isf.rsp;
163 state->rflags = saved_state->isf.rflags;
164 state->rip = saved_state->isf.rip;
165
166 state->cs = saved_state->isf.cs;
167 state->fs = saved_state->fs;
168 state->gs = saved_state->gs;
169}
170
171
172void
173kdp_setstate(
174 x86_thread_state64_t *state
175)
176{
177 x86_saved_state64_t *saved_state;
178
179 saved_state = (x86_saved_state64_t *)kdp.saved_state;
180 saved_state->rax = state->rax;
181 saved_state->rbx = state->rbx;
182 saved_state->rcx = state->rcx;
183 saved_state->rdx = state->rdx;
184 saved_state->rdi = state->rdi;
185 saved_state->rsi = state->rsi;
186 saved_state->rbp = state->rbp;
187 saved_state->r8 = state->r8;
188 saved_state->r9 = state->r9;
189 saved_state->r10 = state->r10;
190 saved_state->r11 = state->r11;
191 saved_state->r12 = state->r12;
192 saved_state->r13 = state->r13;
193 saved_state->r14 = state->r14;
194 saved_state->r15 = state->r15;
195
196 saved_state->isf.rflags = state->rflags;
197 saved_state->isf.rsp = state->rsp;
198 saved_state->isf.rip = state->rip;
199
200 saved_state->fs = (uint32_t)state->fs;
201 saved_state->gs = (uint32_t)state->gs;
202}
203
204
205kdp_error_t
206kdp_machine_read_regs(
207 __unused unsigned int cpu,
208 unsigned int flavor,
209 char *data,
210 int *size
211)
212{
213 static x86_float_state64_t null_fpstate;
214
215 switch (flavor) {
216
217 case x86_THREAD_STATE64:
218 dprintf(("kdp_readregs THREAD_STATE64\n"));
219 kdp_getstate((x86_thread_state64_t *)data);
220 *size = sizeof (x86_thread_state64_t);
221 return KDPERR_NO_ERROR;
222
223 case x86_FLOAT_STATE64:
224 dprintf(("kdp_readregs THREAD_FPSTATE64\n"));
225 *(x86_float_state64_t *)data = null_fpstate;
226 *size = sizeof (x86_float_state64_t);
227 return KDPERR_NO_ERROR;
228
229 default:
230 dprintf(("kdp_readregs bad flavor %d\n", flavor));
231 *size = 0;
232 return KDPERR_BADFLAVOR;
233 }
234}
235
236kdp_error_t
237kdp_machine_write_regs(
238 __unused unsigned int cpu,
239 unsigned int flavor,
240 char *data,
241 __unused int *size
242)
243{
244 switch (flavor) {
245
246 case x86_THREAD_STATE64:
247 dprintf(("kdp_writeregs THREAD_STATE64\n"));
248 kdp_setstate((x86_thread_state64_t *)data);
249 return KDPERR_NO_ERROR;
250
251 case x86_FLOAT_STATE64:
252 dprintf(("kdp_writeregs THREAD_FPSTATE64\n"));
253 return KDPERR_NO_ERROR;
254
255 default:
256 dprintf(("kdp_writeregs bad flavor %d\n", flavor));
257 return KDPERR_BADFLAVOR;
258 }
259}
260
261
262
263void
264kdp_machine_hostinfo(
265 kdp_hostinfo_t *hostinfo
266)
267{
268 int i;
269
270 hostinfo->cpus_mask = 0;
271
272 for (i = 0; i < machine_info.max_cpus; i++) {
273 if (cpu_data_ptr[i] == NULL)
274 continue;
275
276 hostinfo->cpus_mask |= (1 << i);
277 }
278
279 hostinfo->cpu_type = cpuid_cputype() | CPU_ARCH_ABI64;
280 hostinfo->cpu_subtype = cpuid_cpusubtype();
281}
282
/*
 * Fatal error inside the debugger itself: log the message and stop
 * this CPU with a hlt.
 */
void
kdp_panic(
    const char		*msg
)
{
	kprintf("kdp panic: %s\n", msg);
	__asm__ volatile("hlt");
}
291
292
293void
294kdp_machine_reboot(void)
295{
296 printf("Attempting system restart...");
297 /* Call the platform specific restart*/
298 if (PE_halt_restart)
299 (*PE_halt_restart)(kPERestartCPU);
300 /* If we do reach this, give up */
301 halt_all_cpus(TRUE);
302}
303
/*
 * Block interrupts by raising the IPL; returns the previous level so
 * the caller can hand it back to kdp_intr_enbl().
 */
int
kdp_intr_disbl(void)
{
	return splhigh();
}
309
/* Restore the interrupt level previously saved by kdp_intr_disbl(). */
void
kdp_intr_enbl(int s)
{
	splx(s);
}
315
/* Poll the console for a character via cnmaygetc() (non-blocking). */
int
kdp_getc(void)
{
	return cnmaygetc();
}
321
/*
 * Busy-wait used by KDP polling loops.
 *
 * NOTE(review): the requested microsecond count is divided by 100
 * before being handed to delay(), so the actual wait is 1/100th of
 * 'usec' — presumably deliberate throttling; confirm against callers.
 */
void
kdp_us_spin(int usec)
{
	delay(usec/100);
}
327
328void print_saved_state(void *state)
329{
330 x86_saved_state64_t *saved_state;
331
332 saved_state = state;
333
334 kprintf("pc = 0x%llx\n", saved_state->isf.rip);
335 kprintf("cr2= 0x%llx\n", saved_state->cr2);
336 kprintf("rp = TODO FIXME\n");
337 kprintf("sp = %p\n", saved_state);
338
339}
340
/* Cache synchronization hook: nothing to do on this architecture. */
void
kdp_sync_cache(void)
{
}
346
/* Enter the debugger by executing a breakpoint trap. */
void
kdp_call(void)
{
	__asm__ volatile ("int $3");	/* Let the processor do the work */
}
352
353
/*
 * Legacy call-frame layout used for frame-pointer stack walks:
 * saved previous frame pointer, return address, then any arguments.
 * NOTE(review): args[0] is the GNU zero-length-array extension; C99
 * code would use a flexible array member (args[]).
 */
typedef struct _cframe_t {
	struct _cframe_t	*prev;		/* caller's frame */
	unsigned		caller;		/* return address */
	unsigned		args[0];	/* call arguments, variable length */
} cframe_t;
359
360extern pt_entry_t *DMAP2;
361extern caddr_t DADDR2;
362
/*
 * Debug helper: hex-dump the first 100 words of the physical page
 * containing 'src'.  The page is made readable by pointing the scratch
 * PTE DMAP2 at it and reading through the DADDR2 virtual window.
 *
 * NOTE(review): 0x63 presumably encodes the PTE permission/status bits
 * for this temporary mapping — confirm against the pmap definitions.
 * NOTE(review): "(i % 8) == 0" emits a newline after the first word
 * rather than after every eighth; looks like it was meant to be == 7.
 */
void
kdp_print_phys(int src)
{
	unsigned int   *iptr;
	int             i;

	/* Point the scratch PTE at the (page-aligned) physical address. */
	*(int *) DMAP2 = 0x63 | (src & 0xfffff000);
	invlpg((uintptr_t) DADDR2);	/* flush the stale TLB entry for the window */
	iptr = (unsigned int *) DADDR2;
	for (i = 0; i < 100; i++) {
		kprintf("0x%x ", *iptr++);
		if ((i % 8) == 0)
			kprintf("\n");
	}
	kprintf("\n");
	/* Tear the temporary mapping back down. */
	*(int *) DMAP2 = 0;

}
381
/*
 * Kernel trap hook into the KDP debugger on x86_64.
 *
 * Translates the hardware trap number (plus the page-fault result and
 * faulting address where relevant) into a Mach exception triple and
 * raises it to the attached debugger.  Returns TRUE when the trap was
 * handled, FALSE when an unexpected trap arrived with no debugger
 * connected.
 */
boolean_t
kdp_i386_trap(
    unsigned int	trapno,
    x86_saved_state64_t	*saved_state,
    kern_return_t	result,
    vm_offset_t		va
)
{
    unsigned int exception, subcode = 0, code;

    if (trapno != T_INT3 && trapno != T_DEBUG) {
    	kprintf("Debugger: Unexpected kernel trap number: "
		"0x%x, RIP: 0x%llx, CR2: 0x%llx\n",
		trapno, saved_state->isf.rip, saved_state->cr2);
	/* Without a live debugger connection there is nothing to raise. */
	if (!kdp.is_conn)
	    return FALSE;
    }

    /* Quiesce the other CPUs and notify callouts that we are entering KDP. */
    mp_kdp_enter();
    kdp_callouts(KDP_EVENT_ENTER);

    /*
     * If we arrived here single-stepping (TF set), preemption was disabled
     * on the way in; re-enable it for the duration of the debugger session.
     */
    if (saved_state->isf.rflags & EFL_TF) {
	    enable_preemption_no_check();
    }

    /* Map the hardware trap to a Mach exception/code/subcode triple. */
    switch (trapno) {
    
    case T_DIVIDE_ERROR:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_DIVERR;
	break;
    
    case T_OVERFLOW:
	exception = EXC_SOFTWARE;
	code = EXC_I386_INTOFLT;
	break;
    
    case T_OUT_OF_BOUNDS:
	exception = EXC_ARITHMETIC;
	code = EXC_I386_BOUNDFLT;
	break;
    
    case T_INVALID_OPCODE:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_INVOPFLT;
	break;
    
    case T_SEGMENT_NOT_PRESENT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_SEGNPFLT;
	subcode	= (unsigned int)saved_state->isf.err;	/* hardware error code */
	break;
    
    case T_STACK_FAULT:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_STKFLT;
	subcode	= (unsigned int)saved_state->isf.err;	/* hardware error code */
	break;
    
    case T_GENERAL_PROTECTION:
	exception = EXC_BAD_INSTRUCTION;
	code = EXC_I386_GPFLT;
	subcode	= (unsigned int)saved_state->isf.err;	/* hardware error code */
	break;
	
    case T_PAGE_FAULT:
	exception = EXC_BAD_ACCESS;
	code = result;			/* kern_return_t from the fault handler */
	subcode = (unsigned int)va;	/* faulting address, truncated to 32 bits */
	break;
    
    case T_WATCHPOINT:
	exception = EXC_SOFTWARE;
	code = EXC_I386_ALIGNFLT;
	break;
    
    case T_DEBUG:
    case T_INT3:
	exception = EXC_BREAKPOINT;
	code = EXC_I386_BPTFLT;
	break;
    
    default:
	exception = EXC_BAD_INSTRUCTION;
	code = trapno;
	break;
    }

    /* Hand control to the remote debugger; returns when it resumes us. */
    kdp_raise_exception(exception, code, subcode, saved_state);
    /* If the instruction single step bit is set, disable kernel preemption
     */
    if (saved_state->isf.rflags & EFL_TF) {
	    disable_preemption();
    }

    kdp_callouts(KDP_EVENT_EXIT);
    mp_kdp_exit();

    return TRUE;
}
482
483boolean_t
484kdp_call_kdb(
485 void)
486{
487 return(FALSE);
488}
489
/*
 * Report the machine breakpoint instruction: on x86 this is the
 * single-byte INT3 opcode (0xCC).
 */
void
kdp_machine_get_breakinsn(
			  uint8_t *bytes,
			  uint32_t *size
)
{
	*size = 1;
	bytes[0] = 0xcc;
}
499
500extern pmap_t kdp_pmap;
501
502#define RETURN_OFFSET 4
503
/*
 * Walk a 32-bit user thread's frame-pointer chain, writing a backtrace
 * into the buffer at tracepos: the initial EIP followed by alternating
 * frame-pointer / return-address words.  Stops after nframes frames, at
 * the buffer bound, or at the first implausible frame.  Returns the
 * number of bytes written.
 *
 * On this 64-bit kernel only user_p == TRUE is supported; a 32-bit
 * kernel-mode trace panics.
 */
int
machine_trace_thread(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint32_t *tracebuf = (uint32_t *)tracepos;
	uint32_t fence = 0;		/* sentinel frame pointer ending the chain */
	uint32_t stackptr = 0;
	uint32_t stacklimit = 0xfc000000;
	int framecount = 0;
	uint32_t init_eip = 0;
	uint32_t prevsp = 0;
	uint32_t framesize = 2 * sizeof(vm_offset_t);	/* bytes emitted per frame */

	if (user_p) {
	        x86_saved_state32_t	*iss32;
		
		iss32 = USER_REGS32(thread);
		init_eip = iss32->eip;
		stackptr = iss32->ebp;

		/* User addresses may span the full 32-bit range. */
		stacklimit = 0xffffffff;
		/* Route kdp_machine_vm_read() through the target task's pmap. */
		kdp_pmap = thread->task->map->pmap;
	}
	else
		panic("32-bit trace attempted on 64-bit kernel");

	*tracebuf++ = init_eip;

	for (framecount = 0; framecount < nframes; framecount++) {

		/* Stop while there is still room for a full frame record. */
		if ((tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;
/* Invalid frame, or hit fence */
		if (!stackptr || (stackptr == fence)) {
			break;
		}

		/* Unaligned frame */
		if (stackptr & 0x0000003) {
			break;
		}

		/* Frame pointers must strictly ascend; otherwise a loop or corruption. */
		if (stackptr <= prevsp) {
			break;
		}

		if (stackptr > stacklimit) {
			break;
		}

		/* The saved return address sits just above the saved frame pointer. */
		if (kdp_machine_vm_read((mach_vm_address_t)(stackptr + RETURN_OFFSET), (caddr_t) tracebuf, sizeof(*tracebuf)) != sizeof(*tracebuf)) {
			break;
		}
		tracebuf++;
		
		/* Follow the chain to the caller's frame. */
		prevsp = stackptr;
		if (kdp_machine_vm_read((mach_vm_address_t)stackptr, (caddr_t) &stackptr, sizeof(stackptr)) != sizeof(stackptr)) {
			*tracebuf++ = 0;	/* mark the unreadable frame */
			break;
		}
	}

	/* Revert subsequent reads to the kernel's own address space. */
	kdp_pmap = 0;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
573
574
575#define RETURN_OFFSET64 8
576/* Routine to encapsulate the 64-bit address read hack*/
577unsigned
578machine_read64(addr64_t srcaddr, caddr_t dstaddr, uint32_t len)
579{
580 return (unsigned)kdp_machine_vm_read(srcaddr, dstaddr, len);
581}
582
/*
 * Walk a thread's 64-bit frame-pointer chain (user or kernel), writing
 * the initial RIP followed by alternating frame-pointer / return-address
 * entries into the buffer at tracepos.  Stops after nframes frames, at
 * the buffer bound, or at the first implausible frame.  Returns the
 * number of bytes written.
 */
int
machine_trace_thread64(thread_t thread, char *tracepos, char *tracebound, int nframes, boolean_t user_p)
{
	uint64_t *tracebuf = (uint64_t *)tracepos;
	uint32_t fence = 0;		/* sentinel frame pointer ending the chain */
	addr64_t stackptr = 0;
	int framecount = 0;
	addr64_t init_rip = 0;
	addr64_t prevsp = 0;
	unsigned framesize = 2 * sizeof(addr64_t);	/* bytes emitted per frame */

	if (user_p) {
		x86_saved_state64_t	*iss64;
		iss64 = USER_REGS64(thread);
		init_rip = iss64->isf.rip;
		stackptr = iss64->rbp;
		/* Route machine_read64() through the target task's pmap. */
		kdp_pmap = thread->task->map->pmap;
	}
	else {
		/* Kernel thread: start from its saved kernel-stack context. */
		stackptr = STACK_IKS(thread->kernel_stack)->k_rbp;
		init_rip = STACK_IKS(thread->kernel_stack)->k_rip;
		kdp_pmap = 0;
	}

	*tracebuf++ = init_rip;

	for (framecount = 0; framecount < nframes; framecount++) {

		/* Stop while there is still room for a full frame record. */
		if ((uint32_t)(tracebound - ((char *)tracebuf)) < (4 * framesize)) {
			tracebuf--;
			break;
		}

		*tracebuf++ = stackptr;

		if (!stackptr || (stackptr == fence)){
			break;
		}

		/*
		 * NOTE(review): only 4-byte alignment is enforced here even
		 * though 64-bit frames are 8-byte quantities — confirm this
		 * is intentional.
		 */
		if (stackptr & 0x0000003) {
			break;
		}

		/* Frame pointers must strictly ascend; otherwise a loop or corruption. */
		if (stackptr <= prevsp) {
			break;
		}

		/* The saved return address sits just above the saved frame pointer. */
		if (machine_read64(stackptr + RETURN_OFFSET64, (caddr_t) tracebuf, sizeof(addr64_t)) != sizeof(addr64_t)) {
			break;
		}
		tracebuf++;

		/* Follow the chain to the caller's frame. */
		prevsp = stackptr;
		if (machine_read64(stackptr, (caddr_t) &stackptr, sizeof(addr64_t)) != sizeof(addr64_t)) {
			*tracebuf++ = 0;	/* mark the unreadable frame */
			break;
		}
	}

	/* Revert subsequent reads to the kernel's own address space. */
	kdp_pmap = NULL;

	return (uint32_t) (((char *) tracebuf) - tracepos);
}
646
/*
 * Singly-linked list of registered KDP event callouts, pushed at the
 * head by kdp_register_callout() and walked by kdp_callouts().
 */
static struct kdp_callout {
	struct kdp_callout	*callout_next;	/* next registered callout, or NULL */
	kdp_callout_fn_t	callout_fn;	/* function invoked on KDP enter/exit */
	void			*callout_arg;	/* opaque argument passed to callout_fn */
} *kdp_callout_list = NULL;
652
653
654/*
655 * Called from kernel context to register a kdp event callout.
656 */
void
kdp_register_callout(
	kdp_callout_fn_t fn,	/* function invoked on KDP enter/exit events */
	void *arg)		/* opaque argument passed back to fn */
{
	struct kdp_callout *kcp;
	struct kdp_callout *list_head;

	/* Allocation failure is fatal: a debugger hook cannot be silently dropped. */
	kcp = kalloc(sizeof(*kcp));
	if (kcp == NULL)
		panic("kdp_register_callout() kalloc failed");

	kcp->callout_fn = fn;
	kcp->callout_arg = arg;

	/*
	 * Lock-less list insertion using compare and exchange: push onto the
	 * head, retrying if another registration raced with ours.
	 */
	do {
		list_head = kdp_callout_list;
		kcp->callout_next = list_head;
	} while (!OSCompareAndSwapPtr(list_head, kcp, (void * volatile *)&kdp_callout_list));
}
678
679/*
680 * Called at exception/panic time when extering or exiting kdp.
681 * We are single-threaded at this time and so we don't use locks.
682 */
683static void
684kdp_callouts(kdp_event_t event)
685{
686 struct kdp_callout *kcp = kdp_callout_list;
687
688 while (kcp) {
689 kcp->callout_fn(kcp->callout_arg, event);
690 kcp = kcp->callout_next;
691 }
692}
693
/* Drop into the debugger by executing a breakpoint instruction. */
void
kdp_ml_enter_debugger(void)
{
	__asm__ __volatile__("int3");
}