/* apple/xnu (xnu-792.10.96) - osfmk/i386/trap.c */
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * Hardware trap/fault handler.
 */

#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <types.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>
#include <architecture/i386/pio.h>      /* inb() */

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>

#include <sys/kdebug.h>

#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif  /* MACH_KGDB */

#if MACH_KDB
#include <debug.h>
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif  /* MACH_KDB */

#include <string.h>

#include <i386/io_emulate.h>
#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/proc_reg.h>
#include <mach/i386/syscall_sw.h>

/*
 * Forward declarations
 */
static void user_page_fault_continue(kern_return_t kret);
static void panic_trap(x86_saved_state32_t *saved_state);
static void set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip);

perfCallback perfTrapHook = NULL;       /* Pointer to CHUD trap hook routine */
perfCallback perfASTHook  = NULL;       /* Pointer to CHUD AST hook routine */

void
thread_syscall_return(
        kern_return_t ret)
{
        thread_t        thr_act = current_thread();

        if (thread_is_64bit(thr_act)) {
                x86_saved_state64_t     *regs;

                regs = USER_REGS64(thr_act);

                if (kdebug_enable && ((regs->rax & SYSCALL_CLASS_MASK) == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT))) {
                        /* Mach trap */
                        KERNEL_DEBUG_CONSTANT(
                                MACHDBG_CODE(DBG_MACH_EXCP_SC, ((int) (regs->rax & SYSCALL_NUMBER_MASK)))
                                | DBG_FUNC_END,
                                ret, 0, 0, 0, 0);
                }
                regs->rax = ret;

        } else {
                x86_saved_state32_t     *regs;

                regs = USER_REGS32(thr_act);

                if (kdebug_enable && ((int) regs->eax < 0)) {
                        /* Mach trap */
                        KERNEL_DEBUG_CONSTANT(
                                MACHDBG_CODE(DBG_MACH_EXCP_SC, -((int) regs->eax))
                                | DBG_FUNC_END,
                                ret, 0, 0, 0, 0);
                }
                regs->eax = ret;
        }
        thread_exception_return();
        /*NOTREACHED*/
}
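
/*
 * Editor's note (illustrative, not in the original source): how the 64-bit
 * syscall-class test above decodes %rax, assuming the usual xnu definitions
 * in syscall_sw.h (SYSCALL_CLASS_SHIFT = 24, SYSCALL_CLASS_MASK = 0xFF << 24):
 *
 *      rax = (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT) | 31;
 *      (rax & SYSCALL_CLASS_MASK) == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT)  // true
 *      (int)(rax & SYSCALL_NUMBER_MASK)                                           // 31
 *
 * The trap number 31 is an arbitrary example.  The legacy 32-bit ABI
 * instead marks Mach traps with a negative number in %eax, which is why
 * the 32-bit path tests (int) regs->eax < 0 and negates the value for the
 * trace code.
 */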


#if MACH_KDB
boolean_t       debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

void
thread_kdb_return(void)
{
        thread_t        thr_act = current_thread();
        x86_saved_state_t *iss = USER_STATE(thr_act);

        if (is_saved_state64(iss)) {
                x86_saved_state64_t     *regs;

                regs = saved_state64(iss);

                if (kdb_trap(regs->isf.trapno, (int)regs->isf.err, (void *)regs)) {
                        thread_exception_return();
                        /*NOTREACHED*/
                }

        } else {
                x86_saved_state32_t     *regs;

                regs = saved_state32(iss);

                if (kdb_trap(regs->trapno, regs->err, (void *)regs)) {
                        thread_exception_return();
                        /*NOTREACHED*/
                }
        }
}

#endif  /* MACH_KDB */

void
user_page_fault_continue(
        kern_return_t kr)
{
        thread_t        thread = current_thread();
        x86_saved_state_t *regs = USER_STATE(thread);
        ast_t           *myast;
        boolean_t       intr;
        user_addr_t     vaddr;
#if MACH_KDB
        int             err;
        int             trapno;
#endif

        assert((is_saved_state32(regs) && !thread_is_64bit(thread)) ||
               (is_saved_state64(regs) &&  thread_is_64bit(thread)));

        if (thread_is_64bit(thread)) {
                x86_saved_state64_t     *uregs;

                uregs = USER_REGS64(thread);

#if MACH_KDB
                trapno = uregs->isf.trapno;
                err = uregs->isf.err;
#endif
                vaddr = (user_addr_t)uregs->cr2;
        } else {
                x86_saved_state32_t     *uregs;

                uregs = USER_REGS32(thread);

#if MACH_KDB
                trapno = uregs->trapno;
                err = uregs->err;
#endif
                vaddr = uregs->cr2;
        }

        if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if MACH_KDB
                if (!db_breakpoints_inserted) {
                        db_set_breakpoints();
                }
                if (db_watchpoint_list &&
                    db_watchpoints_inserted &&
                    (err & T_PF_WRITE) &&
                    db_find_watchpoint(thread->map,
                                       (vm_offset_t)vaddr,
                                       regs))
                        kdb_trap(T_WATCHPOINT, 0, regs);
#endif  /* MACH_KDB */
                intr = ml_set_interrupts_enabled(FALSE);
                myast = ast_pending();
                while (*myast & AST_ALL) {
                        ast_taken(AST_ALL, intr);
                        ml_set_interrupts_enabled(FALSE);
                        myast = ast_pending();
                }
                ml_set_interrupts_enabled(intr);

                thread_exception_return();
                /*NOTREACHED*/
        }

#if MACH_KDB
        if (debug_all_traps_with_kdb &&
            kdb_trap(trapno, err, regs)) {
                thread_exception_return();
                /*NOTREACHED*/
        }
#endif  /* MACH_KDB */

        i386_exception(EXC_BAD_ACCESS, kr, vaddr);
        /*NOTREACHED*/
}

/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
        uint32_t        fault_addr;
        uint32_t        recover_addr;
};

extern struct recovery  recover_table[];
extern struct recovery  recover_table_end[];
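
/*
 * Editor's note (illustrative sketch, not in the original source): the
 * recovery entries are generated alongside the assembly copyin/copyout
 * code, pairing each instruction that may legitimately fault on a user
 * address with a landing point, conceptually:
 *
 *      fault_site:                     // load/store of a user address
 *              rep movsl
 *              ...
 *      // table entry: { fault_site, copy_fail }
 *
 * When kernel_trap() below takes a page or GP fault whose EIP equals a
 * fault_addr, it rewrites EIP to the matching recover_addr instead of
 * panicking, so the copy routine can back out and return an error.  The
 * label names above are hypothetical; the real table is emitted by the
 * locore assembly.
 */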

const char *    trap_type[] = {TRAP_NAMES};
unsigned        TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);

extern unsigned panic_io_port;

static inline void
reset_dr7(void)
{
        uint32_t dr7 = 0x400;   /* DR7 reset value: only bit 10 (reserved, always reads 1) set; all breakpoint enables cleared */
        __asm__ volatile("movl %0,%%dr7" : : "r" (dr7));
}
#if MACH_KDP
unsigned kdp_has_active_watchpoints = 0;
#endif
/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
void
kernel_trap(
        x86_saved_state_t       *state)
{
        x86_saved_state32_t     *saved_state;
        int                     code;
        user_addr_t             vaddr;
        int                     type;
        vm_map_t                map;
        kern_return_t           result = KERN_FAILURE;
        thread_t                thread;
        ast_t                   *myast;
        boolean_t               intr;
        vm_prot_t               prot;
        struct recovery         *rp;
        vm_offset_t             kern_ip;
        int                     fault_in_copy_window = -1;
        int                     is_user = 0;
#if MACH_KDB
        pt_entry_t              *pte;
#endif /* MACH_KDB */

        thread = current_thread();

        if (is_saved_state64(state))
                panic("kernel_trap(%p) with 64-bit state", state);
        saved_state = saved_state32(state);

        vaddr = (user_addr_t)saved_state->cr2;
        type = saved_state->trapno;
        code = saved_state->err & 0xffff;
        intr = (saved_state->efl & EFL_IF) != 0;        /* state of ints at trap */

        kern_ip = (vm_offset_t)saved_state->eip;

        myast = ast_pending();

        if (perfASTHook) {
                if (*myast & AST_CHUD_ALL)
                        perfASTHook(type, NULL, 0, 0);
        } else
                *myast &= ~AST_CHUD_ALL;

        /*
         * Is there a hook?
         */
        if (perfTrapHook) {
                if (perfTrapHook(type, NULL, 0, 0) == KERN_SUCCESS) {
                        /*
                         * If it succeeds, we are done...
                         */
                        return;
                }
        }
        /*
         * We come here with interrupts off, as we don't want to recurse
         * on preemption below; but we do want to re-enable interrupts
         * as soon as we possibly can, to hold latency down.
         */
        if (T_PREEMPT == type) {

                KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
                                      0, 0, 0, kern_ip, 0);

                ast_taken(AST_PREEMPTION, FALSE);
                return;
        }

        if (T_PAGE_FAULT == type) {
                /*
                 * Assume we're faulting in the kernel map.
                 */
                map = kernel_map;

                if (thread != THREAD_NULL && thread->map != kernel_map) {
                        vm_offset_t     copy_window_base;
                        vm_offset_t     kvaddr;
                        int             window_index;

                        kvaddr = (vm_offset_t)vaddr;
                        /*
                         * We must determine if the fault occurred in the
                         * copy window while preemption is disabled for
                         * this processor, so that we need only look at
                         * the window associated with this processor.
                         */
                        copy_window_base = current_cpu_datap()->cpu_copywindow_base;

                        if (kvaddr >= copy_window_base && kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS))) {

                                window_index = (kvaddr - copy_window_base) / NBPDE;

                                if (thread->machine.copy_window[window_index].user_base != (user_addr_t)-1) {

                                        kvaddr -= (copy_window_base + (NBPDE * window_index));
                                        vaddr = thread->machine.copy_window[window_index].user_base + kvaddr;

                                        map = thread->map;
                                        fault_in_copy_window = window_index;
                                }
                                is_user = -1;
                        }
                }
        }
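        /*
         * Worked example (editor's note, not in the original source),
         * where NBPDE is the span of one page-directory entry and
         * NCOPY_WINDOWS is the number of per-thread windows: with a
         * hypothetical copy_window_base = 0xF0000000, NBPDE = 0x400000
         * (4MB, non-PAE) and a fault at kvaddr = 0xF0C01000,
         *
         *      window_index = (0xF0C01000 - 0xF0000000) / 0x400000 = 3
         *      offset       = 0xF0C01000 - (0xF0000000 + 3 * 0x400000) = 0x1000
         *      vaddr        = copy_window[3].user_base + 0x1000
         *
         * i.e. the kernel-window address is rebased to the user address
         * the window currently mirrors, and the fault is then handled
         * against the user's map rather than the kernel map.
         */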
        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
                              (int)(vaddr >> 32), (int)vaddr, is_user, kern_ip, 0);


        (void) ml_set_interrupts_enabled(intr);

        switch (type) {

            case T_NO_FPU:
                fpnoextflt();
                return;

            case T_FPU_FAULT:
                fpextovrflt();
                return;

            case T_FLOATING_POINT_ERROR:
                fpexterrflt();
                return;

            case T_SSE_FLOAT_ERROR:
                fpSSEexterrflt();
                return;
            case T_DEBUG:
                if ((saved_state->efl & EFL_TF) == 0
                    && !kdp_has_active_watchpoints) {
                        /* We've somehow encountered a debug
                         * register match that does not belong
                         * to the kernel debugger.
                         * This isn't supposed to happen.
                         */
                        reset_dr7();
                        return;
                }
                goto debugger_entry;
            case T_PAGE_FAULT:
                /*
                 * If the current map is a submap of the kernel map,
                 * and the address is within that map, fault on that
                 * map.  If the same check is done in vm_fault
                 * (vm_map_lookup), we may deadlock on the kernel map
                 * lock.
                 */

                prot = VM_PROT_READ;

                if (code & T_PF_WRITE)
                        prot |= VM_PROT_WRITE;
#if PAE
                if (code & T_PF_EXECUTE)
                        prot |= VM_PROT_EXECUTE;
#endif

#if MACH_KDB
                /*
                 * Check for watchpoint on kernel static data.
                 * vm_fault would fail in this case.
                 */
                if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted &&
                    (code & T_PF_WRITE) && vaddr < vm_map_max(map) &&
                    ((*(pte = pmap_pte(kernel_pmap, (vm_map_offset_t)vaddr))) & INTEL_PTE_WRITE) == 0) {
                        pmap_store_pte(
                                pte,
                                *pte | INTEL_PTE_VALID | INTEL_PTE_WRITE);
                        /* XXX need invltlb here? */

                        result = KERN_SUCCESS;
                        goto look_for_watchpoints;
                }
#endif  /* MACH_KDB */

                result = vm_fault(map,
                                  vm_map_trunc_page(vaddr),
                                  prot,
                                  FALSE,
                                  THREAD_UNINT, NULL, 0);

#if MACH_KDB
                if (result == KERN_SUCCESS) {
                        /*
                         * Look for watchpoints.
                         */
look_for_watchpoints:
                        if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted && (code & T_PF_WRITE) &&
                            db_find_watchpoint(map, vaddr, saved_state))
                                kdb_trap(T_WATCHPOINT, 0, saved_state);
                }
#endif  /* MACH_KDB */

                if (result == KERN_SUCCESS) {

                        if (fault_in_copy_window != -1) {
                                pt_entry_t      *updp;
                                pt_entry_t      *kpdp;

                                /*
                                 * In case there was no page table assigned
                                 * for the user base address and the pmap
                                 * got 'expanded' due to this fault, we'll
                                 * copy in the descriptor.
                                 *
                                 * We're either setting the page table descriptor
                                 * to the same value or it was 0...  no need
                                 * for a TLB flush in either case.
                                 */

                                ml_set_interrupts_enabled(FALSE);
                                updp = pmap_pde(map->pmap, thread->machine.copy_window[fault_in_copy_window].user_base);
                                assert(updp);
                                if (0 == updp) panic("trap: updp 0"); /* XXX DEBUG */
                                kpdp = current_cpu_datap()->cpu_copywindow_pdp;
                                kpdp += fault_in_copy_window;

#if JOE_DEBUG
                                if (*kpdp && (*kpdp & PG_FRAME) != (*updp & PG_FRAME))
                                        panic("kernel_fault: user pdp doesn't match - updp = 0x%x, kpdp = 0x%x\n", updp, kpdp);
#endif
                                pmap_store_pte(kpdp, *updp);

                                (void) ml_set_interrupts_enabled(intr);
                        }
                        return;
                }
                /*
                 * fall through
                 */

            case T_GENERAL_PROTECTION:
                /*
                 * If there is a failure recovery address
                 * for this fault, go there.
                 */
                for (rp = recover_table; rp < recover_table_end; rp++) {
                        if (kern_ip == rp->fault_addr) {
                                set_recovery_ip(saved_state, rp->recover_addr);
                                return;
                        }
                }

                /*
                 * Check thread recovery address also.
                 */
                if (thread->recover) {
                        set_recovery_ip(saved_state, thread->recover);
                        thread->recover = 0;
                        return;
                }
                /*
                 * Unanticipated page-fault errors in kernel
                 * should not happen.
                 *
                 * fall through...
                 */

            default:
                /*
                 * Exception 15 is reserved but some chips may generate it
                 * spuriously.  Seen at startup on AMD Athlon-64.
                 */
                if (type == 15) {
                        kprintf("kernel_trap() ignoring spurious trap 15\n");
                        return;
                }
debugger_entry:
                /* Ensure that the i386_kernel_state at the base of the
                 * current thread's stack (if any) is synchronized with the
                 * context at the moment of the trap, to facilitate
                 * access through the debugger.
                 */
                sync_iss_to_iks(saved_state);
#if MACH_KDB
restart_debugger:
#endif  /* MACH_KDB */
#if MACH_KDP
                if (current_debugger != KDB_CUR_DB) {
                        if (kdp_i386_trap(type, saved_state, result, vaddr))
                                return;
                }
#endif  /* MACH_KDP */
#if MACH_KDB
                else
                        if (kdb_trap(type, code, saved_state)) {
                                if (switch_debugger) {
                                        current_debugger = KDP_CUR_DB;
                                        switch_debugger = 0;
                                        goto restart_debugger;
                                }
                                return;
                        }
#endif  /* MACH_KDB */
        }

        panic_trap(saved_state);
        /*
         * NO RETURN
         */
}


static void
set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip)
{
        saved_state->eip = ip;
}


static void
panic_trap(x86_saved_state32_t *regs)
{
        const char *trapname = "Unknown";
        uint32_t        cr0 = get_cr0();
        uint32_t        cr2 = get_cr2();
        uint32_t        cr3 = get_cr3();
        uint32_t        cr4 = get_cr4();

        if (panic_io_port)
                (void)inb(panic_io_port);

        kprintf("panic trap number 0x%x, eip 0x%x\n", regs->trapno, regs->eip);
        kprintf("cr0 0x%08x cr2 0x%08x cr3 0x%08x cr4 0x%08x\n",
                cr0, cr2, cr3, cr4);

        if (regs->trapno < TRAP_TYPES)
                trapname = trap_type[regs->trapno];
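
        /*
         * Editor's note (not in the original source): in the 32-bit
         * saved-state layout, the slot that would hold the kernel ESP is
         * reused to hold CR2, which is why the third register row below
         * is labeled "CR2" and prints regs->cr2 rather than a stack
         * pointer.
         */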
        panic("Unresolved kernel trap (CPU %d, Type %d=%s), registers:\n"
              "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
              "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
              "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
              "EFL: 0x%08x, EIP: 0x%08x, CS:  0x%08x, DS:  0x%08x\n",
              cpu_number(), regs->trapno, trapname, cr0, cr2, cr3, cr4,
              regs->eax, regs->ebx, regs->ecx, regs->edx,
              regs->cr2, regs->ebp, regs->esi, regs->edi,
              regs->efl, regs->eip, regs->cs, regs->ds);
        /*
         * This next statement is not executed, but it's needed to stop
         * the compiler from using tail-call optimization for the panic
         * call, which would confuse the subsequent backtrace.
         */
        cr0 = 0;
}

extern void kprintf_break_lock(void);


/*
 * Called from locore on a special reserved stack after a double-fault
 * is taken in kernel space.
 * Kernel stack overflow is one route here.
 */
void
panic_double_fault(int code)
{
        struct i386_tss *my_ktss = current_ktss();

        /* Set postcode (DEBUG only) */
        postcode(PANIC_DOUBLE_FAULT);

        /*
         * Issue an I/O port read if one has been requested - this is an
         * event that logic analyzers can use as a trigger point.
         */
        if (panic_io_port)
                (void)inb(panic_io_port);

        /*
         * Break kprintf lock in case of recursion,
         * and record originally faulted instruction address.
         */
        kprintf_break_lock();

#if MACH_KDP
        /*
         * Print backtrace leading to first fault:
         */
        panic_i386_backtrace((void *) my_ktss->ebp, 10);
#endif

        panic("Double fault (CPU:%d, thread:%p, code:0x%x), "
              "registers:\n"
              "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
              "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
              "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
              "EFL: 0x%08x, EIP: 0x%08x\n",
              cpu_number(), current_thread(), code,
              get_cr0(), get_cr2(), get_cr3(), get_cr4(),
              my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
              my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
              my_ktss->eflags, my_ktss->eip);
}


/*
 * Called from locore on a special reserved stack after a machine-check.
 */
void
panic_machine_check(int code)
{
        struct i386_tss *my_ktss = current_ktss();

        /* Set postcode (DEBUG only) */
        postcode(PANIC_MACHINE_CHECK);

        /*
         * Break kprintf lock in case of recursion,
         * and record originally faulted instruction address.
         */
        kprintf_break_lock();
        panic("Machine-check (CPU:%d, thread:%p, code:0x%x), "
              "registers:\n"
              "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
              "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
              "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
              "EFL: 0x%08x, EIP: 0x%08x\n",
              cpu_number(), current_thread(), code,
              get_cr0(), get_cr2(), get_cr3(), get_cr4(),
              my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
              my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
              my_ktss->eflags, my_ktss->eip);
}

void
panic_double_fault64(x86_saved_state_t *esp)
{
        /* Set postcode (DEBUG only) */
        postcode(PANIC_DOUBLE_FAULT);

        /*
         * Break kprintf lock in case of recursion,
         * and record originally faulted instruction address.
         */
        kprintf_break_lock();

        /*
         * Dump the interrupt stack frame at last kernel entry.
         */
        if (is_saved_state64(esp)) {
                x86_saved_state64_t *ss64p = saved_state64(esp);
                panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx), "
                      "registers:\n"
                      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
                      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
                      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
                      "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
                      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
                      "RFL: 0x%016qx, RIP: 0x%016qx\n",
                      cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
                      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
                      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
                      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
                      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
                      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
                      ss64p->isf.rflags, ss64p->isf.rip);
        } else {
                x86_saved_state32_t *ss32p = saved_state32(esp);
                panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%x), "
                      "registers:\n"
                      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
                      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
                      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
                      "EFL: 0x%08x, EIP: 0x%08x\n",
                      cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
                      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
                      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
                      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
                      ss32p->efl, ss32p->eip);
        }
}

/*
 * Simplistic machine check handler.
 * We could peruse all those MSRs but we only dump register state as we do for
 * the double fault exception.
 * Note: the machine check registers are non-volatile across warm boot - so
 * they'll be around when we return.
 */
void
panic_machine_check64(x86_saved_state_t *esp)
{
        /* Set postcode (DEBUG only) */
        postcode(PANIC_MACHINE_CHECK);

        /*
         * Break kprintf lock in case of recursion,
         * and record originally faulted instruction address.
         */
        kprintf_break_lock();

        /*
         * Dump the interrupt stack frame at last kernel entry.
         */
        if (is_saved_state64(esp)) {
                x86_saved_state64_t *ss64p = saved_state64(esp);
                panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx), "
                      "registers:\n"
                      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
                      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
                      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
                      "R8:  0x%016qx, R9:  0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
                      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
                      "RFL: 0x%016qx, RIP: 0x%016qx\n",
                      cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
                      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
                      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
                      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
                      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
                      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
                      ss64p->isf.rflags, ss64p->isf.rip);
        } else {
                x86_saved_state32_t *ss32p = saved_state32(esp);
                panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%x), "
                      "registers:\n"
                      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
                      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
                      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
                      "EFL: 0x%08x, EIP: 0x%08x\n",
                      cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
                      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
                      ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
                      ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
                      ss32p->efl, ss32p->eip);
        }
}

/*
 * Trap from user mode.
 */
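/*
 * Editor's note (not in the original source): user_trap() translates a
 * hardware trap into a Mach exception triple (exc, code, subcode) and
 * delivers it via i386_exception(), e.g. INT3 becomes
 * (EXC_BREAKPOINT, EXC_I386_BPT, 0) and a general protection fault
 * becomes (EXC_BAD_INSTRUCTION, EXC_I386_GPFLT, err).  Page faults are
 * the exception: they are first handed to vm_fault(), and only reach
 * exception delivery through user_page_fault_continue() if the fault
 * cannot be resolved.
 */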
void
user_trap(
        x86_saved_state_t *saved_state)
{
        int                     exc;
        int                     code;
        int                     err;
        unsigned int            subcode;
        int                     type;
        user_addr_t             vaddr;
        vm_prot_t               prot;
        thread_t                thread = current_thread();
        ast_t                   *myast;
        boolean_t               intr;
        kern_return_t           kret;
        user_addr_t             rip;

        assert((is_saved_state32(saved_state) && !thread_is_64bit(thread)) ||
               (is_saved_state64(saved_state) &&  thread_is_64bit(thread)));

        if (is_saved_state64(saved_state)) {
                x86_saved_state64_t     *regs;

                regs = saved_state64(saved_state);

                type = regs->isf.trapno;
                err = regs->isf.err & 0xffff;
                vaddr = (user_addr_t)regs->cr2;
                rip = (user_addr_t)regs->isf.rip;
        } else {
                x86_saved_state32_t     *regs;

                regs = saved_state32(saved_state);

                type = regs->trapno;
                err = regs->err & 0xffff;
                vaddr = (user_addr_t)regs->cr2;
                rip = (user_addr_t)regs->eip;
        }

        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
                              (int)(vaddr>>32), (int)vaddr, (int)(rip>>32), (int)rip, 0);

        code = 0;
        subcode = 0;
        exc = 0;

#if DEBUG_TRACE
        kprintf("user_trap(0x%08x) type=%d vaddr=0x%016llx\n",
                saved_state, type, vaddr);
#endif
        myast = ast_pending();
        if (perfASTHook) {
                if (*myast & AST_CHUD_ALL) {
                        perfASTHook(type, saved_state, 0, 0);
                }
        } else {
                *myast &= ~AST_CHUD_ALL;
        }

        /* Is there a hook? */
        if (perfTrapHook) {
                if (perfTrapHook(type, saved_state, 0, 0) == KERN_SUCCESS)
                        return; /* If it succeeds, we are done... */
        }

        switch (type) {

            case T_DIVIDE_ERROR:
                exc = EXC_ARITHMETIC;
                code = EXC_I386_DIV;
                break;

            case T_DEBUG:
                {
                        pcb_t   pcb;
                        unsigned int clear = 0;
                        /*
                         * Get dr6 and set it in the thread's pcb before
                         * returning to userland.
                         */
                        pcb = thread->machine.pcb;
                        if (pcb->ids) {
                                /*
                                 * We can get and set the status register
                                 * in 32-bit mode even on a 64-bit thread
                                 * because the high order bits are not
                                 * used on x86_64.
                                 */
                                if (thread_is_64bit(thread)) {
                                        uint32_t dr6;
                                        x86_debug_state64_t *ids = pcb->ids;
                                        __asm__ volatile ("movl %%db6, %0" : "=r" (dr6));
                                        ids->dr6 = dr6;
                                } else { /* 32 bit thread */
                                        x86_debug_state32_t *ids = pcb->ids;
                                        __asm__ volatile ("movl %%db6, %0" : "=r" (ids->dr6));
                                }
                                __asm__ volatile ("movl %0, %%db6" : : "r" (clear));
                        }
                        exc = EXC_BREAKPOINT;
                        code = EXC_I386_SGL;
                        break;
                }
            case T_INT3:
                exc = EXC_BREAKPOINT;
                code = EXC_I386_BPT;
                break;

            case T_OVERFLOW:
                exc = EXC_ARITHMETIC;
                code = EXC_I386_INTO;
                break;

            case T_OUT_OF_BOUNDS:
                exc = EXC_SOFTWARE;
                code = EXC_I386_BOUND;
                break;

            case T_INVALID_OPCODE:
                exc = EXC_BAD_INSTRUCTION;
                code = EXC_I386_INVOP;
                break;

            case T_NO_FPU:
            case 32:            /* XXX */
                fpnoextflt();
                return;

            case T_FPU_FAULT:
                fpextovrflt();
                return;

            case 10:            /* invalid TSS == iret with NT flag set */
                exc = EXC_BAD_INSTRUCTION;
                code = EXC_I386_INVTSSFLT;
                subcode = err;
                break;

            case T_SEGMENT_NOT_PRESENT:
                exc = EXC_BAD_INSTRUCTION;
                code = EXC_I386_SEGNPFLT;
                subcode = err;
                break;

            case T_STACK_FAULT:
                exc = EXC_BAD_INSTRUCTION;
                code = EXC_I386_STKFLT;
                subcode = err;
                break;

            case T_GENERAL_PROTECTION:
                exc = EXC_BAD_INSTRUCTION;
                code = EXC_I386_GPFLT;
                subcode = err;
                break;

            case T_PAGE_FAULT:
                prot = VM_PROT_READ;

                if (err & T_PF_WRITE)
                        prot |= VM_PROT_WRITE;
#if PAE
                if (err & T_PF_EXECUTE)
                        prot |= VM_PROT_EXECUTE;
#endif
                kret = vm_fault(thread->map, vm_map_trunc_page(vaddr),
                                prot, FALSE,
                                THREAD_ABORTSAFE, NULL, 0);

                user_page_fault_continue(kret);

                /* NOTREACHED */
                break;

            case T_SSE_FLOAT_ERROR:
                fpSSEexterrflt();
                return;


            case T_FLOATING_POINT_ERROR:
                fpexterrflt();
                return;

            default:
#if MACH_KGDB
                Debugger("Unanticipated user trap");
                return;
#endif  /* MACH_KGDB */
#if MACH_KDB
                if (kdb_trap(type, err, saved_state))
                        return;
#endif  /* MACH_KDB */
                panic("user trap");
                return;
        }
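        /*
         * Editor's note (not in the original source): drain all pending
         * ASTs before delivering the exception.  ast_taken() can enable
         * interrupts, and a fresh AST may be posted while one is being
         * handled, so the pending mask is re-checked with interrupts
         * disabled until it is clear.
         */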
        intr = ml_set_interrupts_enabled(FALSE);
        myast = ast_pending();
        while (*myast & AST_ALL) {
                ast_taken(AST_ALL, intr);
                ml_set_interrupts_enabled(FALSE);
                myast = ast_pending();
        }
        ml_set_interrupts_enabled(intr);

        i386_exception(exc, code, subcode);
        /*NOTREACHED*/
}


/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void     log_thread_action (thread_t, char *);

void
i386_astintr(int preemption)
{
        ast_t           mask = AST_ALL;
        spl_t           s;

        if (preemption)
                mask = AST_PREEMPTION;

        s = splsched();

        ast_taken(mask, s);

        splx(s);
}

/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
        int     exc,
        int     code,
        int     subcode)
{
        exception_data_type_t   codes[EXCEPTION_CODE_MAX];

        codes[0] = code;                /* new exception interface */
        codes[1] = subcode;
        exception_triage(exc, codes, 2);
        /*NOTREACHED*/
}


void
kernel_preempt_check(void)
{
        ast_t           *myast;
        boolean_t       intr;

        /*
         * Disable interrupts both to prevent preemption
         * and to keep the ast state from changing via
         * an interrupt handler making something runnable.
         */
        intr = ml_set_interrupts_enabled(FALSE);

        myast = ast_pending();

        if ((*myast & AST_URGENT) && intr == TRUE && get_interrupt_level() == 0) {
                /*
                 * We can handle interrupts and preemptions
                 * at this point.
                 */
                ml_set_interrupts_enabled(intr);

                /*
                 * Now cause the preemption trap.
                 */
                __asm__ volatile ("int $0xff");
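                /*
                 * Editor's note (not in the original source): vector 0xff
                 * is the software-interrupt vector this port uses for
                 * preemption; it re-enters the kernel through kernel_trap()
                 * above, which fields it as T_PREEMPT and calls
                 * ast_taken(AST_PREEMPTION, FALSE).
                 */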
        } else {
                /*
                 * If interrupts were already disabled or
                 * we're in an interrupt context, we can't
                 * preempt...  of course, if AST_URGENT
                 * isn't set we also don't want to.
                 */
                ml_set_interrupts_enabled(intr);
        }
}

#if MACH_KDB

extern void     db_i386_state(x86_saved_state32_t *regs);

#include <ddb/db_output.h>

void
db_i386_state(
        x86_saved_state32_t *regs)
{
        db_printf("eip     %8x\n", regs->eip);
        db_printf("trap    %8x\n", regs->trapno);
        db_printf("err     %8x\n", regs->err);
        db_printf("efl     %8x\n", regs->efl);
        db_printf("ebp     %8x\n", regs->ebp);
        db_printf("esp     %8x\n", regs->cr2);  /* the esp slot of this frame holds cr2 */
        db_printf("uesp    %8x\n", regs->uesp);
        db_printf("cs      %8x\n", regs->cs & 0xff);
        db_printf("ds      %8x\n", regs->ds & 0xff);
        db_printf("es      %8x\n", regs->es & 0xff);
        db_printf("fs      %8x\n", regs->fs & 0xff);
        db_printf("gs      %8x\n", regs->gs & 0xff);
        db_printf("ss      %8x\n", regs->ss & 0xff);
        db_printf("eax     %8x\n", regs->eax);
        db_printf("ebx     %8x\n", regs->ebx);
        db_printf("ecx     %8x\n", regs->ecx);
        db_printf("edx     %8x\n", regs->edx);
        db_printf("esi     %8x\n", regs->esi);
        db_printf("edi     %8x\n", regs->edi);
}

#endif  /* MACH_KDB */

/* Synchronize a thread's i386_kernel_state (if any) with the given
 * x86_saved_state32_t obtained from the trap/IPI handler; called in
 * kernel_trap() prior to entering the debugger, and when receiving
 * an "MP_KDP" IPI.
 */

void
sync_iss_to_iks(x86_saved_state32_t *saved_state)
{
        struct x86_kernel_state32 *iks;
        vm_offset_t kstack;
        boolean_t record_active_regs = FALSE;

        if ((kstack = current_thread()->kernel_stack) != 0) {
                x86_saved_state32_t *regs;

                regs = saved_state;

                iks = STACK_IKS(kstack);

                /*
                 * Did we take the trap/interrupt in kernel mode?
                 */
                if (regs == USER_REGS32(current_thread()))
                        record_active_regs = TRUE;
                else {
                        iks->k_ebx = regs->ebx;
                        iks->k_esp = (int)regs;
                        iks->k_ebp = regs->ebp;
                        iks->k_edi = regs->edi;
                        iks->k_esi = regs->esi;
                        iks->k_eip = regs->eip;
                }
        }

        if (record_active_regs == TRUE) {
                /*
                 * Show the trap handler path.
                 */
                __asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
                __asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
                __asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
                __asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
                __asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
                /*
                 * "Current" instruction pointer: capture EIP by loading
                 * the address of the local label that follows.
                 */
                __asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
        }
}

/*
 * This is used by the NMI interrupt handler (from mp.c) to
 * unconditionally sync the trap handler context to the IKS
 * irrespective of whether the NMI was fielded in kernel
 * or user space.
 */
void
sync_iss_to_iks_unconditionally(__unused x86_saved_state32_t *saved_state) {
        struct x86_kernel_state32 *iks;
        vm_offset_t kstack;
        boolean_t record_active_regs = FALSE;

        if ((kstack = current_thread()->kernel_stack) != 0) {

                iks = STACK_IKS(kstack);
                /*
                 * Show the trap handler path.
                 */
                __asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
                __asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
                __asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
                __asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
                __asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
                /*
                 * "Current" instruction pointer.
                 */
                __asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));

        }
}