osfmk/i386/trap.c (apple/xnu, xnu-792.13.8)
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 */
60 /*
61 * Hardware trap/fault handler.
62 */
63
64 #include <mach_kdb.h>
65 #include <mach_kgdb.h>
66 #include <mach_kdp.h>
67 #include <mach_ldebug.h>
68
69 #include <types.h>
70 #include <i386/eflags.h>
71 #include <i386/trap.h>
72 #include <i386/pmap.h>
73 #include <i386/fpu.h>
74 #include <architecture/i386/pio.h> /* inb() */
75
76 #include <mach/exception.h>
77 #include <mach/kern_return.h>
78 #include <mach/vm_param.h>
79 #include <mach/i386/thread_status.h>
80
81 #include <vm/vm_kern.h>
82 #include <vm/vm_fault.h>
83
84 #include <kern/kern_types.h>
85 #include <kern/processor.h>
86 #include <kern/thread.h>
87 #include <kern/task.h>
88 #include <kern/sched.h>
89 #include <kern/sched_prim.h>
90 #include <kern/exception.h>
91 #include <kern/spl.h>
92 #include <kern/misc_protos.h>
93
94 #include <sys/kdebug.h>
95
96 #if MACH_KGDB
97 #include <kgdb/kgdb_defs.h>
98 #endif /* MACH_KGDB */
99
100 #if MACH_KDB
101 #include <debug.h>
102 #include <ddb/db_watch.h>
103 #include <ddb/db_run.h>
104 #include <ddb/db_break.h>
105 #include <ddb/db_trap.h>
106 #endif /* MACH_KDB */
107
108 #include <string.h>
109
110 #include <i386/io_emulate.h>
111 #include <i386/postcode.h>
112 #include <i386/mp_desc.h>
113 #include <i386/proc_reg.h>
114 #include <mach/i386/syscall_sw.h>
115
116 /*
117 * Forward declarations
118 */
119 static void user_page_fault_continue(kern_return_t kret);
120 static void panic_trap(x86_saved_state32_t *saved_state);
121 static void set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip);
122
123 perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */
124 perfCallback perfASTHook = NULL; /* Pointer to CHUD AST hook routine */
125
126 void
127 thread_syscall_return(
128 kern_return_t ret)
129 {
130 thread_t thr_act = current_thread();
131
132 if (thread_is_64bit(thr_act)) {
133 x86_saved_state64_t *regs;
134
135 regs = USER_REGS64(thr_act);
136
137 if (kdebug_enable && ((regs->rax & SYSCALL_CLASS_MASK) == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT))) {
138 /* Mach trap */
139 KERNEL_DEBUG_CONSTANT(
140 MACHDBG_CODE(DBG_MACH_EXCP_SC, ((int) (regs->rax & SYSCALL_NUMBER_MASK)))
141 | DBG_FUNC_END,
142 ret, 0, 0, 0, 0);
143 }
144 regs->rax = ret;
145
146 } else {
147 x86_saved_state32_t *regs;
148
149 regs = USER_REGS32(thr_act);
150
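		/*
		 * In the 32-bit ABI, Mach traps are invoked with a negative
		 * number in eax (BSD syscalls are positive), so eax < 0
		 * identifies a Mach trap for the kdebug trace below.
		 */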
151 if (kdebug_enable && ((int) regs->eax < 0)) {
152 /* Mach trap */
153 KERNEL_DEBUG_CONSTANT(
154 MACHDBG_CODE(DBG_MACH_EXCP_SC, -((int) regs->eax))
155 | DBG_FUNC_END,
156 ret, 0, 0, 0, 0);
157 }
158 regs->eax = ret;
159 }
160 thread_exception_return();
161 /*NOTREACHED*/
162 }
163
164
165 #if MACH_KDB
166 boolean_t debug_all_traps_with_kdb = FALSE;
167 extern struct db_watchpoint *db_watchpoint_list;
168 extern boolean_t db_watchpoints_inserted;
169 extern boolean_t db_breakpoints_inserted;
170
171 void
172 thread_kdb_return(void)
173 {
174 thread_t thr_act = current_thread();
175 x86_saved_state_t *iss = USER_STATE(thr_act);
176
177 if (is_saved_state64(iss)) {
178 x86_saved_state64_t *regs;
179
180 regs = saved_state64(iss);
181
182 if (kdb_trap(regs->isf.trapno, (int)regs->isf.err, (void *)regs)) {
183 thread_exception_return();
184 /*NOTREACHED*/
185 }
186
187 } else {
188 x86_saved_state32_t *regs;
189
190 regs = saved_state32(iss);
191
192 if (kdb_trap(regs->trapno, regs->err, (void *)regs)) {
193 thread_exception_return();
194 /*NOTREACHED*/
195 }
196 }
197 }
198
199 #endif /* MACH_KDB */
200
201 void
202 user_page_fault_continue(
203 kern_return_t kr)
204 {
205 thread_t thread = current_thread();
206 x86_saved_state_t *regs = USER_STATE(thread);
207 ast_t *myast;
208 boolean_t intr;
209 user_addr_t vaddr;
210 #if MACH_KDB
211 int err;
212 int trapno;
213 #endif
214
215 assert((is_saved_state32(regs) && !thread_is_64bit(thread)) ||
216 (is_saved_state64(regs) && thread_is_64bit(thread)));
217
218 if (thread_is_64bit(thread)) {
219 x86_saved_state64_t *uregs;
220
221 uregs = USER_REGS64(thread);
222
223 #if MACH_KDB
224 trapno = uregs->isf.trapno;
225 err = uregs->isf.err;
226 #endif
227 vaddr = (user_addr_t)uregs->cr2;
228 } else {
229 x86_saved_state32_t *uregs;
230
231 uregs = USER_REGS32(thread);
232
233 #if MACH_KDB
234 trapno = uregs->trapno;
235 err = uregs->err;
236 #endif
237 vaddr = uregs->cr2;
238 }
239
240 if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
241 #if MACH_KDB
242 if (!db_breakpoints_inserted) {
243 db_set_breakpoints();
244 }
245 if (db_watchpoint_list &&
246 db_watchpoints_inserted &&
247 (err & T_PF_WRITE) &&
248 db_find_watchpoint(thread->map,
249 (vm_offset_t)vaddr,
250 regs))
251 kdb_trap(T_WATCHPOINT, 0, regs);
252 #endif /* MACH_KDB */
253 intr = ml_set_interrupts_enabled(FALSE);
254 myast = ast_pending();
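		/*
		 * Drain any pending ASTs, with interrupts disabled, before
		 * returning to user mode.
		 */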
255 while (*myast & AST_ALL) {
256 ast_taken(AST_ALL, intr);
257 ml_set_interrupts_enabled(FALSE);
258 myast = ast_pending();
259 }
260 ml_set_interrupts_enabled(intr);
261
262 thread_exception_return();
263 /*NOTREACHED*/
264 }
265
266 #if MACH_KDB
267 if (debug_all_traps_with_kdb &&
268 kdb_trap(trapno, err, regs)) {
269 thread_exception_return();
270 /*NOTREACHED*/
271 }
272 #endif /* MACH_KDB */
273
274 i386_exception(EXC_BAD_ACCESS, kr, vaddr);
275 /*NOTREACHED*/
276 }
277
278 /*
279 * Fault recovery in copyin/copyout routines.
280 */
281 struct recovery {
282 uint32_t fault_addr;
283 uint32_t recover_addr;
284 };
285
286 extern struct recovery recover_table[];
287 extern struct recovery recover_table_end[];
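/*
 * The table is populated at build time by the assembly copy routines
 * (see locore.s): each entry pairs the address of an instruction that
 * may fault with the address to resume at if it does.
 */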
288
289 const char * trap_type[] = {TRAP_NAMES};
290 unsigned TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);
291
292 extern unsigned panic_io_port;
293
294 static inline void
295 reset_dr7(void)
296 {
297 	uint32_t dr7 = 0x400; /* DR7 reset value: bit 10 is reserved (always set), all breakpoints disabled */
298 __asm__ volatile("movl %0,%%dr7" : : "r" (dr7));
299 }
300 #if MACH_KDP
301 unsigned kdp_has_active_watchpoints = 0;
302 #endif
303 /*
304 * Trap from kernel mode. Only page-fault errors are recoverable,
305 * and then only in special circumstances. All other errors are
306  * fatal and are handed to panic_trap().
307 */
308 void
309 kernel_trap(
310 x86_saved_state_t *state)
311 {
312 x86_saved_state32_t *saved_state;
313 int code;
314 user_addr_t vaddr;
315 int type;
316 vm_map_t map;
317 kern_return_t result = KERN_FAILURE;
318 thread_t thread;
319 ast_t *myast;
320 boolean_t intr;
321 vm_prot_t prot;
322 struct recovery *rp;
323 vm_offset_t kern_ip;
324 int fault_in_copy_window = -1;
325 int is_user = 0;
326 #if MACH_KDB
327 pt_entry_t *pte;
328 #endif /* MACH_KDB */
329
330 thread = current_thread();
331
332 if (is_saved_state64(state))
333 panic("kernel_trap(%p) with 64-bit state", state);
334 saved_state = saved_state32(state);
335
336 vaddr = (user_addr_t)saved_state->cr2;
337 type = saved_state->trapno;
338 code = saved_state->err & 0xffff;
339 intr = (saved_state->efl & EFL_IF) != 0; /* state of ints at trap */
340
341 kern_ip = (vm_offset_t)saved_state->eip;
342
343 myast = ast_pending();
344
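	/*
	 * Give the CHUD AST hook, if installed, a chance to consume any
	 * pending CHUD ASTs; otherwise clear the stale CHUD AST bits.
	 */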
345 if (perfASTHook) {
346 if (*myast & AST_CHUD_ALL)
347 perfASTHook(type, NULL, 0, 0);
348 } else
349 *myast &= ~AST_CHUD_ALL;
350
351 /*
352 * Is there a hook?
353 */
354 if (perfTrapHook) {
355 if (perfTrapHook(type, NULL, 0, 0) == KERN_SUCCESS) {
356 /*
357 * If it succeeds, we are done...
358 */
359 return;
360 }
361 }
362 /*
363 * we come here with interrupts off as we don't want to recurse
364 * on preemption below. but we do want to re-enable interrupts
365 * as soon we possibly can to hold latency down
366 */
367 if (T_PREEMPT == type) {
368
369 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
370 0, 0, 0, kern_ip, 0);
371
372 ast_taken(AST_PREEMPTION, FALSE);
373 return;
374 }
375
376 if (T_PAGE_FAULT == type) {
377 /*
378 * assume we're faulting in the kernel map
379 */
380 map = kernel_map;
381
382 if (thread != THREAD_NULL && thread->map != kernel_map) {
383 vm_offset_t copy_window_base;
384 vm_offset_t kvaddr;
385 int window_index;
386
387 kvaddr = (vm_offset_t)vaddr;
388 /*
389 * must determine if fault occurred in
390 * the copy window while pre-emption is
391 * disabled for this processor so that
392 * we only need to look at the window
393 * associated with this processor
394 */
395 copy_window_base = current_cpu_datap()->cpu_copywindow_base;
396
397 if (kvaddr >= copy_window_base && kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS)) ) {
398
399 window_index = (kvaddr - copy_window_base) / NBPDE;
400
401 if (thread->machine.copy_window[window_index].user_base != (user_addr_t)-1) {
402
403 kvaddr -= (copy_window_base + (NBPDE * window_index));
404 vaddr = thread->machine.copy_window[window_index].user_base + kvaddr;
405
406 map = thread->map;
407 fault_in_copy_window = window_index;
408 }
409 is_user = -1;
410 }
411 }
412 }
413 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
414 (int)(vaddr >> 32), (int)vaddr, is_user, kern_ip, 0);
415
416
417 (void) ml_set_interrupts_enabled(intr);
418
419 switch (type) {
420
421 case T_NO_FPU:
422 fpnoextflt();
423 return;
424
425 case T_FPU_FAULT:
426 fpextovrflt();
427 return;
428
429 case T_FLOATING_POINT_ERROR:
430 fpexterrflt();
431 return;
432
433 case T_SSE_FLOAT_ERROR:
434 fpSSEexterrflt();
435 return;
436 case T_DEBUG:
437 if ((saved_state->efl & EFL_TF) == 0
438 && !kdp_has_active_watchpoints) {
439 /* We've somehow encountered a debug
440 * register match that does not belong
441 * to the kernel debugger.
442 * This isn't supposed to happen.
443 */
444 reset_dr7();
445 return;
446 }
447 goto debugger_entry;
448 case T_PAGE_FAULT:
449 /*
450 * If the current map is a submap of the kernel map,
451 * and the address is within that map, fault on that
452 * map. If the same check is done in vm_fault
453 * (vm_map_lookup), we may deadlock on the kernel map
454 * lock.
455 */
456
457 prot = VM_PROT_READ;
458
459 if (code & T_PF_WRITE)
460 prot |= VM_PROT_WRITE;
461 #if PAE
462 if (code & T_PF_EXECUTE)
463 prot |= VM_PROT_EXECUTE;
464 #endif
465
466 #if MACH_KDB
467 /*
468 * Check for watchpoint on kernel static data.
469 * vm_fault would fail in this case
470 */
471 if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted &&
472 (code & T_PF_WRITE) && vaddr < vm_map_max(map) &&
473 ((*(pte = pmap_pte(kernel_pmap, (vm_map_offset_t)vaddr))) & INTEL_PTE_WRITE) == 0) {
474 pmap_store_pte(
475 pte,
476 *pte | INTEL_PTE_VALID | INTEL_PTE_WRITE);
477 /* XXX need invltlb here? */
478
479 result = KERN_SUCCESS;
480 goto look_for_watchpoints;
481 }
482 #endif /* MACH_KDB */
483
484 result = vm_fault(map,
485 vm_map_trunc_page(vaddr),
486 prot,
487 FALSE,
488 THREAD_UNINT, NULL, 0);
489
490 #if MACH_KDB
491 if (result == KERN_SUCCESS) {
492 /*
493 * Look for watchpoints
494 */
495 look_for_watchpoints:
496 if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted && (code & T_PF_WRITE) &&
497 db_find_watchpoint(map, vaddr, saved_state))
498 kdb_trap(T_WATCHPOINT, 0, saved_state);
499 }
500 #endif /* MACH_KDB */
501
502 if (result == KERN_SUCCESS) {
503
504 if (fault_in_copy_window != -1) {
505 pt_entry_t *updp;
506 pt_entry_t *kpdp;
507
508 /*
509 * in case there was no page table assigned
510 * for the user base address and the pmap
511 * got 'expanded' due to this fault, we'll
512 * copy in the descriptor
513 *
514 * we're either setting the page table descriptor
515 * to the same value or it was 0... no need
516 * for a TLB flush in either case
517 */
518
519 ml_set_interrupts_enabled(FALSE);
520 updp = pmap_pde(map->pmap, thread->machine.copy_window[fault_in_copy_window].user_base);
521 assert(updp);
522 if (0 == updp) panic("trap: updp 0"); /* XXX DEBUG */
523 kpdp = current_cpu_datap()->cpu_copywindow_pdp;
524 kpdp += fault_in_copy_window;
525
526 #if JOE_DEBUG
527 if (*kpdp && (*kpdp & PG_FRAME) != (*updp & PG_FRAME))
528 panic("kernel_fault: user pdp doesn't match - updp = 0x%x, kpdp = 0x%x\n", updp, kpdp);
529 #endif
530 pmap_store_pte(kpdp, *updp);
531
532 (void) ml_set_interrupts_enabled(intr);
533 }
534 return;
535 }
536 /*
537 * fall through
538 */
539
540 case T_GENERAL_PROTECTION:
541 /*
542 * If there is a failure recovery address
543 * for this fault, go there.
544 */
545 for (rp = recover_table; rp < recover_table_end; rp++) {
546 if (kern_ip == rp->fault_addr) {
547 set_recovery_ip(saved_state, rp->recover_addr);
548 return;
549 }
550 }
551
552 /*
553 * Check thread recovery address also.
554 */
555 if (thread->recover) {
556 set_recovery_ip(saved_state, thread->recover);
557 thread->recover = 0;
558 return;
559 }
560 /*
561 * Unanticipated page-fault errors in kernel
562 * should not happen.
563 *
564 * fall through...
565 */
566
567 default:
568 /*
569 * Exception 15 is reserved but some chips may generate it
570 * spuriously. Seen at startup on AMD Athlon-64.
571 */
572 if (type == 15) {
573 kprintf("kernel_trap() ignoring spurious trap 15\n");
574 return;
575 }
576 debugger_entry:
577 /* Ensure that the i386_kernel_state at the base of the
578 * current thread's stack (if any) is synchronized with the
579 * context at the moment of the trap, to facilitate
580 * access through the debugger.
581 */
582 sync_iss_to_iks(saved_state);
583 #if MACH_KDB
584 restart_debugger:
585 #endif /* MACH_KDB */
586 #if MACH_KDP
587 if (current_debugger != KDB_CUR_DB) {
588 if (kdp_i386_trap(type, saved_state, result, vaddr))
589 return;
590 }
591 #endif /* MACH_KDP */
592 #if MACH_KDB
593 else
594 if (kdb_trap(type, code, saved_state)) {
595 if (switch_debugger) {
596 current_debugger = KDP_CUR_DB;
597 switch_debugger = 0;
598 goto restart_debugger;
599 }
600 return;
601 }
602 #endif /* MACH_KDB */
603 }
604
605 panic_trap(saved_state);
606 /*
607 * NO RETURN
608 */
609 }
610
611
612 static void
613 set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip)
614 {
615 saved_state->eip = ip;
616 }
617
618
619 static void
620 panic_trap(x86_saved_state32_t *regs)
621 {
622 const char *trapname = "Unknown";
623 uint32_t cr0 = get_cr0();
624 uint32_t cr2 = get_cr2();
625 uint32_t cr3 = get_cr3();
626 uint32_t cr4 = get_cr4();
627
628 if (panic_io_port)
629 (void)inb(panic_io_port);
630
631 kprintf("panic trap number 0x%x, eip 0x%x\n", regs->trapno, regs->eip);
632 kprintf("cr0 0x%08x cr2 0x%08x cr3 0x%08x cr4 0x%08x\n",
633 cr0, cr2, cr3, cr4);
634
635 if (regs->trapno < TRAP_TYPES)
636 trapname = trap_type[regs->trapno];
637
638 panic("Unresolved kernel trap (CPU %d, Type %d=%s), registers:\n"
639 "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
640 "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
641 "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
642 "EFL: 0x%08x, EIP: 0x%08x, CS: 0x%08x, DS: 0x%08x\n",
643 cpu_number(), regs->trapno, trapname, cr0, cr2, cr3, cr4,
644 regs->eax,regs->ebx,regs->ecx,regs->edx,
645 regs->cr2,regs->ebp,regs->esi,regs->edi,
646 regs->efl,regs->eip,regs->cs, regs->ds);
647 /*
648 * This next statement is not executed,
649 * but it's needed to stop the compiler using tail call optimization
650 * for the panic call - which confuses the subsequent backtrace.
651 */
652 cr0 = 0;
653 }
654
655 extern void kprintf_break_lock(void);
656
657
658 /*
659 * Called from locore on a special reserved stack after a double-fault
660 * is taken in kernel space.
661 * Kernel stack overflow is one route here.
662 */
663 void
664 panic_double_fault(int code)
665 {
666 struct i386_tss *my_ktss = current_ktss();
667
668 /* Set postcode (DEBUG only) */
669 postcode(PANIC_DOUBLE_FAULT);
670
671 	/* Issue an I/O port read if one has been requested - this is an event that
672 	 * logic analyzers can use as a trigger point.
673 */
674 if (panic_io_port)
675 (void)inb(panic_io_port);
676
677 /*
678 * Break kprintf lock in case of recursion,
679 * and record originally faulted instruction address.
680 */
681 kprintf_break_lock();
682
683 #if MACH_KDP
684 /*
685 * Print backtrace leading to first fault:
686 */
687 panic_i386_backtrace((void *) my_ktss->ebp, 10);
688 #endif
689
690 panic("Double fault (CPU:%d, thread:%p, code:0x%x),"
691 "registers:\n"
692 "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
693 "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
694 "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
695 "EFL: 0x%08x, EIP: 0x%08x\n",
696 cpu_number(), current_thread(), code,
697 get_cr0(), get_cr2(), get_cr3(), get_cr4(),
698 my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
699 my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
700 my_ktss->eflags, my_ktss->eip);
701 }
702
703
704 /*
705 * Called from locore on a special reserved stack after a machine-check
706 */
707 void
708 panic_machine_check(int code)
709 {
710 struct i386_tss *my_ktss = current_ktss();
711
712 /* Set postcode (DEBUG only) */
713 postcode(PANIC_MACHINE_CHECK);
714
715 /*
716 * Break kprintf lock in case of recursion,
717 * and record originally faulted instruction address.
718 */
719 kprintf_break_lock();
720 panic("Machine-check (CPU:%d, thread:%p, code:0x%x),"
721 "registers:\n"
722 "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
723 "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
724 "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
725 "EFL: 0x%08x, EIP: 0x%08x\n",
726 cpu_number(), current_thread(), code,
727 get_cr0(), get_cr2(), get_cr3(), get_cr4(),
728 my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
729 my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
730 my_ktss->eflags, my_ktss->eip);
731 }
732
733 void
734 panic_double_fault64(x86_saved_state_t *esp)
735 {
736 /* Set postcode (DEBUG only) */
737 postcode(PANIC_DOUBLE_FAULT);
738
739 /*
740 * Break kprintf lock in case of recursion,
741 * and record originally faulted instruction address.
742 */
743 kprintf_break_lock();
744
745 /*
746 * Dump the interrupt stack frame at last kernel entry.
747 */
748 if (is_saved_state64(esp)) {
749 x86_saved_state64_t *ss64p = saved_state64(esp);
750 panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx),"
751 "registers:\n"
752 "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
753 "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
754 "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
755 "R8: 0x%016qx, R9: 0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
756 "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
757 "RFL: 0x%016qx, RIP: 0x%016qx\n",
758 cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
759 get_cr0(), get_cr2(), get_cr3(), get_cr4(),
760 ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
761 ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
762 ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
763 ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
764 ss64p->isf.rflags, ss64p->isf.rip);
765 } else {
766 x86_saved_state32_t *ss32p = saved_state32(esp);
767 panic("Double fault (CPU:%d, thread:%p, trapno:0x%x, err:0x%x),"
768 "registers:\n"
769 "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
770 "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
771 "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
772 "EFL: 0x%08x, EIP: 0x%08x\n",
773 cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
774 get_cr0(), get_cr2(), get_cr3(), get_cr4(),
775 ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
776 ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
777 ss32p->efl, ss32p->eip);
778 }
779 }
780
781 /*
782 * Simplistic machine check handler.
783 * We could peruse all those MSRs but we only dump register state as we do for
784 * the double fault exception.
785 * Note: the machine check registers are non-volatile across warm boot - so
786 * they'll be around when we return.
787 */
788 void
789 panic_machine_check64(x86_saved_state_t *esp)
790 {
791 /* Set postcode (DEBUG only) */
792 postcode(PANIC_MACHINE_CHECK);
793
794 /*
795 * Break kprintf lock in case of recursion,
796 * and record originally faulted instruction address.
797 */
798 kprintf_break_lock();
799
800 /*
801 * Dump the interrupt stack frame at last kernel entry.
802 */
803 if (is_saved_state64(esp)) {
804 x86_saved_state64_t *ss64p = saved_state64(esp);
805 panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%qx),"
806 "registers:\n"
807 "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
808 "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
809 "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
810 "R8: 0x%016qx, R9: 0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
811 "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
812 "RFL: 0x%016qx, RIP: 0x%016qx\n",
813 cpu_number(), current_thread(), ss64p->isf.trapno, ss64p->isf.err,
814 get_cr0(), get_cr2(), get_cr3(), get_cr4(),
815 ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
816 ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
817 ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
818 ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
819 ss64p->isf.rflags, ss64p->isf.rip);
820 } else {
821 x86_saved_state32_t *ss32p = saved_state32(esp);
822 panic("Machine Check (CPU:%d, thread:%p, trapno:0x%x, err:0x%x),"
823 "registers:\n"
824 "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
825 "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
826 "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
827 "EFL: 0x%08x, EIP: 0x%08x\n",
828 cpu_number(), current_thread(), ss32p->trapno, ss32p->err,
829 get_cr0(), get_cr2(), get_cr3(), get_cr4(),
830 ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
831 ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
832 ss32p->efl, ss32p->eip);
833 }
834 }
835
836 /*
837 * Trap from user mode.
838 */
839 void
840 user_trap(
841 x86_saved_state_t *saved_state)
842 {
843 int exc;
844 int code;
845 int err;
846 unsigned int subcode;
847 int type;
848 user_addr_t vaddr;
849 vm_prot_t prot;
850 thread_t thread = current_thread();
851 ast_t *myast;
852 boolean_t intr;
853 kern_return_t kret;
854 user_addr_t rip;
855
856 assert((is_saved_state32(saved_state) && !thread_is_64bit(thread)) ||
857 (is_saved_state64(saved_state) && thread_is_64bit(thread)));
858
859 if (is_saved_state64(saved_state)) {
860 x86_saved_state64_t *regs;
861
862 regs = saved_state64(saved_state);
863
864 type = regs->isf.trapno;
865 err = regs->isf.err & 0xffff;
866 vaddr = (user_addr_t)regs->cr2;
867 rip = (user_addr_t)regs->isf.rip;
868 } else {
869 x86_saved_state32_t *regs;
870
871 regs = saved_state32(saved_state);
872
873 type = regs->trapno;
874 err = regs->err & 0xffff;
875 vaddr = (user_addr_t)regs->cr2;
876 rip = (user_addr_t)regs->eip;
877 }
878
879 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
880 (int)(vaddr>>32), (int)vaddr, (int)(rip>>32), (int)rip, 0);
881
882 code = 0;
883 subcode = 0;
884 exc = 0;
885
886 #if DEBUG_TRACE
887 kprintf("user_trap(0x%08x) type=%d vaddr=0x%016llx\n",
888 saved_state, type, vaddr);
889 #endif
890 myast = ast_pending();
891 if (perfASTHook) {
892 if (*myast & AST_CHUD_ALL) {
893 perfASTHook(type, saved_state, 0, 0);
894 }
895 } else {
896 *myast &= ~AST_CHUD_ALL;
897 }
898
899 /* Is there a hook? */
900 if (perfTrapHook) {
901 if (perfTrapHook(type, saved_state, 0, 0) == KERN_SUCCESS)
902 return; /* If it succeeds, we are done... */
903 }
904
905 switch (type) {
906
907 case T_DIVIDE_ERROR:
908 exc = EXC_ARITHMETIC;
909 code = EXC_I386_DIV;
910 break;
911
912 case T_DEBUG:
913 {
914 pcb_t pcb;
915 unsigned int clear = 0;
916 /*
917 * get dr6 and set it in the thread's pcb before
918 * returning to userland
919 */
920 pcb = thread->machine.pcb;
921 if (pcb->ids) {
922 /*
923 * We can get and set the status register
924 * in 32-bit mode even on a 64-bit thread
925 * because the high order bits are not
926 * used on x86_64
927 */
928 if (thread_is_64bit(thread)) {
929 uint32_t dr6;
930 x86_debug_state64_t *ids = pcb->ids;
931 dr6 = (uint32_t)ids->dr6;
932 __asm__ volatile ("movl %%db6, %0" : "=r" (dr6));
933 ids->dr6 = dr6;
934 } else { /* 32 bit thread */
935 x86_debug_state32_t *ids = pcb->ids;
936 __asm__ volatile ("movl %%db6, %0" : "=r" (ids->dr6));
937 }
938 __asm__ volatile ("movl %0, %%db6" : : "r" (clear));
939 }
940 exc = EXC_BREAKPOINT;
941 code = EXC_I386_SGL;
942 break;
943 }
944 case T_INT3:
945 exc = EXC_BREAKPOINT;
946 code = EXC_I386_BPT;
947 break;
948
949 case T_OVERFLOW:
950 exc = EXC_ARITHMETIC;
951 code = EXC_I386_INTO;
952 break;
953
954 case T_OUT_OF_BOUNDS:
955 exc = EXC_SOFTWARE;
956 code = EXC_I386_BOUND;
957 break;
958
959 case T_INVALID_OPCODE:
960 exc = EXC_BAD_INSTRUCTION;
961 code = EXC_I386_INVOP;
962 break;
963
964 case T_NO_FPU:
965 case 32: /* XXX */
966 fpnoextflt();
967 return;
968
969 case T_FPU_FAULT:
970 fpextovrflt();
971 return;
972
973 case 10: /* invalid TSS == iret with NT flag set */
974 exc = EXC_BAD_INSTRUCTION;
975 code = EXC_I386_INVTSSFLT;
976 subcode = err;
977 break;
978
979 case T_SEGMENT_NOT_PRESENT:
980 exc = EXC_BAD_INSTRUCTION;
981 code = EXC_I386_SEGNPFLT;
982 subcode = err;
983 break;
984
985 case T_STACK_FAULT:
986 exc = EXC_BAD_INSTRUCTION;
987 code = EXC_I386_STKFLT;
988 subcode = err;
989 break;
990
991 case T_GENERAL_PROTECTION:
992 exc = EXC_BAD_INSTRUCTION;
993 code = EXC_I386_GPFLT;
994 subcode = err;
995 break;
996
997 case T_PAGE_FAULT:
998 prot = VM_PROT_READ;
999
1000 if (err & T_PF_WRITE)
1001 prot |= VM_PROT_WRITE;
1002 #if PAE
1003 if (err & T_PF_EXECUTE)
1004 prot |= VM_PROT_EXECUTE;
1005 #endif
1006 kret = vm_fault(thread->map, vm_map_trunc_page(vaddr),
1007 prot, FALSE,
1008 THREAD_ABORTSAFE, NULL, 0);
1009
1010 user_page_fault_continue(kret);
1011
1012 /* NOTREACHED */
1013 break;
1014
1015 case T_SSE_FLOAT_ERROR:
1016 fpSSEexterrflt();
1017 return;
1018
1019
1020 case T_FLOATING_POINT_ERROR:
1021 fpexterrflt();
1022 return;
1023
1024 default:
1025 #if MACH_KGDB
1026 Debugger("Unanticipated user trap");
1027 return;
1028 #endif /* MACH_KGDB */
1029 #if MACH_KDB
1030 if (kdb_trap(type, err, saved_state))
1031 return;
1032 #endif /* MACH_KDB */
1033 panic("user trap");
1034 return;
1035 }
1036 intr = ml_set_interrupts_enabled(FALSE);
1037 myast = ast_pending();
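	/*
	 * Drain any pending ASTs, with interrupts disabled, before
	 * delivering the exception.
	 */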
1038 while (*myast & AST_ALL) {
1039 ast_taken(AST_ALL, intr);
1040 ml_set_interrupts_enabled(FALSE);
1041 myast = ast_pending();
1042 }
1043 ml_set_interrupts_enabled(intr);
1044
1045 i386_exception(exc, code, subcode);
1046 /*NOTREACHED*/
1047 }
1048
1049
1050 /*
1051 * Handle AST traps for i386.
1052 * Check for delayed floating-point exception from
1053 * AT-bus machines.
1054 */
1055
1056 extern void log_thread_action (thread_t, char *);
1057
1058 void
1059 i386_astintr(int preemption)
1060 {
1061 ast_t mask = AST_ALL;
1062 spl_t s;
1063
1064 if (preemption)
1065 mask = AST_PREEMPTION;
1066
1067 s = splsched();
1068
1069 ast_taken(mask, s);
1070
1071 splx(s);
1072 }
1073
1074 /*
1075 * Handle exceptions for i386.
1076 *
1077 * If we are an AT bus machine, we must turn off the AST for a
1078 * delayed floating-point exception.
1079 *
1080 * If we are providing floating-point emulation, we may have
1081 * to retrieve the real register values from the floating point
1082 * emulator.
1083 */
1084 void
1085 i386_exception(
1086 int exc,
1087 int code,
1088 int subcode)
1089 {
1090 exception_data_type_t codes[EXCEPTION_CODE_MAX];
1091
1092 codes[0] = code; /* new exception interface */
1093 codes[1] = subcode;
1094 exception_triage(exc, codes, 2);
1095 /*NOTREACHED*/
1096 }
1097
1098
1099 void
1100 kernel_preempt_check(void)
1101 {
1102 ast_t *myast;
1103 boolean_t intr;
1104
1105 /*
1106 	 * disable interrupts both to prevent pre-emption
1107 * and to keep the ast state from changing via
1108 * an interrupt handler making something runnable
1109 */
1110 intr = ml_set_interrupts_enabled(FALSE);
1111
1112 myast = ast_pending();
1113
1114 if ((*myast & AST_URGENT) && intr == TRUE && get_interrupt_level() == 0) {
1115 /*
1116 * can handle interrupts and preemptions
1117 * at this point
1118 */
1119 ml_set_interrupts_enabled(intr);
1120
1121 /*
1122 		 * now cause the PRE-EMPTION trap: int 0xff is dispatched as T_PREEMPT and handled at the top of kernel_trap()
1123 */
1124 __asm__ volatile (" int $0xff");
1125 } else {
1126 /*
1127 * if interrupts were already disabled or
1128 * we're in an interrupt context, we can't
1129 * preempt... of course if AST_URGENT
1130 * isn't set we also don't want to
1131 */
1132 ml_set_interrupts_enabled(intr);
1133 }
1134 }
1135
1136 #if MACH_KDB
1137
1138 extern void db_i386_state(x86_saved_state32_t *regs);
1139
1140 #include <ddb/db_output.h>
1141
1142 void
1143 db_i386_state(
1144 x86_saved_state32_t *regs)
1145 {
1146 db_printf("eip %8x\n", regs->eip);
1147 db_printf("trap %8x\n", regs->trapno);
1148 db_printf("err %8x\n", regs->err);
1149 db_printf("efl %8x\n", regs->efl);
1150 db_printf("ebp %8x\n", regs->ebp);
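	/*
	 * x86_saved_state32_t has no kernel esp field; the pusha slot that
	 * would hold it is reused for cr2, hence regs->cr2 below.
	 */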
1151 db_printf("esp %8x\n", regs->cr2);
1152 db_printf("uesp %8x\n", regs->uesp);
1153 db_printf("cs %8x\n", regs->cs & 0xff);
1154 db_printf("ds %8x\n", regs->ds & 0xff);
1155 db_printf("es %8x\n", regs->es & 0xff);
1156 db_printf("fs %8x\n", regs->fs & 0xff);
1157 db_printf("gs %8x\n", regs->gs & 0xff);
1158 db_printf("ss %8x\n", regs->ss & 0xff);
1159 db_printf("eax %8x\n", regs->eax);
1160 db_printf("ebx %8x\n", regs->ebx);
1161 db_printf("ecx %8x\n", regs->ecx);
1162 db_printf("edx %8x\n", regs->edx);
1163 db_printf("esi %8x\n", regs->esi);
1164 db_printf("edi %8x\n", regs->edi);
1165 }
1166
1167 #endif /* MACH_KDB */
1168
1169 /* Synchronize a thread's x86_kernel_state32 (if any) with the given
1170  * x86_saved_state32_t obtained from the trap/IPI handler; called in
1171 * kernel_trap() prior to entering the debugger, and when receiving
1172 * an "MP_KDP" IPI.
1173 */
1174
1175 void
1176 sync_iss_to_iks(x86_saved_state32_t *saved_state)
1177 {
1178 struct x86_kernel_state32 *iks;
1179 vm_offset_t kstack;
1180 boolean_t record_active_regs = FALSE;
1181
1182 if ((kstack = current_thread()->kernel_stack) != 0) {
1183 x86_saved_state32_t *regs;
1184
1185 regs = saved_state;
1186
1187 iks = STACK_IKS(kstack);
1188
1189 /*
1190 * Did we take the trap/interrupt in kernel mode?
1191 */
1192 if (regs == USER_REGS32(current_thread()))
1193 record_active_regs = TRUE;
1194 else {
1195 iks->k_ebx = regs->ebx;
1196 iks->k_esp = (int)regs;
1197 iks->k_ebp = regs->ebp;
1198 iks->k_edi = regs->edi;
1199 iks->k_esi = regs->esi;
1200 iks->k_eip = regs->eip;
1201 }
1202 }
1203
1204 if (record_active_regs == TRUE) {
1205 /*
1206 * Show the trap handler path
1207 */
1208 __asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
1209 __asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
1210 __asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
1211 __asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
1212 __asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
1213 /*
1214 * "Current" instruction pointer
1215 */
1216 __asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
1217 }
1218 }
1219
1220 /*
1221 * This is used by the NMI interrupt handler (from mp.c) to
1222  * unconditionally sync the trap handler context to the IKS
1223 * irrespective of whether the NMI was fielded in kernel
1224 * or user space.
1225 */
1226 void
1227 sync_iss_to_iks_unconditionally(__unused x86_saved_state32_t *saved_state) {
1228 struct x86_kernel_state32 *iks;
1229 vm_offset_t kstack;
1230 boolean_t record_active_regs = FALSE;
1231
1232 if ((kstack = current_thread()->kernel_stack) != 0) {
1233
1234 iks = STACK_IKS(kstack);
1235 /*
1236 * Show the trap handler path
1237 */
1238 __asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
1239 __asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
1240 __asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
1241 __asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
1242 __asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
1243 /*
1244 * "Current" instruction pointer
1245 */
1246 __asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
1247
1248 }
1249 }