/* apple/xnu xnu-1228.0.2: osfmk/i386/trap.c */
/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * Hardware trap/fault handler.
 */

#include <mach_kdb.h>
#include <mach_kgdb.h>
#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <types.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h> /* panic_io_port_read() */

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>

#include <sys/kdebug.h>

#if MACH_KGDB
#include <kgdb/kgdb_defs.h>
#endif /* MACH_KGDB */

#if MACH_KDB
#include <debug.h>
#include <ddb/db_watch.h>
#include <ddb/db_run.h>
#include <ddb/db_break.h>
#include <ddb/db_trap.h>
#endif /* MACH_KDB */

#include <string.h>

#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/proc_reg.h>
#include <i386/machine_check.h>
#include <mach/i386/syscall_sw.h>

/*
 * Forward declarations
 */
static void user_page_fault_continue(kern_return_t kret);
static void panic_trap(x86_saved_state32_t *saved_state);
static void set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip);

perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */
perfCallback perfASTHook = NULL;  /* Pointer to CHUD AST hook routine */

#if CONFIG_DTRACE
/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */

extern boolean_t dtrace_tally_fault(user_addr_t);
#endif

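/*
 * thread_syscall_return: deliver a syscall return value to the current
 * user thread and resume user mode; this routine does not return to its
 * caller.  For Mach traps it also emits the matching kdebug end event:
 * 64-bit Mach traps carry the syscall class in the high bits of %rax,
 * while 32-bit Mach traps are invoked with a negated trap number in
 * %eax (hence the sign test and negation below).
 */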
void
thread_syscall_return(
	kern_return_t ret)
{
	thread_t thr_act = current_thread();

	if (thread_is_64bit(thr_act)) {
		x86_saved_state64_t *regs;

		regs = USER_REGS64(thr_act);

		if (kdebug_enable && ((regs->rax & SYSCALL_CLASS_MASK) == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT))) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_SC, ((int) (regs->rax & SYSCALL_NUMBER_MASK)))
					| DBG_FUNC_END,
				ret, 0, 0, 0, 0);
		}
		regs->rax = ret;

	} else {
		x86_saved_state32_t *regs;

		regs = USER_REGS32(thr_act);

		if (kdebug_enable && ((int) regs->eax < 0)) {
			/* Mach trap */
			KERNEL_DEBUG_CONSTANT(
				MACHDBG_CODE(DBG_MACH_EXCP_SC, -((int) regs->eax))
					| DBG_FUNC_END,
				ret, 0, 0, 0, 0);
		}
		regs->eax = ret;
	}
	thread_exception_return();
	/*NOTREACHED*/
}


#if MACH_KDB
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
extern boolean_t db_breakpoints_inserted;

void
thread_kdb_return(void)
{
	thread_t thr_act = current_thread();
	x86_saved_state_t *iss = USER_STATE(thr_act);

	if (is_saved_state64(iss)) {
		x86_saved_state64_t *regs;

		regs = saved_state64(iss);

		if (kdb_trap(regs->isf.trapno, (int)regs->isf.err, (void *)regs)) {
			thread_exception_return();
			/*NOTREACHED*/
		}

	} else {
		x86_saved_state32_t *regs;

		regs = saved_state32(iss);

		if (kdb_trap(regs->trapno, regs->err, (void *)regs)) {
			thread_exception_return();
			/*NOTREACHED*/
		}
	}
}

#endif /* MACH_KDB */

void
user_page_fault_continue(
	kern_return_t kr)
{
	thread_t    thread = current_thread();
	ast_t       *myast;
	boolean_t   intr;
	user_addr_t vaddr;
#if MACH_KDB
	x86_saved_state_t *regs = USER_STATE(thread);
	int err;
	int trapno;

	assert((is_saved_state32(regs) && !thread_is_64bit(thread)) ||
	       (is_saved_state64(regs) && thread_is_64bit(thread)));
#endif

	if (thread_is_64bit(thread)) {
		x86_saved_state64_t *uregs;

		uregs = USER_REGS64(thread);

#if MACH_KDB
		trapno = uregs->isf.trapno;
		err = uregs->isf.err;
#endif
		vaddr = (user_addr_t)uregs->cr2;
	} else {
		x86_saved_state32_t *uregs;

		uregs = USER_REGS32(thread);

#if MACH_KDB
		trapno = uregs->trapno;
		err = uregs->err;
#endif
		vaddr = uregs->cr2;
	}

	if ((kr == KERN_SUCCESS) || (kr == KERN_ABORTED)) {
#if MACH_KDB
		if (!db_breakpoints_inserted) {
			db_set_breakpoints();
		}
		if (db_watchpoint_list &&
		    db_watchpoints_inserted &&
		    (err & T_PF_WRITE) &&
		    db_find_watchpoint(thread->map,
				       (vm_offset_t)vaddr,
				       saved_state32(regs)))
			kdb_trap(T_WATCHPOINT, 0, saved_state32(regs));
#endif /* MACH_KDB */
		intr = ml_set_interrupts_enabled(FALSE);
		myast = ast_pending();
		while (*myast & AST_ALL) {
			ast_taken(AST_ALL, intr);
			ml_set_interrupts_enabled(FALSE);
			myast = ast_pending();
		}
		ml_set_interrupts_enabled(intr);

		thread_exception_return();
		/*NOTREACHED*/
	}

#if MACH_KDB
	if (debug_all_traps_with_kdb &&
	    kdb_trap(trapno, err, saved_state32(regs))) {
		thread_exception_return();
		/*NOTREACHED*/
	}
#endif /* MACH_KDB */

	i386_exception(EXC_BAD_ACCESS, kr, vaddr);
	/*NOTREACHED*/
}

/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
	uint32_t fault_addr;
	uint32_t recover_addr;
};

extern struct recovery recover_table[];
extern struct recovery recover_table_end[];
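
/*
 * Each recovery entry pairs the address of a potentially faulting
 * instruction in a copy routine with the address of its recovery stub.
 * On a kernel fault, kernel_trap() scans this table (and then the
 * per-thread thread->recover address) and, on a match, simply resets
 * EIP to the recovery address.  The entries themselves are assumed to
 * be emitted by the assembly copy routines into the section bounded by
 * recover_table and recover_table_end.
 */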

const char *trap_type[] = {TRAP_NAMES};
unsigned TRAP_TYPES = sizeof(trap_type)/sizeof(trap_type[0]);


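/*
 * Bit 10 of DR7 is reserved and always reads as one; writing 0x400
 * therefore clears all of the local/global breakpoint-enable bits,
 * disabling the four hardware breakpoints while leaving the register
 * in its architectural reset state.
 */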
static inline void
reset_dr7(void)
{
	uint32_t dr7 = 0x400; /* magic dr7 reset value */
	__asm__ volatile("movl %0,%%dr7" : : "r" (dr7));
}
#if MACH_KDP
unsigned kdp_has_active_watchpoints = 0;
#endif
/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.
 */
void
kernel_trap(
	x86_saved_state_t *state)
{
	x86_saved_state32_t *saved_state;
	int             code;
	user_addr_t     vaddr;
	int             type;
	vm_map_t        map = 0;	/* protected by T_PAGE_FAULT */
	kern_return_t   result = KERN_FAILURE;
	thread_t        thread;
	ast_t           *myast;
	boolean_t       intr;
	vm_prot_t       prot;
	struct recovery *rp;
	vm_offset_t     kern_ip;
	int             fault_in_copy_window = -1;
	int             is_user = 0;
#if MACH_KDB
	pt_entry_t      *pte;
#endif /* MACH_KDB */

	thread = current_thread();

	if (is_saved_state64(state))
		panic("kernel_trap(%p) with 64-bit state", state);
	saved_state = saved_state32(state);

	vaddr = (user_addr_t)saved_state->cr2;
	type = saved_state->trapno;
	code = saved_state->err & 0xffff;
	intr = (saved_state->efl & EFL_IF) != 0;	/* state of ints at trap */

	kern_ip = (vm_offset_t)saved_state->eip;

	myast = ast_pending();

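	/*
	 * Give the CHUD AST hook, if one is installed, first crack at the
	 * pending AST bits; with no hook installed, any pending CHUD AST
	 * bits are simply discarded.
	 */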
	if (perfASTHook) {
		if (*myast & AST_CHUD_ALL)
			perfASTHook(type, NULL, 0, 0);
	} else
		*myast &= ~AST_CHUD_ALL;

	/*
	 * Is there a hook?
	 */
	if (perfTrapHook) {
		if (perfTrapHook(type, NULL, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			return;
		}
	}

#if CONFIG_DTRACE
	if (tempDTraceTrapHook) {
		if (tempDTraceTrapHook(type, state, 0, 0) == KERN_SUCCESS) {
			/*
			 * If it succeeds, we are done...
			 */
			return;
		}
	}
#endif /* CONFIG_DTRACE */

	/*
	 * We come here with interrupts off, as we don't want to recurse
	 * on preemption below; but we do want to re-enable interrupts
	 * as soon as we possibly can, to hold latency down.
	 */
	if (T_PREEMPT == type) {
		ast_taken(AST_PREEMPTION, FALSE);

		KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
				      0, 0, 0, kern_ip, 0);
		return;
	}

	if (T_PAGE_FAULT == type) {
		/*
		 * assume we're faulting in the kernel map
		 */
		map = kernel_map;

		if (thread != THREAD_NULL && thread->map != kernel_map) {
			vm_offset_t copy_window_base;
			vm_offset_t kvaddr;
			int         window_index;

			kvaddr = (vm_offset_t)vaddr;
			/*
			 * We must determine, while preemption is disabled
			 * on this processor, whether the fault occurred in
			 * a copy window, so that we only need to look at
			 * the windows associated with this processor.
			 */
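			/*
			 * Illustrative example of the window arithmetic
			 * below: a fault at copy_window_base + NBPDE + 0x1000
			 * gives window_index 1 and an offset of 0x1000 into
			 * that window, so the user address is rebuilt as
			 * copy_window[1].user_base + 0x1000.
			 */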
			copy_window_base = current_cpu_datap()->cpu_copywindow_base;

			if (kvaddr >= copy_window_base && kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS))) {

				window_index = (kvaddr - copy_window_base) / NBPDE;

				if (thread->machine.copy_window[window_index].user_base != (user_addr_t)-1) {

					kvaddr -= (copy_window_base + (NBPDE * window_index));
					vaddr = thread->machine.copy_window[window_index].user_base + kvaddr;

					map = thread->map;
					fault_in_copy_window = window_index;
				}
				is_user = -1;
			}
		}
	}
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
			      (int)(vaddr >> 32), (int)vaddr, is_user, kern_ip, 0);


	(void) ml_set_interrupts_enabled(intr);

	switch (type) {

	    case T_NO_FPU:
		fpnoextflt();
		return;

	    case T_FPU_FAULT:
		fpextovrflt();
		return;

	    case T_FLOATING_POINT_ERROR:
		fpexterrflt();
		return;

	    case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt();
		return;
	    case T_DEBUG:
#if MACH_KDP
		if ((saved_state->efl & EFL_TF) == 0
		    && !kdp_has_active_watchpoints)
#else
		if ((saved_state->efl & EFL_TF) == 0)
#endif
		{
			/* We've somehow encountered a debug
			 * register match that does not belong
			 * to the kernel debugger.
			 * This isn't supposed to happen.
			 */
			reset_dr7();
			return;
		}
		goto debugger_entry;
	    case T_PAGE_FAULT:
		/*
		 * If the current map is a submap of the kernel map,
		 * and the address is within that map, fault on that
		 * map.  If the same check is done in vm_fault
		 * (vm_map_lookup), we may deadlock on the kernel map
		 * lock.
		 */

		prot = VM_PROT_READ;

		if (code & T_PF_WRITE)
			prot |= VM_PROT_WRITE;
#if PAE
		if (code & T_PF_EXECUTE)
			prot |= VM_PROT_EXECUTE;
#endif

#if MACH_KDB
		/*
		 * Check for watchpoint on kernel static data.
		 * vm_fault would fail in this case
		 */
		if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted &&
		    (code & T_PF_WRITE) && vaddr < vm_map_max(map) &&
		    ((*(pte = pmap_pte(kernel_pmap, (vm_map_offset_t)vaddr))) & INTEL_PTE_WRITE) == 0) {
			pmap_store_pte(
				pte,
				*pte | INTEL_PTE_VALID | INTEL_PTE_WRITE);
			/* XXX need invltlb here? */

			result = KERN_SUCCESS;
			goto look_for_watchpoints;
		}
#endif /* MACH_KDB */

#if CONFIG_DTRACE
		if (thread->options & TH_OPT_DTRACE) {	/* Executing under dtrace_probe? */
			if (dtrace_tally_fault(vaddr)) { /* Should a fault under dtrace be ignored? */
				/*
				 * DTrace has "anticipated" the possibility of this fault, and has
				 * established the suitable recovery state. Drop down now into the
				 * recovery handling code in "case T_GENERAL_PROTECTION:".
				 */
				goto FALL_THROUGH;
			}
		}
#endif /* CONFIG_DTRACE */

		result = vm_fault(map,
				  vm_map_trunc_page(vaddr),
				  prot,
				  FALSE,
				  THREAD_UNINT, NULL, 0);

#if MACH_KDB
		if (result == KERN_SUCCESS) {
			/*
			 * Look for watchpoints
			 */
look_for_watchpoints:
			if (map == kernel_map && db_watchpoint_list && db_watchpoints_inserted && (code & T_PF_WRITE) &&
			    db_find_watchpoint(map, vaddr, saved_state))
				kdb_trap(T_WATCHPOINT, 0, saved_state);
		}
#endif /* MACH_KDB */

		if (result == KERN_SUCCESS) {

			if (fault_in_copy_window != -1) {
				pt_entry_t *updp;
				pt_entry_t *kpdp;

				/*
				 * in case there was no page table assigned
				 * for the user base address and the pmap
				 * got 'expanded' due to this fault, we'll
				 * copy in the descriptor
				 *
				 * we're either setting the page table descriptor
				 * to the same value or it was 0... no need
				 * for a TLB flush in either case
				 */

				ml_set_interrupts_enabled(FALSE);
				updp = pmap_pde(map->pmap, thread->machine.copy_window[fault_in_copy_window].user_base);
				assert(updp);
				if (0 == updp) panic("trap: updp 0"); /* XXX DEBUG */
				kpdp = current_cpu_datap()->cpu_copywindow_pdp;
				kpdp += fault_in_copy_window;

#if JOE_DEBUG
				if (*kpdp && (*kpdp & PG_FRAME) != (*updp & PG_FRAME))
					panic("kernel_fault: user pdp doesn't match - updp = %p, kpdp = %p\n", updp, kpdp);
#endif
				pmap_store_pte(kpdp, *updp);

				(void) ml_set_interrupts_enabled(intr);
			}
			return;
		}
		/*
		 * fall through
		 */
#if CONFIG_DTRACE
FALL_THROUGH:
#endif /* CONFIG_DTRACE */

	    case T_GENERAL_PROTECTION:
		/*
		 * If there is a failure recovery address
		 * for this fault, go there.
		 */
		for (rp = recover_table; rp < recover_table_end; rp++) {
			if (kern_ip == rp->fault_addr) {
				set_recovery_ip(saved_state, rp->recover_addr);
				return;
			}
		}

		/*
		 * Check thread recovery address also.
		 */
		if (thread->recover) {
			set_recovery_ip(saved_state, thread->recover);
			thread->recover = 0;
			return;
		}
		/*
		 * Unanticipated page-fault errors in kernel
		 * should not happen.
		 *
		 * fall through...
		 */

	    default:
		/*
		 * Exception 15 is reserved but some chips may generate it
		 * spuriously. Seen at startup on AMD Athlon-64.
		 */
		if (type == 15) {
			kprintf("kernel_trap() ignoring spurious trap 15\n");
			return;
		}
debugger_entry:
		/* Ensure that the i386_kernel_state at the base of the
		 * current thread's stack (if any) is synchronized with the
		 * context at the moment of the trap, to facilitate
		 * access through the debugger.
		 */
		sync_iss_to_iks(saved_state);
#if MACH_KDB
restart_debugger:
#endif /* MACH_KDB */
#if MACH_KDP
		if (current_debugger != KDB_CUR_DB) {
			if (kdp_i386_trap(type, saved_state, result, vaddr))
				return;
		} else {
#endif /* MACH_KDP */
#if MACH_KDB
			if (kdb_trap(type, code, saved_state)) {
				if (switch_debugger) {
					current_debugger = KDP_CUR_DB;
					switch_debugger = 0;
					goto restart_debugger;
				}
				return;
			}
#endif /* MACH_KDB */
#if MACH_KDP
		}
#endif
	}

	panic_trap(saved_state);
	/*
	 * NO RETURN
	 */
}


static void
set_recovery_ip(x86_saved_state32_t *saved_state, vm_offset_t ip)
{
	saved_state->eip = ip;
}


static void
panic_trap(x86_saved_state32_t *regs)
{
	const char *trapname = "Unknown";
	uint32_t cr0 = get_cr0();
	uint32_t cr2 = get_cr2();
	uint32_t cr3 = get_cr3();
	uint32_t cr4 = get_cr4();

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	kprintf("panic trap number 0x%x, eip 0x%x\n", regs->trapno, regs->eip);
	kprintf("cr0 0x%08x cr2 0x%08x cr3 0x%08x cr4 0x%08x\n",
		cr0, cr2, cr3, cr4);

	if (regs->trapno < TRAP_TYPES)
		trapname = trap_type[regs->trapno];
#undef panic
	panic("Kernel trap at 0x%08x, type %d=%s, registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "CR2: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x, CS: 0x%08x, DS: 0x%08x\n"
	      "Error code: 0x%08x\n",
	      regs->eip, regs->trapno, trapname, cr0, cr2, cr3, cr4,
	      regs->eax, regs->ebx, regs->ecx, regs->edx,
	      regs->cr2, regs->ebp, regs->esi, regs->edi,
	      regs->efl, regs->eip, regs->cs, regs->ds, regs->err);
	/*
	 * This next statement is not executed, but it is needed to stop
	 * the compiler from using tail-call optimization for the panic
	 * call, which would confuse the subsequent backtrace.
	 */
	cr0 = 0;
}

extern void kprintf_break_lock(void);


/*
 * Called from locore on a special reserved stack after a double-fault
 * is taken in kernel space.
 * Kernel stack overflow is one route here.
 */
void
panic_double_fault(
#if CONFIG_NO_PANIC_STRINGS
	__unused int code
#else
	int code
#endif
	)
{
#if MACH_KDP || !CONFIG_NO_PANIC_STRINGS
	struct i386_tss *my_ktss = current_ktss();
#endif

	/* Set postcode (DEBUG only) */
	postcode(PANIC_DOUBLE_FAULT);

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

#if MACH_KDP
	/*
	 * Print backtrace leading to first fault:
	 */
	panic_i386_backtrace((void *) my_ktss->ebp, 10);
#endif

	panic("Double fault at 0x%08x, thread:%p, code:0x%x, "
	      "registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x\n",
	      my_ktss->eip, current_thread(), code,
	      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
	      my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
	      my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
	      my_ktss->eflags, my_ktss->eip);
}


/*
 * Called from locore on a special reserved stack after a machine-check
 */
void
panic_machine_check(
#if CONFIG_NO_PANIC_STRINGS
	__unused int code
#else
	int code
#endif
	)
{
#if !CONFIG_NO_PANIC_STRINGS
	struct i386_tss *my_ktss = current_ktss();
#endif

	/* Set postcode (DEBUG only) */
	postcode(PANIC_MACHINE_CHECK);

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	/*
	 * Dump the contents of the machine check MSRs (if any).
	 */
	mca_dump();

	/*
	 * And that's all folks, we don't attempt recovery...
	 */
	panic("Machine-check at 0x%08x, thread:%p, code:0x%x, "
	      "registers:\n"
	      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
	      "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
	      "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
	      "EFL: 0x%08x, EIP: 0x%08x\n",
	      my_ktss->eip, current_thread(), code,
	      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
	      my_ktss->eax, my_ktss->ebx, my_ktss->ecx, my_ktss->edx,
	      my_ktss->esp, my_ktss->ebp, my_ktss->esi, my_ktss->edi,
	      my_ktss->eflags, my_ktss->eip);
}

void
panic_double_fault64(x86_saved_state_t *esp)
{
	/* Set postcode (DEBUG only) */
	postcode(PANIC_DOUBLE_FAULT);

	/*
	 * Issue an I/O port read if one has been requested - this is an
	 * event logic analyzers can use as a trigger point.
	 */
	panic_io_port_read();

	/*
	 * Break kprintf lock in case of recursion,
	 * and record originally faulted instruction address.
	 */
	kprintf_break_lock();

	/*
	 * Dump the interrupt stack frame at last kernel entry.
	 */
	if (is_saved_state64(esp)) {
#if !CONFIG_NO_PANIC_STRINGS
		x86_saved_state64_t *ss64p = saved_state64(esp);
#endif
		panic("Double fault thread:%p, trapno:0x%x, err:0x%qx, "
		      "registers:\n"
		      "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
		      "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
		      "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
		      "R8: 0x%016qx, R9: 0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
		      "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
		      "RFL: 0x%016qx, RIP: 0x%016qx, CR2: 0x%016qx\n",
		      current_thread(), ss64p->isf.trapno, ss64p->isf.err,
		      get_cr0(), get_cr2(), get_cr3(), get_cr4(),
		      ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
		      ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
		      ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
		      ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
		      ss64p->isf.rflags, ss64p->isf.rip, ss64p->cr2);
	} else {
#if !CONFIG_NO_PANIC_STRINGS
		x86_saved_state32_t *ss32p = saved_state32(esp);
#endif
		panic("Double fault at 0x%08x, thread:%p, trapno:0x%x, err:0x%x, "
852 "registers:\n"
853 "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
854 "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
855 "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
856 "EFL: 0x%08x, EIP: 0x%08x\n",
857 ss32p->eip, current_thread(), ss32p->trapno, ss32p->err,
858 get_cr0(), get_cr2(), get_cr3(), get_cr4(),
859 ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
860 ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
861 ss32p->efl, ss32p->eip);
862 }
863 }
864
865 /*
866 * Machine check handler for 64-bit.
867 */
868 void
869 panic_machine_check64(x86_saved_state_t *esp)
870 {
871 /* Set postcode (DEBUG only) */
872 postcode(PANIC_MACHINE_CHECK);
873
874 /*
875 * Issue an I/O port read if one has been requested - this is an
876 * event logic analyzers can use as a trigger point.
877 */
878 panic_io_port_read();
879
880 /*
881 * Break kprintf lock in case of recursion,
882 * and record originally faulted instruction address.
883 */
884 kprintf_break_lock();
885
886 /*
887 * Dump the contents of the machine check MSRs (if any).
888 */
889 mca_dump();
890
891 /*
892 * And that's all folks, we don't attempt recovery...
893 */
894 if (is_saved_state64(esp)) {
895 #if !CONFIG_NO_PANIC_STRINGS
896 x86_saved_state64_t *ss64p = saved_state64(esp);
897 #endif
898 panic("Machine Check thread:%p, trapno:0x%x, err:0x%qx, "
899 "registers:\n"
900 "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
901 "RAX: 0x%016qx, RBX: 0x%016qx, RCX: 0x%016qx, RDX: 0x%016qx\n"
902 "RSP: 0x%016qx, RBP: 0x%016qx, RSI: 0x%016qx, RDI: 0x%016qx\n"
903 "R8: 0x%016qx, R9: 0x%016qx, R10: 0x%016qx, R11: 0x%016qx\n"
904 "R12: 0x%016qx, R13: 0x%016qx, R14: 0x%016qx, R15: 0x%016qx\n"
905 "RFL: 0x%016qx, RIP: 0x%016qx\n",
906 current_thread(), ss64p->isf.trapno, ss64p->isf.err,
907 get_cr0(), get_cr2(), get_cr3(), get_cr4(),
908 ss64p->rax, ss64p->rbx, ss64p->rcx, ss64p->rdx,
909 ss64p->isf.rsp, ss64p->rbp, ss64p->rsi, ss64p->rdi,
910 ss64p->r8, ss64p->r9, ss64p->r10, ss64p->r11,
911 ss64p->r12, ss64p->r13, ss64p->r14, ss64p->r15,
912 ss64p->isf.rflags, ss64p->isf.rip);
913 } else {
914 #if !CONFIG_NO_PANIC_STRINGS
915 x86_saved_state32_t *ss32p = saved_state32(esp);
916 #endif
917 panic("Machine Check at 0x%08x, thread:%p, trapno:0x%x, err:0x%x, "
918 "registers:\n"
919 "CR0: 0x%08x, CR2: 0x%08x, CR3: 0x%08x, CR4: 0x%08x\n"
920 "EAX: 0x%08x, EBX: 0x%08x, ECX: 0x%08x, EDX: 0x%08x\n"
921 "ESP: 0x%08x, EBP: 0x%08x, ESI: 0x%08x, EDI: 0x%08x\n"
922 "EFL: 0x%08x, EIP: 0x%08x\n",
923 ss32p->eip, current_thread(), ss32p->trapno, ss32p->err,
924 get_cr0(), get_cr2(), get_cr3(), get_cr4(),
925 ss32p->eax, ss32p->ebx, ss32p->ecx, ss32p->edx,
926 ss32p->uesp, ss32p->ebp, ss32p->esi, ss32p->edi,
927 ss32p->efl, ss32p->eip);
928 }
929 }
930
931 #if CONFIG_DTRACE
932 extern kern_return_t dtrace_user_probe(x86_saved_state_t *);
933 #endif
934
935 /*
936 * Trap from user mode.
937 */
938 void
939 user_trap(
940 x86_saved_state_t *saved_state)
941 {
942 int exc;
943 int err;
944 mach_exception_code_t code;
945 mach_exception_subcode_t subcode;
946 int type;
947 user_addr_t vaddr;
948 vm_prot_t prot;
949 thread_t thread = current_thread();
950 ast_t *myast;
951 kern_return_t kret;
952 user_addr_t rip;
953
954 assert((is_saved_state32(saved_state) && !thread_is_64bit(thread)) ||
955 (is_saved_state64(saved_state) && thread_is_64bit(thread)));
956
957 if (is_saved_state64(saved_state)) {
958 x86_saved_state64_t *regs;
959
960 regs = saved_state64(saved_state);
961
962 type = regs->isf.trapno;
963 err = regs->isf.err & 0xffff;
964 vaddr = (user_addr_t)regs->cr2;
965 rip = (user_addr_t)regs->isf.rip;
966 } else {
967 x86_saved_state32_t *regs;
968
969 regs = saved_state32(saved_state);
970
971 type = regs->trapno;
972 err = regs->err & 0xffff;
973 vaddr = (user_addr_t)regs->cr2;
974 rip = (user_addr_t)regs->eip;
975 }
976
977 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
978 (int)(vaddr>>32), (int)vaddr, (int)(rip>>32), (int)rip, 0);
979
980 code = 0;
981 subcode = 0;
982 exc = 0;
983
984 #if DEBUG_TRACE
	kprintf("user_trap(%p) type=%d vaddr=0x%016llx\n",
		saved_state, type, vaddr);
#endif
	myast = ast_pending();
	if (perfASTHook) {
		if (*myast & AST_CHUD_ALL) {
			perfASTHook(type, saved_state, 0, 0);
		}
	} else {
		*myast &= ~AST_CHUD_ALL;
	}

	/* Is there a hook? */
	if (perfTrapHook) {
		if (perfTrapHook(type, saved_state, 0, 0) == KERN_SUCCESS)
			return;	/* If it succeeds, we are done... */
	}

	/*
	 * DTrace does not consume all user traps, only INT_3's for now.
	 * Avoid needlessly calling tempDTraceTrapHook here, and let the
	 * INT_3 case handle them.
	 */

	switch (type) {

	    case T_DIVIDE_ERROR:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_DIV;
		break;

	    case T_DEBUG:
		{
			pcb_t pcb;
			unsigned int clear = 0;
			/*
			 * get dr6 and set it in the thread's pcb before
			 * returning to userland
			 */
			pcb = thread->machine.pcb;
			if (pcb->ids) {
				/*
				 * We can get and set the status register
				 * in 32-bit mode even on a 64-bit thread
				 * because the high order bits are not
				 * used on x86_64
				 */
				if (thread_is_64bit(thread)) {
					uint32_t dr6;
					x86_debug_state64_t *ids = pcb->ids;

					__asm__ volatile ("movl %%db6, %0" : "=r" (dr6));
					ids->dr6 = dr6;
				} else { /* 32 bit thread */
					x86_debug_state32_t *ids = pcb->ids;
					__asm__ volatile ("movl %%db6, %0" : "=r" (ids->dr6));
				}
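				/*
				 * Clear %db6 so stale status bits are not
				 * reported on a subsequent debug exception.
				 */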
				__asm__ volatile ("movl %0, %%db6" : : "r" (clear));
			}
			exc = EXC_BREAKPOINT;
			code = EXC_I386_SGL;
			break;
		}
	    case T_INT3:
#if CONFIG_DTRACE
		if (dtrace_user_probe(saved_state) == KERN_SUCCESS)
			return; /* If it succeeds, we are done... */
#endif
		exc = EXC_BREAKPOINT;
		code = EXC_I386_BPT;
		break;

	    case T_OVERFLOW:
		exc = EXC_ARITHMETIC;
		code = EXC_I386_INTO;
		break;

	    case T_OUT_OF_BOUNDS:
		exc = EXC_SOFTWARE;
		code = EXC_I386_BOUND;
		break;

	    case T_INVALID_OPCODE:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	    case T_NO_FPU:
		fpnoextflt();
		return;

	    case T_FPU_FAULT:
		fpextovrflt(); /* Propagates exception directly, doesn't return */
		return;

	    case T_INVALID_TSS: /* invalid TSS == iret with NT flag set */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVTSSFLT;
		subcode = err;
		break;

	    case T_SEGMENT_NOT_PRESENT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_SEGNPFLT;
		subcode = err;
		break;

	    case T_STACK_FAULT:
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_STKFLT;
		subcode = err;
		break;

	    case T_GENERAL_PROTECTION:
		/*
		 * There's a wide range of circumstances which generate this
		 * class of exception.  From user-space, many involve bad
		 * addresses (such as a non-canonical 64-bit address).
		 * So we map this to EXC_BAD_ACCESS (and thereby SIGSEGV).
		 * The trouble is that cr2 doesn't contain the faulting
		 * address; we'd need to decode the faulting instruction to
		 * really determine this.  We'll leave that to debuggers.
		 * However, attempted execution of privileged instructions
		 * (e.g. cli) also generates GP faults, so we map these to
		 * EXC_BAD_ACCESS (and thence SIGSEGV) as well, rather than
		 * the more accurate EXC_BAD_INSTRUCTION.  We just can't
		 * win!
		 */
		exc = EXC_BAD_ACCESS;
		code = EXC_I386_GPFLT;
		subcode = err;
		break;

	    case T_PAGE_FAULT:
		prot = VM_PROT_READ;

		if (err & T_PF_WRITE)
			prot |= VM_PROT_WRITE;
#if PAE
		if (err & T_PF_EXECUTE)
			prot |= VM_PROT_EXECUTE;
#endif
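		/*
		 * User faults wait ABORTSAFE so the fault can be interrupted;
		 * user_page_fault_continue() treats KERN_ABORTED like success
		 * and simply returns to user mode to retry the access.
		 */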
		kret = vm_fault(thread->map, vm_map_trunc_page(vaddr),
				prot, FALSE,
				THREAD_ABORTSAFE, NULL, 0);

		user_page_fault_continue(kret);

		/* NOTREACHED */
		break;

	    case T_SSE_FLOAT_ERROR:
		fpSSEexterrflt(); /* Propagates exception directly, doesn't return */
		return;


	    case T_FLOATING_POINT_ERROR:
		fpexterrflt(); /* Propagates exception directly, doesn't return */
		return;

	    case T_DTRACE_RET:
#if CONFIG_DTRACE
		if (dtrace_user_probe(saved_state) == KERN_SUCCESS)
			return; /* If it succeeds, we are done... */
#endif
		/*
		 * If we get an INT 0x7f when we do not expect to,
		 * treat it as an illegal instruction
		 */
		exc = EXC_BAD_INSTRUCTION;
		code = EXC_I386_INVOP;
		break;

	    default:
#if MACH_KGDB
		Debugger("Unanticipated user trap");
		return;
#endif /* MACH_KGDB */
#if MACH_KDB
		if (kdb_trap(type, err, saved_state32(saved_state)))
			return;
#endif /* MACH_KDB */
		panic("Unexpected user trap, type %d", type);
		return;
	}
	/* Note: Codepaths that directly return from user_trap() have pending
	 * ASTs processed in locore
	 */
	i386_exception(exc, code, subcode);
	/* NOTREACHED */
}


/*
 * Handle AST traps for i386.
 * Check for delayed floating-point exception from
 * AT-bus machines.
 */

extern void log_thread_action(thread_t, char *);

void
i386_astintr(int preemption)
{
	ast_t mask = AST_ALL;
	spl_t s;

	if (preemption)
		mask = AST_PREEMPTION;

	s = splsched();

	ast_taken(mask, s);

	splx(s);
}

/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
	int                      exc,
	mach_exception_code_t    code,
	mach_exception_subcode_t subcode)
{
	mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];

	codes[0] = code;	/* new exception interface */
	codes[1] = subcode;
	exception_triage(exc, codes, 2);
	/*NOTREACHED*/
}


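/*
 * If an urgent preemption AST is pending and we can take it here,
 * trigger the T_PREEMPT (0xff) software interrupt; kernel_trap()
 * fields that trap early and calls ast_taken(AST_PREEMPTION, ...)
 * with interrupts still disabled.
 */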
void
kernel_preempt_check(void)
{
	ast_t     *myast;
	boolean_t intr;

	/*
	 * Disable interrupts both to prevent preemption and to keep the
	 * AST state from changing via an interrupt handler making
	 * something runnable.
	 */
	intr = ml_set_interrupts_enabled(FALSE);

	myast = ast_pending();

	if ((*myast & AST_URGENT) && intr == TRUE && get_interrupt_level() == 0) {
		/*
		 * We can handle interrupts and preemptions
		 * at this point.
		 */
		ml_set_interrupts_enabled(intr);

		/*
		 * Now cause the preemption trap.
		 */
		__asm__ volatile (" int $0xff");
	} else {
		/*
		 * If interrupts were already disabled or we're in an
		 * interrupt context, we can't preempt; and if AST_URGENT
		 * isn't set, we don't want to.
		 */
		ml_set_interrupts_enabled(intr);
	}
}

#if MACH_KDB

extern void db_i386_state(x86_saved_state32_t *regs);

#include <ddb/db_output.h>

void
db_i386_state(
	x86_saved_state32_t *regs)
{
	db_printf("eip %8x\n", regs->eip);
	db_printf("trap %8x\n", regs->trapno);
	db_printf("err %8x\n", regs->err);
	db_printf("efl %8x\n", regs->efl);
	db_printf("ebp %8x\n", regs->ebp);
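	/*
	 * The "esp" slot of the 32-bit saved state doubles as the cr2
	 * field: the kernel esp pushed by pusha is overwritten with the
	 * fault address, which is presumably why regs->cr2 is printed
	 * under the "esp" label here.
	 */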
	db_printf("esp %8x\n", regs->cr2);
	db_printf("uesp %8x\n", regs->uesp);
	db_printf("cs %8x\n", regs->cs & 0xff);
	db_printf("ds %8x\n", regs->ds & 0xff);
	db_printf("es %8x\n", regs->es & 0xff);
	db_printf("fs %8x\n", regs->fs & 0xff);
	db_printf("gs %8x\n", regs->gs & 0xff);
	db_printf("ss %8x\n", regs->ss & 0xff);
	db_printf("eax %8x\n", regs->eax);
	db_printf("ebx %8x\n", regs->ebx);
	db_printf("ecx %8x\n", regs->ecx);
	db_printf("edx %8x\n", regs->edx);
	db_printf("esi %8x\n", regs->esi);
	db_printf("edi %8x\n", regs->edi);
}

#endif /* MACH_KDB */

/* Synchronize a thread's x86_kernel_state32 (if any) with the given
 * x86_saved_state32_t obtained from the trap/IPI handler; called in
 * kernel_trap() prior to entering the debugger, and when receiving
 * an "MP_KDP" IPI.
 */

void
sync_iss_to_iks(x86_saved_state32_t *saved_state)
{
	struct x86_kernel_state32 *iks;
	vm_offset_t kstack;
	boolean_t record_active_regs = FALSE;

	if ((kstack = current_thread()->kernel_stack) != 0) {
		x86_saved_state32_t *regs;

		regs = saved_state;

		iks = STACK_IKS(kstack);

		/*
		 * Did we take the trap/interrupt in kernel mode?
		 */
		if (regs == USER_REGS32(current_thread()))
			record_active_regs = TRUE;
		else {
			iks->k_ebx = regs->ebx;
			iks->k_esp = (int)regs;
			iks->k_ebp = regs->ebp;
			iks->k_edi = regs->edi;
			iks->k_esi = regs->esi;
			iks->k_eip = regs->eip;
		}
	}

	if (record_active_regs == TRUE) {
		/*
		 * Show the trap handler path
		 */
		__asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
		__asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
		__asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
		__asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
		__asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
		/*
		 * "Current" instruction pointer
		 */
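		/*
		 * The "$1f" operand below takes the address of the local
		 * label "1" defined immediately after the mov, so k_eip
		 * records the instruction pointer at the point of capture.
		 */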
		__asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
	}
}

/*
 * This is used by the NMI interrupt handler (from mp.c) to
 * unconditionally sync the trap handler context to the IKS,
 * irrespective of whether the NMI was fielded in kernel
 * or user space.
 */
void
sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) {
	struct x86_kernel_state32 *iks;
	vm_offset_t kstack;

	if ((kstack = current_thread()->kernel_stack) != 0) {
		iks = STACK_IKS(kstack);
		/*
		 * Display the trap handler path.
		 */
		__asm__ volatile("movl %%ebx, %0" : "=m" (iks->k_ebx));
		__asm__ volatile("movl %%esp, %0" : "=m" (iks->k_esp));
		__asm__ volatile("movl %%ebp, %0" : "=m" (iks->k_ebp));
		__asm__ volatile("movl %%edi, %0" : "=m" (iks->k_edi));
		__asm__ volatile("movl %%esi, %0" : "=m" (iks->k_esi));
		/*
		 * "Current" instruction pointer.
		 */
		__asm__ volatile("movl $1f, %0\n1:" : "=m" (iks->k_eip));
	}
}