/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Hardware trap/fault handler.
 */
#include <mach_ldebug.h>

#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>
#include <i386/panic_notify.h>
#include <i386/lapic.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/misc_protos.h>
#include <kern/debug.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <sys/kdebug.h>
#include <kperf/kperf.h>
#include <prng/random.h>
#include <prng/entropy.h>

#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/proc_reg.h>
#include <i386/machine_routines.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif

#include <mach/i386/syscall_sw.h>

#include <libkern/OSDebug.h>
#include <i386/cpu_threads.h>
#include <machine/pal_routines.h>
extern void throttle_lowpri_io(int);
extern void kprint_state(x86_saved_state64_t *saved_state);
#if DEVELOPMENT || DEBUG
int insnstream_force_cacheline_mismatch = 0;
extern int panic_on_cacheline_mismatch;
extern char panic_on_trap_procname[];
extern uint32_t panic_on_trap_mask;
#endif
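
/*
 * Number of bytes of the faulting user instruction stream to capture on an
 * unhandled trap (populated from a boot-arg; the block comment on
 * copy_instruction_stream() below refers to it as `insn_capcnt').
 */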
extern int insn_copyin_count;
/*
 * Forward declarations
 */
static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl, kern_return_t fault_result) __dead2;
static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip);
#if DEVELOPMENT || DEBUG
static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code, bool inspect_cacheline);
#else
static __attribute__((noinline)) void copy_instruction_stream(thread_t thread, uint64_t rip, int trap_code);
#endif
#if CONFIG_DTRACE
/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */

extern boolean_t dtrace_tally_fault(user_addr_t);
extern boolean_t dtrace_handle_trap(int, x86_saved_state_t *);
#endif /* CONFIG_DTRACE */

#ifdef MACH_BSD
extern char *proc_name_address(void *p);
#endif /* MACH_BSD */

extern boolean_t pmap_smep_enabled;
extern boolean_t pmap_smap_enabled;
__attribute__((noreturn))
void
thread_syscall_return(
    kern_return_t ret)
{
    thread_t    thr_act = current_thread();
    boolean_t   is_mach;
    int         code;

    pal_register_cache_state(thr_act, DIRTY);

    if (thread_is_64bit_addr(thr_act)) {
        x86_saved_state64_t *regs;

        regs = USER_REGS64(thr_act);

        code = (int) (regs->rax & SYSCALL_NUMBER_MASK);
        is_mach = (regs->rax & SYSCALL_CLASS_MASK)
            == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT);
        if (kdebug_enable && is_mach) {
            /* Mach trap */
            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                MACHDBG_CODE(DBG_MACH_EXCP_SC, code) | DBG_FUNC_END,
                ret, 0, 0, 0, 0);
        }
        regs->rax = ret;
#if DEBUG
        if (is_mach) {
            DEBUG_KPRINT_SYSCALL_MACH(
                "thread_syscall_return: 64-bit mach ret=%u\n",
                ret);
        } else {
            DEBUG_KPRINT_SYSCALL_UNIX(
                "thread_syscall_return: 64-bit unix ret=%u\n",
                ret);
        }
#endif
    } else {
        x86_saved_state32_t *regs;

        regs = USER_REGS32(thr_act);

        code = ((int) regs->eax);
        is_mach = (code < 0);
        if (kdebug_enable && is_mach) {
            /* Mach trap */
            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                MACHDBG_CODE(DBG_MACH_EXCP_SC, -code) | DBG_FUNC_END,
                ret, 0, 0, 0, 0);
        }
        regs->eax = ret;
#if DEBUG
        if (is_mach) {
            DEBUG_KPRINT_SYSCALL_MACH(
                "thread_syscall_return: 32-bit mach ret=%u\n",
                ret);
        } else {
            DEBUG_KPRINT_SYSCALL_UNIX(
                "thread_syscall_return: 32-bit unix ret=%u\n",
                ret);
        }
#endif
    }

#if DEBUG || DEVELOPMENT
    kern_allocation_name_t
    prior __assert_only = thread_get_kernel_state(thr_act)->allocation_name;
    assertf(prior == NULL, "thread_set_allocation_name(\"%s\") not cleared", kern_allocation_get_name(prior));
#endif /* DEBUG || DEVELOPMENT */

    throttle_lowpri_io(1);

    thread_exception_return();
    /*NOTREACHED*/
}
/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
    uintptr_t fault_addr;
    uintptr_t recover_addr;
};
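
/*
 * recover_table[] (bounded by recover_table_end) lists one
 * (fault_addr, recover_addr) pair per faultable copyio instruction;
 * on an otherwise-unhandled kernel fault, kernel_trap() scans the
 * table and, on a fault_addr match, redirects RIP to the paired
 * recovery address via set_recovery_ip().
 */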
extern struct recovery recover_table[];
extern struct recovery recover_table_end[];

const char *trap_type[] = {TRAP_NAMES};
unsigned TRAP_TYPES = sizeof(trap_type) / sizeof(trap_type[0]);

extern void PE_incoming_interrupt(int interrupt);
#if defined(__x86_64__) && DEBUG
void
kprint_state(x86_saved_state64_t *saved_state)
{
    kprintf("current_cpu_datap() 0x%lx\n", (uintptr_t)current_cpu_datap());
    kprintf("Current GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_GS_BASE));
    kprintf("Kernel  GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_KERNEL_GS_BASE));
    kprintf("state at 0x%lx:\n", (uintptr_t) saved_state);

    kprintf("      rdi    0x%llx\n", saved_state->rdi);
    kprintf("      rsi    0x%llx\n", saved_state->rsi);
    kprintf("      rdx    0x%llx\n", saved_state->rdx);
    kprintf("      r10    0x%llx\n", saved_state->r10);
    kprintf("      r8     0x%llx\n", saved_state->r8);
    kprintf("      r9     0x%llx\n", saved_state->r9);

    kprintf("      cr2    0x%llx\n", saved_state->cr2);
    kprintf("real  cr2    0x%lx\n", get_cr2());
    kprintf("      r15    0x%llx\n", saved_state->r15);
    kprintf("      r14    0x%llx\n", saved_state->r14);
    kprintf("      r13    0x%llx\n", saved_state->r13);
    kprintf("      r12    0x%llx\n", saved_state->r12);
    kprintf("      r11    0x%llx\n", saved_state->r11);
    kprintf("      rbp    0x%llx\n", saved_state->rbp);
    kprintf("      rbx    0x%llx\n", saved_state->rbx);
    kprintf("      rcx    0x%llx\n", saved_state->rcx);
    kprintf("      rax    0x%llx\n", saved_state->rax);

    kprintf("      gs     0x%x\n", saved_state->gs);
    kprintf("      fs     0x%x\n", saved_state->fs);

    kprintf("  isf.trapno 0x%x\n", saved_state->isf.trapno);
    kprintf("  isf._pad   0x%x\n", saved_state->isf._pad);
    kprintf("  isf.trapfn 0x%llx\n", saved_state->isf.trapfn);
    kprintf("  isf.err    0x%llx\n", saved_state->isf.err);
    kprintf("  isf.rip    0x%llx\n", saved_state->isf.rip);
    kprintf("  isf.cs     0x%llx\n", saved_state->isf.cs);
    kprintf("  isf.rflags 0x%llx\n", saved_state->isf.rflags);
    kprintf("  isf.rsp    0x%llx\n", saved_state->isf.rsp);
    kprintf("  isf.ss     0x%llx\n", saved_state->isf.ss);
}
#endif
/*
 * Non-zero indicates the latency assert is enabled and capped at the
 * given number of absolute time units.
 */
uint64_t interrupt_latency_cap = 0;
boolean_t ilat_assert = FALSE;
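
/*
 * Usage: the cap is set via the `interrupt_latency_cap_us' boot-arg
 * (microseconds, converted to absolute time below) and the
 * panic-on-exceed behavior via `-interrupt_latency_assert_enable';
 * absent the boot-arg, the cap defaults to LockTimeOut.
 */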
void
interrupt_latency_tracker_setup(void)
{
    uint32_t ilat_cap_us;
    if (PE_parse_boot_argn("interrupt_latency_cap_us", &ilat_cap_us, sizeof(ilat_cap_us))) {
        interrupt_latency_cap = ilat_cap_us * NSEC_PER_USEC;
        nanoseconds_to_absolutetime(interrupt_latency_cap, &interrupt_latency_cap);
    } else {
        interrupt_latency_cap = LockTimeOut;
    }
    PE_parse_boot_argn("-interrupt_latency_assert_enable", &ilat_assert, sizeof(ilat_assert));
}
void
interrupt_reset_latency_stats(void)
{
    uint32_t i;
    for (i = 0; i < real_ncpus; i++) {
        cpu_data_ptr[i]->cpu_max_observed_int_latency =
            cpu_data_ptr[i]->cpu_max_observed_int_latency_vector = 0;
    }
}
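
/*
 * Report the worst interrupt latency observed across all CPUs as the
 * triple "<cpu> <vector> <latency>" (hex), matching the snprintf()
 * format below; the buffer is left untouched if nothing has been
 * recorded yet.
 */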
void
interrupt_populate_latency_stats(char *buf, unsigned bufsize)
{
    uint32_t i, tcpu = ~0;
    uint64_t cur_max = 0;

    for (i = 0; i < real_ncpus; i++) {
        if (cur_max < cpu_data_ptr[i]->cpu_max_observed_int_latency) {
            cur_max = cpu_data_ptr[i]->cpu_max_observed_int_latency;
            tcpu = i;
        }
    }

    if (tcpu < real_ncpus) {
        snprintf(buf, bufsize, "0x%x 0x%x 0x%llx", tcpu, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency_vector, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency);
    }
}
uint32_t interrupt_timer_coalescing_enabled = 1;
uint64_t interrupt_coalesced_timers;
/*
 * Handle interrupts:
 *  - local APIC interrupts (IPIs, timers, etc) are handled by the kernel,
 *  - device interrupts go to the platform expert.
 */
void
interrupt(x86_saved_state_t *state)
{
    uint64_t    rip;
    uint64_t    rsp;
    int         interrupt_num;
    boolean_t   user_mode = FALSE;
    int         ipl;
    int         cnum = cpu_number();
    cpu_data_t  *cdp = cpu_data_ptr[cnum];
    int         itype = DBG_INTR_TYPE_UNKNOWN;
    int         handled;

    x86_saved_state64_t *state64 = saved_state64(state);
    rip = state64->isf.rip;
    rsp = state64->isf.rsp;
    interrupt_num = state64->isf.trapno;
    if (state64->isf.cs & 0x03) {
        user_mode = TRUE;
    }

#if DEVELOPMENT || DEBUG
    uint64_t frameptr = is_saved_state64(state) ? state64->rbp : saved_state32(state)->ebp;
    uint32_t traptrace_index = traptrace_start(interrupt_num, rip, mach_absolute_time(), frameptr);
#endif

    if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage) {
        cpu_data_ptr[cnum]->cpu_hwIntpexits[interrupt_num]++;
    }

    if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT)) {
        itype = DBG_INTR_TYPE_IPI;
    } else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT)) {
        itype = DBG_INTR_TYPE_TIMER;
    } else {
        itype = DBG_INTR_TYPE_OTHER;
    }

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
        interrupt_num,
        (user_mode ? rip : VM_KERNEL_UNSLIDE(rip)),
        user_mode, itype, 0);

    SCHED_STATS_INC(interrupt_count);

#if CONFIG_TELEMETRY
    if (telemetry_needs_record) {
        telemetry_mark_curthread(user_mode, FALSE);
    }
#endif

    ipl = get_preemption_level();

    /*
     * Handle local APIC interrupts
     * else call platform expert for devices.
     */
    handled = lapic_interrupt(interrupt_num, state);

    if (!handled) {
        if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_CMCI_INTERRUPT)) {
            /*
             * CMCI can be signalled on any logical processor, and the kexts
             * that implement handling CMCI use IOKit to register handlers for
             * the CMCI vector, so if we see a CMCI, do not encode a CPU
             * number in bits 8:31 (since the vector is the same regardless of
             * the handling CPU).
             */
            PE_incoming_interrupt(interrupt_num);
        } else if (cnum <= lapic_max_interrupt_cpunum) {
            PE_incoming_interrupt((cnum << 8) | interrupt_num);
        }
    }

    if (__improbable(get_preemption_level() != ipl)) {
        panic("Preemption level altered by interrupt vector 0x%x: initial 0x%x, final: 0x%x\n", interrupt_num, ipl, get_preemption_level());
    }

    if (__improbable(cdp->cpu_nested_istack)) {
        cdp->cpu_nested_istack_events++;
    } else {
        uint64_t ctime = mach_absolute_time();
        uint64_t int_latency = ctime - cdp->cpu_int_event_time;
        uint64_t esdeadline, ehdeadline;
        /* Attempt to process deferred timers in the context of
         * this interrupt, unless interrupt time has already exceeded
         * TCOAL_ILAT_THRESHOLD.
         */
#define TCOAL_ILAT_THRESHOLD (30000ULL)

        if ((int_latency < TCOAL_ILAT_THRESHOLD) &&
            interrupt_timer_coalescing_enabled) {
            esdeadline = cdp->rtclock_timer.queue.earliest_soft_deadline;
            ehdeadline = cdp->rtclock_timer.deadline;
            if ((ctime >= esdeadline) && (ctime < ehdeadline)) {
                interrupt_coalesced_timers++;
                TCOAL_DEBUG(0x88880000 | DBG_FUNC_START, ctime, esdeadline, ehdeadline, interrupt_coalesced_timers, 0);
                rtclock_intr(state);
                TCOAL_DEBUG(0x88880000 | DBG_FUNC_END, ctime, esdeadline, interrupt_coalesced_timers, 0, 0);
            } else {
                TCOAL_DEBUG(0x77770000, ctime, cdp->rtclock_timer.queue.earliest_soft_deadline, cdp->rtclock_timer.deadline, interrupt_coalesced_timers, 0);
            }
        }

        if (__improbable(ilat_assert && (int_latency > interrupt_latency_cap) && !machine_timeout_suspended())) {
            panic("Interrupt vector 0x%x exceeded interrupt latency threshold, 0x%llx absolute time delta, prior signals: 0x%x, current signals: 0x%x", interrupt_num, int_latency, cdp->cpu_prior_signals, cdp->cpu_signals);
        }

        if (__improbable(int_latency > cdp->cpu_max_observed_int_latency)) {
            cdp->cpu_max_observed_int_latency = int_latency;
            cdp->cpu_max_observed_int_latency_vector = interrupt_num;
        }
    }

    /*
     * Having serviced the interrupt first, look at the interrupted stack depth.
     */
    if (!user_mode) {
        uint64_t depth = cdp->cpu_kernel_stack
            + sizeof(struct thread_kernel_state)
            + sizeof(struct i386_exception_link *)
            - rsp;
        if (__improbable(depth > kernel_stack_depth_max)) {
            kernel_stack_depth_max = (vm_offset_t)depth;
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
                (long) depth, (long) VM_KERNEL_UNSLIDE(rip), 0, 0, 0);
        }
    }

    if (cnum == master_cpu) {
        entropy_collect();
    }

    KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
        interrupt_num);

    assert(ml_get_interrupts_enabled() == FALSE);

#if DEVELOPMENT || DEBUG
    if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
        traptrace_end(traptrace_index, mach_absolute_time());
    }
#endif
}
static inline void
reset_dr7(void)
{
    long dr7 = 0x400; /* magic dr7 reset value; 32 bit on i386, 64 bit on x86_64 */
    __asm__ volatile ("mov %0,%%dr7" : : "r" (dr7));
}

#if MACH_KDP
unsigned kdp_has_active_watchpoints = 0;
#define NO_WATCHPOINTS (!kdp_has_active_watchpoints)
#else
#define NO_WATCHPOINTS 1
#endif
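
/*
 * NO_WATCHPOINTS is consulted by the T_DEBUG path in kernel_trap()
 * below: a debug-register match taken with no kernel-debugger
 * watchpoints active is unexpected, so the handler just resets DR7
 * and continues.
 */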
/*
 * Trap from kernel mode.  Only page-fault errors are recoverable,
 * and then only in special circumstances.  All other errors are
 * fatal.  Return value indicates if trap was handled.
 */
void
kernel_trap(
    x86_saved_state_t   *state,
    uintptr_t           *lo_spp)
{
    x86_saved_state64_t *saved_state;
    int                 code;
    user_addr_t         vaddr;
    int                 type;
    vm_map_t            map = 0;    /* protected by T_PAGE_FAULT */
    kern_return_t       result = KERN_FAILURE;
    kern_return_t       fault_result = KERN_SUCCESS;
    thread_t            thread;
    boolean_t           intr;
    vm_prot_t           prot;
    struct recovery     *rp;
    vm_offset_t         kern_ip;
    int                 is_user;
    int                 trap_pl = get_preemption_level();

    thread = current_thread();

    if (__improbable(is_saved_state32(state))) {
        panic("kernel_trap(%p) with 32-bit state", state);
    }
    saved_state = saved_state64(state);

    /* Record cpu where state was captured */
    saved_state->isf.cpu = cpu_number();

    vaddr = (user_addr_t)saved_state->cr2;
    type = saved_state->isf.trapno;
    code = (int)(saved_state->isf.err & 0xffff);
    intr = (saved_state->isf.rflags & EFL_IF) != 0; /* state of ints at trap */
    kern_ip = (vm_offset_t)saved_state->isf.rip;

    is_user = (vaddr < VM_MAX_USER_PAGE_ADDRESS);

#if DEVELOPMENT || DEBUG
    uint32_t traptrace_index = traptrace_start(type, kern_ip, mach_absolute_time(), saved_state->rbp);
#endif

#if CONFIG_DTRACE
    /*
     * Is there a DTrace hook?
     */
    if (__improbable(tempDTraceTrapHook != NULL)) {
        if (tempDTraceTrapHook(type, state, lo_spp, 0) == KERN_SUCCESS) {
            /*
             * If it succeeds, we are done...
             */
            goto common_return;
        }
    }

    /* Handle traps originated from probe context. */
    if (thread != THREAD_NULL && thread->t_dtrace_inprobe) {
        if (dtrace_handle_trap(type, state)) {
            goto common_return;
        }
    }
#endif /* CONFIG_DTRACE */

    /*
     * we come here with interrupts off as we don't want to recurse
     * on preemption below. but we do want to re-enable interrupts
     * as soon we possibly can to hold latency down
     */
    if (__improbable(T_PREEMPT == type)) {
        ast_taken_kernel();

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
            0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0);
        goto common_return;
    }

    user_addr_t kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
        (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
        VM_KERNEL_UNSLIDE(kern_ip), 0);

    if (T_PAGE_FAULT == type) {
        /*
         * assume we're faulting in the kernel map
         */
        map = kernel_map;

        if (__probable((thread != THREAD_NULL) && (thread->map != kernel_map) &&
            (vaddr < VM_MAX_USER_PAGE_ADDRESS))) {
            /* fault occurred in userspace */
            map = thread->map;

            /* Intercept a potential Supervisor Mode Execute
             * Protection fault. These criteria identify
             * both NX faults and SMEP faults, but both
             * are fatal. We avoid checking PTEs (racy).
             * (The VM could just redrive a SMEP fault, hence
             * the intercept).
             */
            if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) &&
                (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
                goto debugger_entry;
            }

            /*
             * Additionally check for SMAP faults...
             * which are characterized by page-present and
             * the AC bit unset (i.e. not from copyin/out path).
             */
            if (__improbable(code & T_PF_PROT &&
                pmap_smap_enabled &&
                (saved_state->isf.rflags & EFL_AC) == 0)) {
                goto debugger_entry;
            }

            /*
             * If we're not sharing cr3 with the user
             * and we faulted in copyio,
             * then switch cr3 here and dismiss the fault.
             */
            if (no_shared_cr3 &&
                (thread->machine.specFlags & CopyIOActive) &&
                map->pmap->pm_cr3 != get_cr3_base()) {
                pmap_assert(current_cpu_datap()->cpu_pmap_pcid_enabled == FALSE);
                set_cr3_raw(map->pmap->pm_cr3);
                return;
            }
            if (__improbable(vaddr < PAGE_SIZE) &&
                ((thread->machine.specFlags & CopyIOActive) == 0)) {
                goto debugger_entry;
            }
        }
    }

    (void) ml_set_interrupts_enabled(intr);

    switch (type) {
    case T_NO_FPU:
        fpnoextflt();
        goto common_return;

    case T_FPU_FAULT:
        fpextovrflt();
        goto common_return;

    case T_FLOATING_POINT_ERROR:
        fpexterrflt();
        goto common_return;

    case T_SSE_FLOAT_ERROR:
        fpSSEexterrflt();
        goto common_return;

    case T_INVALID_OPCODE:
        fpUDflt(kern_ip);
        goto debugger_entry;

    case T_DEBUG:
        if ((saved_state->isf.rflags & EFL_TF) == 0 && NO_WATCHPOINTS) {
            /* We've somehow encountered a debug
             * register match that does not belong
             * to the kernel debugger.
             * This isn't supposed to happen.
             */
            reset_dr7();
            goto common_return;
        }
        goto debugger_entry;
    case T_INT3:
        goto debugger_entry;
    case T_PAGE_FAULT:

#if CONFIG_DTRACE
        if (thread != THREAD_NULL && thread->t_dtrace_inprobe) { /* Executing under dtrace_probe? */
            if (dtrace_tally_fault(vaddr)) { /* Should a fault under dtrace be ignored? */
                /*
                 * DTrace has "anticipated" the possibility of this fault, and has
                 * established the suitable recovery state. Drop down now into the
                 * recovery handling code in "case T_GENERAL_PROTECTION:".
                 */
                goto FALL_THROUGH;
            }
        }
#endif /* CONFIG_DTRACE */

        prot = VM_PROT_READ;

        if (code & T_PF_WRITE) {
            prot |= VM_PROT_WRITE;
        }
        if (code & T_PF_EXECUTE) {
            prot |= VM_PROT_EXECUTE;
        }

        fault_result = result = vm_fault(map,
            vaddr,
            prot,
            FALSE, VM_KERN_MEMORY_NONE,
            THREAD_UNINT, NULL, 0);

        if (result == KERN_SUCCESS) {
            goto common_return;
        }
        /*
         * fall through
         */
#if CONFIG_DTRACE
FALL_THROUGH:
#endif /* CONFIG_DTRACE */

    case T_GENERAL_PROTECTION:
        /*
         * If there is a failure recovery address
         * for this fault, go there.
         */
        for (rp = recover_table; rp < recover_table_end; rp++) {
            if (kern_ip == rp->fault_addr) {
                set_recovery_ip(saved_state, rp->recover_addr);
                goto common_return;
            }
        }

        /*
         * Check thread recovery address also.
         */
        if (thread != THREAD_NULL && thread->recover) {
            set_recovery_ip(saved_state, thread->recover);
            thread->recover = 0;
            goto common_return;
        }
        /*
         * Unanticipated page-fault errors in kernel
         * should not return to user space.
         */
    /* fall through... */
    default:
        /*
         * Exception 15 is reserved but some chips may generate it
         * spuriously. Seen at startup on AMD Athlon-64.
         */
        if (type == 15) {
            kprintf("kernel_trap() ignoring spurious trap 15\n");
            goto common_return;
        }
debugger_entry:
        /* Ensure that the i386_kernel_state at the base of the
         * current thread's stack (if any) is synchronized with the
         * context at the moment of the trap, to facilitate
         * access through the debugger.
         */
        sync_iss_to_iks(state);
#if MACH_KDP
        if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr)) {
            goto common_return;
        }
#endif
    }
    pal_cli();
    panic_trap(saved_state, trap_pl, fault_result);
    /*
     * NO RETURN
     */

common_return:
#if DEVELOPMENT || DEBUG
    if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
        traptrace_end(traptrace_index, mach_absolute_time());
    }
#endif
    return;
}
static void
set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip)
{
    saved_state->isf.rip = ip;
}
static void
panic_trap(x86_saved_state64_t *regs, uint32_t pl, kern_return_t fault_result)
{
    const char  *trapname = "Unknown";
    pal_cr_t    cr0, cr2, cr3, cr4;
    boolean_t   potential_smep_fault = FALSE, potential_kernel_NX_fault = FALSE;
    boolean_t   potential_smap_fault = FALSE;

    pal_get_control_registers(&cr0, &cr2, &cr3, &cr4);
    assert(ml_get_interrupts_enabled() == FALSE);
    current_cpu_datap()->cpu_fatal_trap_state = regs;
    /*
     * Issue an I/O port read if one has been requested - this is an
     * event logic analyzers can use as a trigger point.
     */
    panic_notify();

    kprintf("CPU %d panic trap number 0x%x, rip 0x%016llx\n",
        cpu_number(), regs->isf.trapno, regs->isf.rip);
    kprintf("cr0 0x%016llx cr2 0x%016llx cr3 0x%016llx cr4 0x%016llx\n",
        cr0, cr2, cr3, cr4);

    if (regs->isf.trapno < TRAP_TYPES) {
        trapname = trap_type[regs->isf.trapno];
    }

    if ((regs->isf.trapno == T_PAGE_FAULT) && (regs->isf.err == (T_PF_PROT | T_PF_EXECUTE)) && (regs->isf.rip == regs->cr2)) {
        if (pmap_smep_enabled && (regs->isf.rip < VM_MAX_USER_PAGE_ADDRESS)) {
            potential_smep_fault = TRUE;
        } else if (regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
            potential_kernel_NX_fault = TRUE;
        }
    } else if (pmap_smap_enabled &&
        regs->isf.trapno == T_PAGE_FAULT &&
        regs->isf.err & T_PF_PROT &&
        regs->cr2 < VM_MAX_USER_PAGE_ADDRESS &&
        regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
        potential_smap_fault = TRUE;
    }

    panic("Kernel trap at 0x%016llx, type %d=%s, registers:\n"
        "CR0: 0x%016llx, CR2: 0x%016llx, CR3: 0x%016llx, CR4: 0x%016llx\n"
        "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
        "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
        "R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
        "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
        "RFL: 0x%016llx, RIP: 0x%016llx, CS:  0x%016llx, SS:  0x%016llx\n"
        "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d, VF: %d\n",
        regs->isf.rip, regs->isf.trapno, trapname,
        cr0, cr2, cr3, cr4,
        regs->rax, regs->rbx, regs->rcx, regs->rdx,
        regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi,
        regs->r8, regs->r9, regs->r10, regs->r11,
        regs->r12, regs->r13, regs->r14, regs->r15,
        regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF,
        regs->isf.ss & 0xFFFF, regs->cr2, regs->isf.err, regs->isf.cpu,
        virtualized ? " VMM" : "",
        potential_kernel_NX_fault ? " Kernel NX fault" : "",
        potential_smep_fault ? " SMEP/User NX fault" : "",
        potential_smap_fault ? " SMAP fault" : "",
        pl, fault_result);
}
#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(x86_saved_state_t *);
#endif

#if DEVELOPMENT || DEBUG
uint32_t fsigns, fsigcs;
#endif
/*
 * Trap from user mode.
 */
void
user_trap(
    x86_saved_state_t *saved_state)
{
    int                         exc;
    int                         err;
    mach_exception_code_t       code;
    mach_exception_subcode_t    subcode;
    int                         type;
    user_addr_t                 vaddr;
    vm_prot_t                   prot;
    thread_t                    thread = current_thread();
    kern_return_t               kret;
    user_addr_t                 rip;
    unsigned long               dr6 = 0; /* 32 bit for i386, 64 bit for x86_64 */
    int                         current_cpu = cpu_number();
#if DEVELOPMENT || DEBUG
    bool                        inspect_cacheline = false;
    uint32_t                    traptrace_index;
#endif
    assert((is_saved_state32(saved_state) && !thread_is_64bit_addr(thread)) ||
        (is_saved_state64(saved_state) && thread_is_64bit_addr(thread)));

    if (is_saved_state64(saved_state)) {
        x86_saved_state64_t *regs;

        regs = saved_state64(saved_state);

        /* Record cpu where state was captured */
        regs->isf.cpu = current_cpu;

        type = regs->isf.trapno;
        err = (int)regs->isf.err & 0xffff;
        vaddr = (user_addr_t)regs->cr2;
        rip = (user_addr_t)regs->isf.rip;
#if DEVELOPMENT || DEBUG
        traptrace_index = traptrace_start(type, rip, mach_absolute_time(), regs->rbp);
#endif
    } else {
        x86_saved_state32_t *regs;

        regs = saved_state32(saved_state);

        /* Record cpu where state was captured */
        regs->cpu = current_cpu;

        type = regs->trapno;
        err = regs->err & 0xffff;
        vaddr = (user_addr_t)regs->cr2;
        rip = (user_addr_t)regs->eip;
#if DEVELOPMENT || DEBUG
        traptrace_index = traptrace_start(type, rip, mach_absolute_time(), regs->ebp);
#endif
    }
#if DEVELOPMENT || DEBUG
    /*
     * Copy the cacheline of code into the thread's instruction stream save area
     * before enabling interrupts (the assumption is that we have not otherwise faulted or
     * trapped since the original cache line stores). If the saved code is not valid,
     * we'll catch it below when we process the copyin() for unhandled faults.
     */
    if (type == T_PAGE_FAULT || type == T_INVALID_OPCODE || type == T_GENERAL_PROTECTION) {
#define CACHELINE_SIZE 64
        THREAD_TO_PCB(thread)->insn_cacheline[CACHELINE_SIZE] = (uint8_t)(rip & (CACHELINE_SIZE - 1));
        bcopy(&cpu_shadowp(current_cpu)->cpu_rtimes[0],
            &THREAD_TO_PCB(thread)->insn_cacheline[0],
            sizeof(THREAD_TO_PCB(thread)->insn_cacheline) - 1);
        inspect_cacheline = true;
    }
#endif
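
    /*
     * Layout note: insn_cacheline[] holds CACHELINE_SIZE bytes of
     * captured code plus one trailing byte, written above, recording
     * rip's offset within the line; copy_instruction_stream() later
     * compares this early capture against a fresh copyin() of the
     * same code.
     */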
    if (type == T_DEBUG) {
        if (thread->machine.ids) {
            unsigned long clear = 0;
            /* Stash and clear this processor's DR6 value, in the event
             * this was a debug register match
             */
            __asm__ volatile ("mov %%db6, %0" : "=r" (dr6));
            __asm__ volatile ("mov %0, %%db6" : : "r" (clear));
        }
    }

    /* [Re]Enable LBRs *BEFORE* enabling interrupts to ensure we hit the right CPU */
    if (last_branch_support_enabled) {
        i386_lbr_enable();
    }

    pal_sti();
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
        (unsigned)(vaddr >> 32), (unsigned)vaddr,
        (unsigned)(rip >> 32), (unsigned)rip, 0);

    code = 0;
    subcode = 0;
    exc = 0;

#if CONFIG_DTRACE
    /*
     * DTrace does not consume all user traps, only INT_3's for now.
     * Avoid needlessly calling tempDTraceTrapHook here, and let the
     * INT_3 case handle them.
     */
#endif

    DEBUG_KPRINT_SYSCALL_MASK(1,
        "user_trap: type=0x%x(%s) err=0x%x cr2=%p rip=%p\n",
        type, trap_type[type], err, (void *)(long) vaddr, (void *)(long) rip);

    switch (type) {
    case T_DIVIDE_ERROR:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_DIV;
        break;
    case T_DEBUG:
    {
        pcb_t pcb;
        /*
         * Update the PCB with this processor's DR6 value
         * in the event this was a debug register match.
         */
        pcb = THREAD_TO_PCB(thread);
        if (pcb->ids) {
            /*
             * We can get and set the status register
             * in 32-bit mode even on a 64-bit thread
             * because the high order bits are not
             * used.
             */
            if (thread_is_64bit_addr(thread)) {
                x86_debug_state64_t *ids = pcb->ids;
                ids->dr6 = dr6;
            } else { /* 32 bit thread */
                x86_debug_state32_t *ids = pcb->ids;
                ids->dr6 = (uint32_t) dr6;
            }
        }
        exc = EXC_BREAKPOINT;
        code = EXC_I386_SGL;
        break;
    }
    case T_INT3:
#if CONFIG_DTRACE
        if (dtrace_user_probe(saved_state) == KERN_SUCCESS) {
            return; /* If it succeeds, we are done... */
        }
#endif
        exc = EXC_BREAKPOINT;
        code = EXC_I386_BPT;
        break;

    case T_OVERFLOW:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_INTO;
        break;

    case T_OUT_OF_BOUNDS:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_BOUND;
        break;
    case T_INVALID_OPCODE:
        if (fpUDflt(rip) == 1) {
            exc = EXC_BAD_INSTRUCTION;
            code = EXC_I386_INVOP;
        }
        break;

    case T_NO_FPU:
        fpnoextflt();
        return;

    case T_FPU_FAULT:
        fpextovrflt();
        /*
         * Raise exception.
         */
        exc = EXC_BAD_ACCESS;
        code = VM_PROT_READ | VM_PROT_EXECUTE;
        subcode = 0;
        break;

    case T_INVALID_TSS: /* invalid TSS == iret with NT flag set */
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVTSSFLT;
        subcode = err;
        break;

    case T_SEGMENT_NOT_PRESENT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = err;
        break;

    case T_STACK_FAULT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = err;
        break;
    case T_GENERAL_PROTECTION:
        /*
         * There's a wide range of circumstances which generate this
         * class of exception. From user-space, many involve bad
         * addresses (such as a non-canonical 64-bit address).
         * So we map this to EXC_BAD_ACCESS (and thereby SIGSEGV).
         * The trouble is cr2 doesn't contain the faulting address;
         * we'd need to decode the faulting instruction to really
         * determine this. We'll leave that to debuggers.
         * However, attempted execution of privileged instructions
         * (e.g. cli) also generate GP faults and so we map these to
         * to EXC_BAD_ACCESS (and thence SIGSEGV) also - rather than
         * EXC_BAD_INSTRUCTION which is more accurate. We just can't
         * get this right.
         */
        exc = EXC_BAD_ACCESS;
        code = EXC_I386_GPFLT;
        subcode = err;
        break;
    case T_PAGE_FAULT:
    {
        prot = VM_PROT_READ;

        if (err & T_PF_WRITE) {
            prot |= VM_PROT_WRITE;
        }
        if (__improbable(err & T_PF_EXECUTE)) {
            prot |= VM_PROT_EXECUTE;
        }
#if DEVELOPMENT || DEBUG
        uint32_t fsig = 0;
        fsig = thread_fpsimd_hash(thread);
#endif
        kret = vm_fault(thread->map,
            vaddr,
            prot, FALSE, VM_KERN_MEMORY_NONE,
            THREAD_ABORTSAFE, NULL, 0);
#if DEVELOPMENT || DEBUG
        if (fsig != 0) {
            uint32_t fsig2 = thread_fpsimd_hash(thread);
            if (fsig != fsig2) {
                panic("FP/SIMD state hash mismatch across fault thread: %p 0x%x->0x%x", thread, fsig, fsig2);
            } else {
                fsigcs++;
            }
        } else {
            fsigns++;
        }
#endif
        if (__probable((kret == KERN_SUCCESS) || (kret == KERN_ABORTED))) {
            thread_exception_return();
            /*NOTREACHED*/
        } else if (__improbable(kret == KERN_FAILURE)) {
            /*
             * For a user trap, vm_fault() should never return KERN_FAILURE.
             * If it does, we're leaking preemption disables somewhere in the kernel.
             */
            panic("vm_fault() KERN_FAILURE from user fault on thread %p", thread);
        }

        /* PAL debug hook (empty on x86) */
        pal_dbg_page_fault(thread, vaddr, kret);
        exc = EXC_BAD_ACCESS;
        code = kret;
        subcode = vaddr;
    }
    break;
    case T_SSE_FLOAT_ERROR:
        fpSSEexterrflt();
        exc = EXC_ARITHMETIC;
        code = EXC_I386_SSEEXTERR;
        subcode = ((struct x86_fx_thread_state *)thread->machine.ifps)->fx_MXCSR;
        break;

    case T_FLOATING_POINT_ERROR:
        fpexterrflt();
        exc = EXC_ARITHMETIC;
        code = EXC_I386_EXTERR;
        subcode = ((struct x86_fx_thread_state *)thread->machine.ifps)->fx_status;
        break;

    case T_DTRACE_RET:
#if CONFIG_DTRACE
        if (dtrace_user_probe(saved_state) == KERN_SUCCESS) {
            return; /* If it succeeds, we are done... */
        }
#endif
        /*
         * If we get an INT 0x7f when we do not expect to,
         * treat it as an illegal instruction
         */
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOP;
        break;

    default:
        panic("Unexpected user trap, type %d", type);
    }
    if (exc != 0) {
        uint16_t cs;
        boolean_t intrs;

        if (is_saved_state64(saved_state)) {
            cs = saved_state64(saved_state)->isf.cs;
        } else {
            cs = saved_state32(saved_state)->cs;
        }

        if (last_branch_support_enabled) {
            intrs = ml_set_interrupts_enabled(FALSE);
            /*
             * This is a bit racy (it's possible for this thread to migrate to another CPU, then
             * migrate back, but that seems rather rare in practice), but good enough to ensure
             * the LBRs are saved before proceeding with exception/signal dispatch.
             */
            if (current_cpu == cpu_number()) {
                i386_lbr_synch(thread);
            }
            ml_set_interrupts_enabled(intrs);
        }

        /*
         * Do not try to copyin from the instruction stream if the page fault was due
         * to an access to rip and was unhandled.
         * Do not deal with cases when %cs != USER[64]_CS
         * And of course there's no need to copy the instruction stream if the boot-arg
         * disabled it.
         */
        if (insn_copyin_count > 0 &&
            (cs == USER64_CS || cs == USER_CS) && (type != T_PAGE_FAULT || vaddr != rip)) {
#if DEVELOPMENT || DEBUG
            copy_instruction_stream(thread, rip, type, inspect_cacheline);
#else
            copy_instruction_stream(thread, rip, type);
#endif
        }

#if DEVELOPMENT || DEBUG
        if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
            traptrace_end(traptrace_index, mach_absolute_time());
        }
#endif
        /*
         * Note: Codepaths that directly return from user_trap() have pending
         * ASTs processed in locore
         */
        i386_exception(exc, code, subcode);
        /* NOTREACHED */
    }

#if DEVELOPMENT || DEBUG
    if (traptrace_index != TRAPTRACE_INVALID_INDEX) {
        traptrace_end(traptrace_index, mach_absolute_time());
    }
#endif
}
/*
 * Copyin up to x86_INSTRUCTION_STATE_MAX_INSN_BYTES bytes from the page that includes `rip`,
 * ensuring that we stay on the same page, clipping the start or end, as needed.
 * Add the clipped amount back at the start or end, depending on where it fits.
 * Consult the variable populated by the boot-arg `insn_capcnt'
 */
static __attribute__((noinline)) void
copy_instruction_stream(thread_t thread, uint64_t rip, int __unused trap_code
#if DEVELOPMENT || DEBUG
    , bool inspect_cacheline
#endif
    )
{
#if x86_INSTRUCTION_STATE_MAX_INSN_BYTES > 4096
#error x86_INSTRUCTION_STATE_MAX_INSN_BYTES cannot exceed a page in size.
#endif
    pcb_t pcb = THREAD_TO_PCB(thread);
    vm_map_offset_t pagemask = ~vm_map_page_mask(current_map());
    vm_map_offset_t rip_page = rip & pagemask;
    vm_map_offset_t start_addr;
    vm_map_offset_t insn_offset;
    vm_map_offset_t end_addr = rip + (insn_copyin_count / 2);
    void *stack_buffer = NULL;
    int copyin_err = 0;
#if defined(MACH_BSD) && (DEVELOPMENT || DEBUG)
    char *procname;
#endif
#if DEVELOPMENT || DEBUG
    assert(insn_copyin_count <= x86_INSTRUCTION_STATE_MAX_INSN_BYTES);
#else
    if (insn_copyin_count > x86_INSTRUCTION_STATE_MAX_INSN_BYTES ||
        insn_copyin_count < 64 /* CACHELINE_SIZE */) {
        return;
    }
#endif

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Walloca"
    stack_buffer = __builtin_alloca(insn_copyin_count);
#pragma clang diagnostic pop

    if (rip >= (insn_copyin_count / 2)) {
        start_addr = rip - (insn_copyin_count / 2);
    } else {
        start_addr = 0;
    }

    if (start_addr < rip_page) {
        insn_offset = (insn_copyin_count / 2) - (rip_page - start_addr);
        end_addr += (rip_page - start_addr);
        start_addr = rip_page;
    } else if (end_addr >= (rip_page + (~pagemask + 1))) {
        start_addr -= (end_addr - (rip_page + (~pagemask + 1))); /* Adjust start address backward */
        /* Adjust instruction offset due to start address change */
        insn_offset = (insn_copyin_count / 2) + (end_addr - (rip_page + (~pagemask + 1)));
        end_addr = rip_page + (~pagemask + 1); /* clip to the start of the next page (non-inclusive) */
    } else {
        insn_offset = insn_copyin_count / 2;
    }
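
    /*
     * Worked example (assuming insn_copyin_count == 192 and 4K pages):
     * for rip == page_base + 0x20, the nominal window [rip - 96, rip + 96)
     * would begin 0x40 bytes before the page, so start_addr is clipped up
     * to page_base, the clipped 0x40 bytes are added back at end_addr, and
     * insn_offset becomes 0x20, i.e. rip's offset into the copied buffer.
     */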
    disable_preemption(); /* Prevent copyin from faulting in the instruction stream */
    if (
#if DEVELOPMENT || DEBUG
        (insnstream_force_cacheline_mismatch < 2) &&
#endif
        ((end_addr > start_addr) && (copyin_err = copyin(start_addr, stack_buffer, end_addr - start_addr)) == 0)) {
        enable_preemption();

        if (pcb->insn_state == 0) {
            pcb->insn_state = kalloc(sizeof(x86_instruction_state_t));
        }

        if (pcb->insn_state != 0) {
            bcopy(stack_buffer, pcb->insn_state->insn_bytes, end_addr - start_addr);
            bzero(&pcb->insn_state->insn_bytes[end_addr - start_addr],
                insn_copyin_count - (end_addr - start_addr));

            pcb->insn_state->insn_stream_valid_bytes = (int)(end_addr - start_addr);
            pcb->insn_state->insn_offset = (int)insn_offset;
#if DEVELOPMENT || DEBUG
            /* Now try to validate the cacheline we read at early-fault time matches the code
             * copied in. Before we do that, we have to make sure the buffer contains a valid
             * cacheline by looking for the 2 sentinel values written in the event the cacheline
             * could not be copied.
             */
#define CACHELINE_DATA_NOT_PRESENT 0xdeadc0debeefcafeULL
#define CACHELINE_MASK (CACHELINE_SIZE - 1)

            if (inspect_cacheline &&
                (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT &&
                *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) {
                /*
                 * The position of the cacheline in the instruction buffer is at offset
                 * insn_offset - (rip & CACHELINE_MASK)
                 */
                if (__improbable((rip & CACHELINE_MASK) > insn_offset)) {
                    printf("thread %p code cacheline @ %p clipped wrt copied-in code (offset %d)\n",
                        thread, (void *)(rip & ~CACHELINE_MASK), (int)(rip & CACHELINE_MASK));
                } else if (bcmp(&pcb->insn_state->insn_bytes[insn_offset - (rip & CACHELINE_MASK)],
                    &pcb->insn_cacheline[0], CACHELINE_SIZE) != 0
                    || insnstream_force_cacheline_mismatch
                    ) {
#if x86_INSTRUCTION_STATE_CACHELINE_SIZE != CACHELINE_SIZE
#error cacheline size mismatch
#endif
                    bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0],
                        x86_INSTRUCTION_STATE_CACHELINE_SIZE);
                    /* Mark the instruction stream as being out-of-synch */
                    pcb->insn_state->out_of_synch = 1;

                    printf("thread %p code cacheline @ %p mismatches with copied-in code [trap 0x%x]\n",
                        thread, (void *)(rip & ~CACHELINE_MASK), trap_code);
                    for (int i = 0; i < 8; i++) {
                        printf("\t[%d] cl=0x%08llx vs. ci=0x%08llx\n", i, *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[i * 8],
                            *(uint64_t *)(uintptr_t)&pcb->insn_state->insn_bytes[(i * 8) + insn_offset - (rip & CACHELINE_MASK)]);
                    }
                    if (panic_on_cacheline_mismatch) {
                        panic("Cacheline mismatch while processing unhandled exception.");
                    }
                } else {
                    printf("thread %p code cacheline @ %p DOES match with copied-in code\n",
                        thread, (void *)(rip & ~CACHELINE_MASK));
                    pcb->insn_state->out_of_synch = 0;
                }
            } else if (inspect_cacheline) {
                printf("thread %p could not capture code cacheline at fault IP %p [offset %d]\n",
                    (void *)thread, (void *)rip, (int)(insn_offset - (rip & CACHELINE_MASK)));
                pcb->insn_state->out_of_synch = 0;
            }
#else
            pcb->insn_state->out_of_synch = 0;
#endif /* DEVELOPMENT || DEBUG */
#if defined(MACH_BSD) && (DEVELOPMENT || DEBUG)
            if (panic_on_trap_procname[0] != 0) {
                char procnamebuf[65] = {0};

                if (thread->task->bsd_info != NULL) {
                    procname = proc_name_address(thread->task->bsd_info);
                    strlcpy(procnamebuf, procname, sizeof(procnamebuf));

                    if (strcasecmp(panic_on_trap_procname, procnamebuf) == 0 &&
                        ((1U << trap_code) & panic_on_trap_mask) != 0) {
                        panic("Panic requested on trap type 0x%x for process `%s'", trap_code,
                            panic_on_trap_procname);
                    }
                }
            }
#endif /* MACH_BSD && (DEVELOPMENT || DEBUG) */
        }
    } else {
        enable_preemption();

        pcb->insn_state_copyin_failure_errorcode = copyin_err;
#if DEVELOPMENT || DEBUG
        if (inspect_cacheline && pcb->insn_state == 0) {
            pcb->insn_state = kalloc(sizeof(x86_instruction_state_t));
        }
        if (pcb->insn_state != 0) {
            pcb->insn_state->insn_stream_valid_bytes = 0;
            pcb->insn_state->insn_offset = 0;

            if (inspect_cacheline &&
                (*(uint64_t *)(uintptr_t)&pcb->insn_cacheline[0] != CACHELINE_DATA_NOT_PRESENT &&
                *(uint64_t *)(uintptr_t)&pcb->insn_cacheline[8] != CACHELINE_DATA_NOT_PRESENT)) {
                /*
                 * We can still copy the cacheline into the instruction state structure
                 * if it contains valid data
                 */
                pcb->insn_state->out_of_synch = 1;
                bcopy(&pcb->insn_cacheline[0], &pcb->insn_state->insn_cacheline[0],
                    x86_INSTRUCTION_STATE_CACHELINE_SIZE);
            }
        }
#endif /* DEVELOPMENT || DEBUG */
    }
}
/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
    int exc,
    mach_exception_code_t code,
    mach_exception_subcode_t subcode)
{
    mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];

    DEBUG_KPRINT_SYSCALL_MACH("i386_exception: exc=%d code=0x%llx subcode=0x%llx\n",
        exc, code, subcode);
    codes[0] = code;    /* new exception interface */
    codes[1] = subcode;
    exception_triage(exc, codes, 2);
    /*NOTREACHED*/
}
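
/*
 * The two codes[] slots follow the Mach (code, subcode) convention;
 * e.g. for an unhandled user page fault, user_trap() above passes the
 * vm_fault() result as the code and the faulting address as the subcode.
 */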
/* Synchronize a thread's x86_kernel_state (if any) with the given
 * x86_saved_state_t obtained from the trap/IPI handler; called in
 * kernel_trap() prior to entering the debugger, and when receiving
 * an "MP_KDP" IPI. Called with null saved_state if an incoming IPI
 * was detected from the kernel while spinning with interrupts masked.
 */
void
sync_iss_to_iks(x86_saved_state_t *saved_state)
{
    struct x86_kernel_state *iks = NULL;
    vm_offset_t kstack;
    boolean_t record_active_regs = FALSE;

    /* The PAL may have a special way to sync registers */
    if (saved_state && saved_state->flavor == THREAD_STATE_NONE) {
        pal_get_kern_regs(saved_state);
    }

    if (current_thread() != NULL &&
        (kstack = current_thread()->kernel_stack) != 0) {
        x86_saved_state64_t *regs = saved_state64(saved_state);

        iks = STACK_IKS(kstack);

        /* Did we take the trap/interrupt in kernel mode? */
        if (saved_state == NULL || /* NULL => polling in kernel */
            regs == USER_REGS64(current_thread())) {
            record_active_regs = TRUE;
        } else {
            iks->k_rbx = regs->rbx;
            iks->k_rsp = regs->isf.rsp;
            iks->k_rbp = regs->rbp;
            iks->k_r12 = regs->r12;
            iks->k_r13 = regs->r13;
            iks->k_r14 = regs->r14;
            iks->k_r15 = regs->r15;
            iks->k_rip = regs->isf.rip;
        }
    }

    if (record_active_regs == TRUE) {
        /* Show the trap handler path */
        __asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx));
        __asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp));
        __asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp));
        __asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12));
        __asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13));
        __asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14));
        __asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15));
        /* "Current" instruction pointer */
        __asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:"
            : "=m" (iks->k_rip)
            :
            : "rax");
    }
}
/*
 * This is used by the NMI interrupt handler (from mp.c) to
 * unconditionally sync the trap handler context to the IKS
 * irrespective of whether the NMI was fielded in kernel
 * or user space.
 */
void
sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state)
{
    struct x86_kernel_state *iks;
    vm_offset_t kstack;

    if ((kstack = current_thread()->kernel_stack) != 0) {
        iks = STACK_IKS(kstack);
        /* Display the trap handler path */
        __asm__ volatile ("movq %%rbx, %0" : "=m" (iks->k_rbx));
        __asm__ volatile ("movq %%rsp, %0" : "=m" (iks->k_rsp));
        __asm__ volatile ("movq %%rbp, %0" : "=m" (iks->k_rbp));
        __asm__ volatile ("movq %%r12, %0" : "=m" (iks->k_r12));
        __asm__ volatile ("movq %%r13, %0" : "=m" (iks->k_r13));
        __asm__ volatile ("movq %%r14, %0" : "=m" (iks->k_r14));
        __asm__ volatile ("movq %%r15, %0" : "=m" (iks->k_r15));
        /* "Current" instruction pointer */
        __asm__ volatile ("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax");
    }
}
extern void thread_exception_return_internal(void) __dead2;

void
thread_exception_return(void)
{
    thread_t thread = current_thread();
    ml_set_interrupts_enabled(FALSE);
    if (thread_is_64bit_addr(thread) != task_has_64Bit_addr(thread->task)) {
        panic("Task/thread bitness mismatch %p %p, task: %d, thread: %d", thread, thread->task, thread_is_64bit_addr(thread), task_has_64Bit_addr(thread->task));
    }

    if (thread_is_64bit_addr(thread)) {
        if ((gdt_desc_p(USER64_CS)->access & ACC_PL_U) == 0) {
            panic("64-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER64_CS));
        }
    } else {
        if ((gdt_desc_p(USER_CS)->access & ACC_PL_U) == 0) {
            panic("32-GDT mismatch %p, descriptor: %p", thread, gdt_desc_p(USER_CS));
        }
    }
    assert(get_preemption_level() == 0);
    thread_exception_return_internal();
    /*NOTREACHED*/
}