/*
 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * Hardware trap/fault handler.
 */

#include <mach_kdp.h>
#include <mach_ldebug.h>

#include <types.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/pmap.h>
#include <i386/fpu.h>
#include <i386/misc_protos.h> /* panic_io_port_read() */
#include <i386/lapic.h>

#include <mach/exception.h>
#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/i386/thread_status.h>

#include <vm/vm_kern.h>
#include <vm/vm_fault.h>

#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/exception.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>
#include <kern/debug.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
#include <sys/kdebug.h>
#include <prng/random.h>

#include <string.h>

#include <i386/postcode.h>
#include <i386/mp_desc.h>
#include <i386/proc_reg.h>
#if CONFIG_MCA
#include <i386/machine_check.h>
#endif
#include <mach/i386/syscall_sw.h>

#include <libkern/OSDebug.h>
#include <i386/cpu_threads.h>
#include <machine/pal_routines.h>

extern void throttle_lowpri_io(int);
extern void kprint_state(x86_saved_state64_t *saved_state);

/*
 * Forward declarations
 */
static void user_page_fault_continue(kern_return_t kret);
static void panic_trap(x86_saved_state64_t *saved_state, uint32_t pl);
static void set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip);

volatile perfCallback perfTrapHook = NULL; /* Pointer to CHUD trap hook routine */

#if CONFIG_DTRACE
/* See <rdar://problem/4613924> */
perfCallback tempDTraceTrapHook = NULL; /* Pointer to DTrace fbt trap hook routine */

extern boolean_t dtrace_tally_fault(user_addr_t);
#endif

extern boolean_t pmap_smep_enabled;
extern boolean_t pmap_smap_enabled;

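/*
 * Complete a system call: store the return value into the thread's saved
 * user register state (rax/eax), emit a kdebug end-tracepoint for Mach
 * traps, and resume user mode via thread_exception_return().
 */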
__attribute__((noreturn))
void
thread_syscall_return(
    kern_return_t ret)
{
    thread_t thr_act = current_thread();
    boolean_t is_mach;
    int code;

    pal_register_cache_state(thr_act, DIRTY);

    if (thread_is_64bit(thr_act)) {
        x86_saved_state64_t *regs;

        regs = USER_REGS64(thr_act);

        code = (int) (regs->rax & SYSCALL_NUMBER_MASK);
        is_mach = (regs->rax & SYSCALL_CLASS_MASK)
            == (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT);
        if (kdebug_enable && is_mach) {
            /* Mach trap */
            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                MACHDBG_CODE(DBG_MACH_EXCP_SC, code) | DBG_FUNC_END,
                ret, 0, 0, 0, 0);
        }
        regs->rax = ret;
#if DEBUG
        if (is_mach)
            DEBUG_KPRINT_SYSCALL_MACH(
                "thread_syscall_return: 64-bit mach ret=%u\n",
                ret);
        else
            DEBUG_KPRINT_SYSCALL_UNIX(
                "thread_syscall_return: 64-bit unix ret=%u\n",
                ret);
#endif
    } else {
        x86_saved_state32_t *regs;

        regs = USER_REGS32(thr_act);

        code = ((int) regs->eax);
        is_mach = (code < 0);
        if (kdebug_enable && is_mach) {
            /* Mach trap */
            KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
                MACHDBG_CODE(DBG_MACH_EXCP_SC, -code) | DBG_FUNC_END,
                ret, 0, 0, 0, 0);
        }
        regs->eax = ret;
#if DEBUG
        if (is_mach)
            DEBUG_KPRINT_SYSCALL_MACH(
                "thread_syscall_return: 32-bit mach ret=%u\n",
                ret);
        else
            DEBUG_KPRINT_SYSCALL_UNIX(
                "thread_syscall_return: 32-bit unix ret=%u\n",
                ret);
#endif
    }
    throttle_lowpri_io(1);

    thread_exception_return();
    /*NOTREACHED*/
}


static inline void
user_page_fault_continue(
    kern_return_t kr)
{
    thread_t thread = current_thread();
    user_addr_t vaddr;

    if (thread_is_64bit(thread)) {
        x86_saved_state64_t *uregs;

        uregs = USER_REGS64(thread);

        vaddr = (user_addr_t)uregs->cr2;
    } else {
        x86_saved_state32_t *uregs;

        uregs = USER_REGS32(thread);

        vaddr = uregs->cr2;
    }

    /* PAL debug hook */
    pal_dbg_page_fault(thread, vaddr, kr);

    i386_exception(EXC_BAD_ACCESS, kr, vaddr);
    /*NOTREACHED*/
}

/*
 * Fault recovery in copyin/copyout routines.
 */
struct recovery {
    uintptr_t fault_addr;
    uintptr_t recover_addr;
};

extern struct recovery recover_table[];
extern struct recovery recover_table_end[];

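/*
 * Each entry pairs the address of a faulting instruction with a recovery
 * address; on a kernel-mode fault, kernel_trap() scans this table and, on
 * a match, redirects the saved RIP to the recovery routine.
 */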
const char *trap_type[] = {TRAP_NAMES};
unsigned TRAP_TYPES = sizeof(trap_type) / sizeof(trap_type[0]);

extern void PE_incoming_interrupt(int interrupt);

#if defined(__x86_64__) && DEBUG
void
kprint_state(x86_saved_state64_t *saved_state)
{
    kprintf("current_cpu_datap() 0x%lx\n", (uintptr_t)current_cpu_datap());
    kprintf("Current GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_GS_BASE));
    kprintf("Kernel  GS base MSR 0x%llx\n", rdmsr64(MSR_IA32_KERNEL_GS_BASE));
    kprintf("state at 0x%lx:\n", (uintptr_t) saved_state);

    kprintf("      rdi    0x%llx\n", saved_state->rdi);
    kprintf("      rsi    0x%llx\n", saved_state->rsi);
    kprintf("      rdx    0x%llx\n", saved_state->rdx);
    kprintf("      r10    0x%llx\n", saved_state->r10);
    kprintf("      r8     0x%llx\n", saved_state->r8);
    kprintf("      r9     0x%llx\n", saved_state->r9);

    kprintf("      cr2    0x%llx\n", saved_state->cr2);
    kprintf("real  cr2    0x%lx\n", get_cr2());
    kprintf("      r15    0x%llx\n", saved_state->r15);
    kprintf("      r14    0x%llx\n", saved_state->r14);
    kprintf("      r13    0x%llx\n", saved_state->r13);
    kprintf("      r12    0x%llx\n", saved_state->r12);
    kprintf("      r11    0x%llx\n", saved_state->r11);
    kprintf("      rbp    0x%llx\n", saved_state->rbp);
    kprintf("      rbx    0x%llx\n", saved_state->rbx);
    kprintf("      rcx    0x%llx\n", saved_state->rcx);
    kprintf("      rax    0x%llx\n", saved_state->rax);

    kprintf("      gs     0x%x\n", saved_state->gs);
    kprintf("      fs     0x%x\n", saved_state->fs);

    kprintf("  isf.trapno 0x%x\n", saved_state->isf.trapno);
    kprintf("  isf._pad   0x%x\n", saved_state->isf._pad);
    kprintf("  isf.trapfn 0x%llx\n", saved_state->isf.trapfn);
    kprintf("  isf.err    0x%llx\n", saved_state->isf.err);
    kprintf("  isf.rip    0x%llx\n", saved_state->isf.rip);
    kprintf("  isf.cs     0x%llx\n", saved_state->isf.cs);
    kprintf("  isf.rflags 0x%llx\n", saved_state->isf.rflags);
    kprintf("  isf.rsp    0x%llx\n", saved_state->isf.rsp);
    kprintf("  isf.ss     0x%llx\n", saved_state->isf.ss);
}
#endif


/*
 * Non-zero indicates the interrupt latency assert is enabled, with the
 * cap expressed in absolute time units.
 */

uint64_t interrupt_latency_cap = 0;
boolean_t ilat_assert = FALSE;

void
interrupt_latency_tracker_setup(void) {
    uint32_t ilat_cap_us;
    if (PE_parse_boot_argn("interrupt_latency_cap_us", &ilat_cap_us, sizeof(ilat_cap_us))) {
        interrupt_latency_cap = ilat_cap_us * NSEC_PER_USEC;
        nanoseconds_to_absolutetime(interrupt_latency_cap, &interrupt_latency_cap);
    } else {
        interrupt_latency_cap = LockTimeOut;
    }
    PE_parse_boot_argn("-interrupt_latency_assert_enable", &ilat_assert, sizeof(ilat_assert));
}

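/* Clear each CPU's recorded maximum observed interrupt latency and vector. */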
void interrupt_reset_latency_stats(void) {
    uint32_t i;
    for (i = 0; i < real_ncpus; i++) {
        cpu_data_ptr[i]->cpu_max_observed_int_latency =
            cpu_data_ptr[i]->cpu_max_observed_int_latency_vector = 0;
    }
}

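/*
 * Report the worst-case interrupt latency seen so far: write the CPU
 * number, vector, and latency of the current maximum into buf.
 */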
void interrupt_populate_latency_stats(char *buf, unsigned bufsize) {
    uint32_t i, tcpu = ~0;
    uint64_t cur_max = 0;

    for (i = 0; i < real_ncpus; i++) {
        if (cur_max < cpu_data_ptr[i]->cpu_max_observed_int_latency) {
            cur_max = cpu_data_ptr[i]->cpu_max_observed_int_latency;
            tcpu = i;
        }
    }

    if (tcpu < real_ncpus)
        snprintf(buf, bufsize, "0x%x 0x%x 0x%llx", tcpu, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency_vector, cpu_data_ptr[tcpu]->cpu_max_observed_int_latency);
}

uint32_t interrupt_timer_coalescing_enabled = 1;
uint64_t interrupt_coalesced_timers;

/*
 * Handle interrupts:
 *  - local APIC interrupts (IPIs, timers, etc) are handled by the kernel,
 *  - device interrupts go to the platform expert.
 */
void
interrupt(x86_saved_state_t *state)
{
    uint64_t rip;
    uint64_t rsp;
    int interrupt_num;
    boolean_t user_mode = FALSE;
    int ipl;
    int cnum = cpu_number();
    cpu_data_t *cdp = cpu_data_ptr[cnum];
    int itype = 0;

    if (is_saved_state64(state) == TRUE) {
        x86_saved_state64_t *state64;

        state64 = saved_state64(state);
        rip = state64->isf.rip;
        rsp = state64->isf.rsp;
        interrupt_num = state64->isf.trapno;
#ifdef __x86_64__
        if (state64->isf.cs & 0x03)
#endif
            user_mode = TRUE;
    } else {
        x86_saved_state32_t *state32;

        state32 = saved_state32(state);
        if (state32->cs & 0x03)
            user_mode = TRUE;
        rip = state32->eip;
        rsp = state32->uesp;
        interrupt_num = state32->trapno;
    }

    if (cpu_data_ptr[cnum]->lcpu.package->num_idle == topoParms.nLThreadsPerPackage)
        cpu_data_ptr[cnum]->cpu_hwIntpexits[interrupt_num]++;

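    /* Classify the vector for the tracepoint: 1 = IPI, 2 = LAPIC timer, 3 = other. */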
    if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_INTERPROCESSOR_INTERRUPT))
        itype = 1;
    else if (interrupt_num == (LAPIC_DEFAULT_INTERRUPT_BASE + LAPIC_TIMER_INTERRUPT))
        itype = 2;
    else
        itype = 3;

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_START,
        interrupt_num,
        (user_mode ? rip : VM_KERNEL_UNSLIDE(rip)),
        user_mode, itype, 0);

    SCHED_STATS_INTERRUPT(current_processor());

#if CONFIG_TELEMETRY
    if (telemetry_needs_record) {
        telemetry_mark_curthread(user_mode);
    }
#endif

    ipl = get_preemption_level();

    /*
     * Handle local APIC interrupts
     * else call platform expert for devices.
     */
    if (!lapic_interrupt(interrupt_num, state)) {
        PE_incoming_interrupt(interrupt_num);
    }

    if (__improbable(get_preemption_level() != ipl)) {
        panic("Preemption level altered by interrupt vector 0x%x: initial 0x%x, final: 0x%x\n", interrupt_num, ipl, get_preemption_level());
    }


    if (__improbable(cdp->cpu_nested_istack)) {
        cdp->cpu_nested_istack_events++;
    }
    else {
        uint64_t ctime = mach_absolute_time();
        uint64_t int_latency = ctime - cdp->cpu_int_event_time;
        uint64_t esdeadline, ehdeadline;
        /* Attempt to process deferred timers in the context of
         * this interrupt, unless interrupt time has already exceeded
         * TCOAL_ILAT_THRESHOLD.
         */
#define TCOAL_ILAT_THRESHOLD (30000ULL)

        if ((int_latency < TCOAL_ILAT_THRESHOLD) &&
            interrupt_timer_coalescing_enabled) {
            esdeadline = cdp->rtclock_timer.queue.earliest_soft_deadline;
            ehdeadline = cdp->rtclock_timer.deadline;
            if ((ctime >= esdeadline) && (ctime < ehdeadline)) {
                interrupt_coalesced_timers++;
                TCOAL_DEBUG(0x88880000 | DBG_FUNC_START, ctime, esdeadline, ehdeadline, interrupt_coalesced_timers, 0);
                rtclock_intr(state);
                TCOAL_DEBUG(0x88880000 | DBG_FUNC_END, ctime, esdeadline, interrupt_coalesced_timers, 0, 0);
            } else {
                TCOAL_DEBUG(0x77770000, ctime, cdp->rtclock_timer.queue.earliest_soft_deadline, cdp->rtclock_timer.deadline, interrupt_coalesced_timers, 0);
            }
        }

        if (__improbable(ilat_assert && (int_latency > interrupt_latency_cap) && !machine_timeout_suspended())) {
            panic("Interrupt vector 0x%x exceeded interrupt latency threshold, 0x%llx absolute time delta, prior signals: 0x%x, current signals: 0x%x", interrupt_num, int_latency, cdp->cpu_prior_signals, cdp->cpu_signals);
        }

        if (__improbable(int_latency > cdp->cpu_max_observed_int_latency)) {
            cdp->cpu_max_observed_int_latency = int_latency;
            cdp->cpu_max_observed_int_latency_vector = interrupt_num;
        }
    }

    /*
     * Having serviced the interrupt first, look at the interrupted stack depth.
     */
    if (!user_mode) {
        uint64_t depth = cdp->cpu_kernel_stack
                         + sizeof(struct x86_kernel_state)
                         + sizeof(struct i386_exception_link *)
                         - rsp;
        if (__improbable(depth > kernel_stack_depth_max)) {
            kernel_stack_depth_max = (vm_offset_t)depth;
            KERNEL_DEBUG_CONSTANT(
                MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_DEPTH),
                (long) depth, (long) VM_KERNEL_UNSLIDE(rip), 0, 0, 0);
        }
    }

    if (cnum == master_cpu)
        ml_entropy_collect();

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0) | DBG_FUNC_END,
        interrupt_num, 0, 0, 0, 0);

    assert(ml_get_interrupts_enabled() == FALSE);
}

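/*
 * Restore DR7 to its architectural reset value (0x400), disabling all
 * hardware breakpoints and watchpoints on this CPU.
 */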
static inline void
reset_dr7(void)
{
    long dr7 = 0x400; /* magic dr7 reset value; 32 bit on i386, 64 bit on x86_64 */
    __asm__ volatile("mov %0, %%dr7" : : "r" (dr7));
}
#if MACH_KDP
unsigned kdp_has_active_watchpoints = 0;
#define NO_WATCHPOINTS (!kdp_has_active_watchpoints)
#else
#define NO_WATCHPOINTS 1
#endif
/*
 * Trap from kernel mode. Only page-fault errors are recoverable,
 * and then only in special circumstances. All other errors are
 * fatal. Return value indicates if trap was handled.
 */

void
kernel_trap(
    x86_saved_state_t *state,
    uintptr_t *lo_spp)
{
    x86_saved_state64_t *saved_state;
    int code;
    user_addr_t vaddr;
    int type;
    vm_map_t map = 0; /* protected by T_PAGE_FAULT */
    kern_return_t result = KERN_FAILURE;
    thread_t thread;
    ast_t *myast;
    boolean_t intr;
    vm_prot_t prot;
    struct recovery *rp;
    vm_offset_t kern_ip;
#if NCOPY_WINDOWS > 0
    int fault_in_copy_window = -1;
#endif
    int is_user;
    int trap_pl = get_preemption_level();

    thread = current_thread();

    if (__improbable(is_saved_state32(state)))
        panic("kernel_trap(%p) with 32-bit state", state);
    saved_state = saved_state64(state);

    /* Record cpu where state was captured */
    saved_state->isf.cpu = cpu_number();

    vaddr = (user_addr_t)saved_state->cr2;
    type = saved_state->isf.trapno;
    code = (int)(saved_state->isf.err & 0xffff);
    intr = (saved_state->isf.rflags & EFL_IF) != 0; /* state of ints at trap */
    kern_ip = (vm_offset_t)saved_state->isf.rip;

    myast = ast_pending();

    is_user = (vaddr < VM_MAX_USER_PAGE_ADDRESS);

    perfASTCallback astfn = perfASTHook;
    if (__improbable(astfn != NULL)) {
        if (*myast & AST_CHUD_ALL)
            astfn(AST_CHUD_ALL, myast);
    } else
        *myast &= ~AST_CHUD_ALL;


#if CONFIG_DTRACE
    /*
     * Is there a DTrace hook?
     */
    if (__improbable(tempDTraceTrapHook != NULL)) {
        if (tempDTraceTrapHook(type, state, lo_spp, 0) == KERN_SUCCESS) {
            /*
             * If it succeeds, we are done...
             */
            return;
        }
    }
#endif /* CONFIG_DTRACE */

    /*
     * we come here with interrupts off as we don't want to recurse
     * on preemption below. but we do want to re-enable interrupts
     * as soon we possibly can to hold latency down
     */
    if (__improbable(T_PREEMPT == type)) {
        ast_taken(AST_PREEMPTION, FALSE);

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
            0, 0, 0, VM_KERNEL_UNSLIDE(kern_ip), 0);
        return;
    }

    user_addr_t kd_vaddr = is_user ? vaddr : VM_KERNEL_UNSLIDE(vaddr);
    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (MACHDBG_CODE(DBG_MACH_EXCP_KTRAP_x86, type)) | DBG_FUNC_NONE,
        (unsigned)(kd_vaddr >> 32), (unsigned)kd_vaddr, is_user,
        VM_KERNEL_UNSLIDE(kern_ip), 0);


    if (T_PAGE_FAULT == type) {
        /*
         * assume we're faulting in the kernel map
         */
        map = kernel_map;

        if (__probable(thread != THREAD_NULL && thread->map != kernel_map)) {
#if NCOPY_WINDOWS > 0
            vm_offset_t copy_window_base;
            vm_offset_t kvaddr;
            int window_index;

            kvaddr = (vm_offset_t)vaddr;
            /*
             * must determine if fault occurred in
             * the copy window while pre-emption is
             * disabled for this processor so that
             * we only need to look at the window
             * associated with this processor
             */
            copy_window_base = current_cpu_datap()->cpu_copywindow_base;

            if (kvaddr >= copy_window_base && kvaddr < (copy_window_base + (NBPDE * NCOPY_WINDOWS))) {

                window_index = (int)((kvaddr - copy_window_base) / NBPDE);

                if (thread->machine.copy_window[window_index].user_base != (user_addr_t)-1) {

                    kvaddr -= (copy_window_base + (NBPDE * window_index));
                    vaddr = thread->machine.copy_window[window_index].user_base + kvaddr;

                    map = thread->map;
                    fault_in_copy_window = window_index;
                }
            }
#else
            if (__probable(vaddr < VM_MAX_USER_PAGE_ADDRESS)) {
                /* fault occurred in userspace */
                map = thread->map;

                /* Intercept a potential Supervisor Mode Execute
                 * Protection fault. These criteria identify
                 * both NX faults and SMEP faults, but both
                 * are fatal. We avoid checking PTEs (racy).
                 * (The VM could just redrive a SMEP fault, hence
                 * the intercept).
                 */
                if (__improbable((code == (T_PF_PROT | T_PF_EXECUTE)) &&
                    (pmap_smep_enabled) && (saved_state->isf.rip == vaddr))) {
                    goto debugger_entry;
                }

                /*
                 * Additionally check for SMAP faults...
                 * which are characterized by page-present and
                 * the AC bit unset (i.e. not from copyin/out path).
                 */
                if (__improbable(code & T_PF_PROT &&
                    pmap_smap_enabled &&
                    (saved_state->isf.rflags & EFL_AC) == 0)) {
                    goto debugger_entry;
                }

                /*
                 * If we're not sharing cr3 with the user
                 * and we faulted in copyio,
                 * then switch cr3 here and dismiss the fault.
                 */
                if (no_shared_cr3 &&
                    (thread->machine.specFlags & CopyIOActive) &&
                    map->pmap->pm_cr3 != get_cr3_base()) {
                    pmap_assert(current_cpu_datap()->cpu_pmap_pcid_enabled == FALSE);
                    set_cr3_raw(map->pmap->pm_cr3);
                    return;
                }
                if (__improbable(vaddr < PAGE_SIZE) &&
                    ((thread->machine.specFlags & CopyIOActive) == 0)) {
                    goto debugger_entry;
                }
            }
#endif
        }
    }

    (void) ml_set_interrupts_enabled(intr);

    switch (type) {

    case T_NO_FPU:
        fpnoextflt();
        return;

    case T_FPU_FAULT:
        fpextovrflt();
        return;

    case T_FLOATING_POINT_ERROR:
        fpexterrflt();
        return;

    case T_SSE_FLOAT_ERROR:
        fpSSEexterrflt();
        return;
    case T_DEBUG:
        if ((saved_state->isf.rflags & EFL_TF) == 0 && NO_WATCHPOINTS)
        {
            /* We've somehow encountered a debug
             * register match that does not belong
             * to the kernel debugger.
             * This isn't supposed to happen.
             */
            reset_dr7();
            return;
        }
        goto debugger_entry;
#ifdef __x86_64__
    case T_INT3:
        goto debugger_entry;
#endif
    case T_PAGE_FAULT:

#if CONFIG_DTRACE
        if (thread != THREAD_NULL && thread->options & TH_OPT_DTRACE) { /* Executing under dtrace_probe? */
            if (dtrace_tally_fault(vaddr)) { /* Should a fault under dtrace be ignored? */
                /*
                 * DTrace has "anticipated" the possibility of this fault, and has
                 * established the suitable recovery state. Drop down now into the
                 * recovery handling code in "case T_GENERAL_PROTECTION:".
                 */
                goto FALL_THROUGH;
            }
        }
#endif /* CONFIG_DTRACE */

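        /* Translate the page-fault error code bits into a VM access mode. */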
        prot = VM_PROT_READ;

        if (code & T_PF_WRITE)
            prot |= VM_PROT_WRITE;
        if (code & T_PF_EXECUTE)
            prot |= VM_PROT_EXECUTE;

        result = vm_fault(map,
                          vaddr,
                          prot,
                          FALSE,
                          THREAD_UNINT, NULL, 0);

        if (result == KERN_SUCCESS) {
#if NCOPY_WINDOWS > 0
            if (fault_in_copy_window != -1) {
                ml_set_interrupts_enabled(FALSE);
                copy_window_fault(thread, map,
                                  fault_in_copy_window);
                (void) ml_set_interrupts_enabled(intr);
            }
#endif /* NCOPY_WINDOWS > 0 */
            return;
        }
        /*
         * fall through
         */
#if CONFIG_DTRACE
FALL_THROUGH:
#endif /* CONFIG_DTRACE */

    case T_GENERAL_PROTECTION:
        /*
         * If there is a failure recovery address
         * for this fault, go there.
         */
        for (rp = recover_table; rp < recover_table_end; rp++) {
            if (kern_ip == rp->fault_addr) {
                set_recovery_ip(saved_state, rp->recover_addr);
                return;
            }
        }

        /*
         * Check thread recovery address also.
         */
        if (thread != THREAD_NULL && thread->recover) {
            set_recovery_ip(saved_state, thread->recover);
            thread->recover = 0;
            return;
        }
        /*
         * Unanticipated page-fault errors in kernel
         * should not happen.
         *
         * fall through...
         */
    default:
        /*
         * Exception 15 is reserved but some chips may generate it
         * spuriously. Seen at startup on AMD Athlon-64.
         */
        if (type == 15) {
            kprintf("kernel_trap() ignoring spurious trap 15\n");
            return;
        }
debugger_entry:
        /* Ensure that the i386_kernel_state at the base of the
         * current thread's stack (if any) is synchronized with the
         * context at the moment of the trap, to facilitate
         * access through the debugger.
         */
        sync_iss_to_iks(state);
#if MACH_KDP
        if (kdp_i386_trap(type, saved_state, result, (vm_offset_t)vaddr))
            return;
#endif
    }
    pal_cli();
    panic_trap(saved_state, trap_pl);
    /*
     * NO RETURN
     */
}


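/* Redirect the saved instruction pointer so the iret resumes at the recovery routine. */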
static void
set_recovery_ip(x86_saved_state64_t *saved_state, vm_offset_t ip)
{
    saved_state->isf.rip = ip;
}

static void
panic_trap(x86_saved_state64_t *regs, uint32_t pl)
{
    const char *trapname = "Unknown";
    pal_cr_t cr0, cr2, cr3, cr4;
    boolean_t potential_smep_fault = FALSE, potential_kernel_NX_fault = FALSE;
    boolean_t potential_smap_fault = FALSE;

    pal_get_control_registers(&cr0, &cr2, &cr3, &cr4);
    assert(ml_get_interrupts_enabled() == FALSE);
    current_cpu_datap()->cpu_fatal_trap_state = regs;
    /*
     * Issue an I/O port read if one has been requested - this is an
     * event logic analyzers can use as a trigger point.
     */
    panic_io_port_read();

    kprintf("panic trap number 0x%x, rip 0x%016llx\n",
        regs->isf.trapno, regs->isf.rip);
    kprintf("cr0 0x%016llx cr2 0x%016llx cr3 0x%016llx cr4 0x%016llx\n",
        cr0, cr2, cr3, cr4);

    if (regs->isf.trapno < TRAP_TYPES)
        trapname = trap_type[regs->isf.trapno];

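    /* Classify instruction-fetch faults as SMEP or kernel-NX violations for the panic string. */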
    if ((regs->isf.trapno == T_PAGE_FAULT) && (regs->isf.err == (T_PF_PROT | T_PF_EXECUTE)) && (regs->isf.rip == regs->cr2)) {
        if (pmap_smep_enabled && (regs->isf.rip < VM_MAX_USER_PAGE_ADDRESS)) {
            potential_smep_fault = TRUE;
        } else if (regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
            potential_kernel_NX_fault = TRUE;
        }
    } else if (pmap_smap_enabled &&
        regs->isf.trapno == T_PAGE_FAULT &&
        regs->isf.err & T_PF_PROT &&
        regs->cr2 < VM_MAX_USER_PAGE_ADDRESS &&
        regs->isf.rip >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
        potential_smap_fault = TRUE;
    }

#undef panic
    panic("Kernel trap at 0x%016llx, type %d=%s, registers:\n"
          "CR0: 0x%016llx, CR2: 0x%016llx, CR3: 0x%016llx, CR4: 0x%016llx\n"
          "RAX: 0x%016llx, RBX: 0x%016llx, RCX: 0x%016llx, RDX: 0x%016llx\n"
          "RSP: 0x%016llx, RBP: 0x%016llx, RSI: 0x%016llx, RDI: 0x%016llx\n"
          "R8:  0x%016llx, R9:  0x%016llx, R10: 0x%016llx, R11: 0x%016llx\n"
          "R12: 0x%016llx, R13: 0x%016llx, R14: 0x%016llx, R15: 0x%016llx\n"
          "RFL: 0x%016llx, RIP: 0x%016llx, CS:  0x%016llx, SS:  0x%016llx\n"
          "Fault CR2: 0x%016llx, Error code: 0x%016llx, Fault CPU: 0x%x%s%s%s%s, PL: %d\n",
          regs->isf.rip, regs->isf.trapno, trapname,
          cr0, cr2, cr3, cr4,
          regs->rax, regs->rbx, regs->rcx, regs->rdx,
          regs->isf.rsp, regs->rbp, regs->rsi, regs->rdi,
          regs->r8, regs->r9, regs->r10, regs->r11,
          regs->r12, regs->r13, regs->r14, regs->r15,
          regs->isf.rflags, regs->isf.rip, regs->isf.cs & 0xFFFF,
          regs->isf.ss & 0xFFFF, regs->cr2, regs->isf.err, regs->isf.cpu,
          virtualized ? " VMM" : "",
          potential_kernel_NX_fault ? " Kernel NX fault" : "",
          potential_smep_fault ? " SMEP/User NX fault" : "",
          potential_smap_fault ? " SMAP fault" : "", pl);
    /*
     * This next statement is not executed,
     * but it's needed to stop the compiler using tail call optimization
     * for the panic call - which confuses the subsequent backtrace.
     */
    cr0 = 0;
}

#if CONFIG_DTRACE
extern kern_return_t dtrace_user_probe(x86_saved_state_t *);
#endif

/*
 * Trap from user mode.
 */
void
user_trap(
    x86_saved_state_t *saved_state)
{
    int exc;
    int err;
    mach_exception_code_t code;
    mach_exception_subcode_t subcode;
    int type;
    user_addr_t vaddr;
    vm_prot_t prot;
    thread_t thread = current_thread();
    ast_t *myast;
    kern_return_t kret;
    user_addr_t rip;
    unsigned long dr6 = 0; /* 32 bit for i386, 64 bit for x86_64 */

    assert((is_saved_state32(saved_state) && !thread_is_64bit(thread)) ||
           (is_saved_state64(saved_state) && thread_is_64bit(thread)));

    if (is_saved_state64(saved_state)) {
        x86_saved_state64_t *regs;

        regs = saved_state64(saved_state);

        /* Record cpu where state was captured */
        regs->isf.cpu = cpu_number();

        type = regs->isf.trapno;
        err = (int)regs->isf.err & 0xffff;
        vaddr = (user_addr_t)regs->cr2;
        rip = (user_addr_t)regs->isf.rip;
    } else {
        x86_saved_state32_t *regs;

        regs = saved_state32(saved_state);

        /* Record cpu where state was captured */
        regs->cpu = cpu_number();

        type = regs->trapno;
        err = regs->err & 0xffff;
        vaddr = (user_addr_t)regs->cr2;
        rip = (user_addr_t)regs->eip;
    }

    if ((type == T_DEBUG) && thread->machine.ids) {
        unsigned long clear = 0;
        /* Stash and clear this processor's DR6 value, in the event
         * this was a debug register match
         */
        __asm__ volatile ("mov %%db6, %0" : "=r" (dr6));
        __asm__ volatile ("mov %0, %%db6" : : "r" (clear));
    }

    pal_sti();

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        (MACHDBG_CODE(DBG_MACH_EXCP_UTRAP_x86, type)) | DBG_FUNC_NONE,
        (unsigned)(vaddr >> 32), (unsigned)vaddr,
        (unsigned)(rip >> 32), (unsigned)rip, 0);

    code = 0;
    subcode = 0;
    exc = 0;

#if DEBUG_TRACE
    kprintf("user_trap(0x%08x) type=%d vaddr=0x%016llx\n",
        saved_state, type, vaddr);
#endif

    perfASTCallback astfn = perfASTHook;
    if (__improbable(astfn != NULL)) {
        myast = ast_pending();
        if (*myast & AST_CHUD_ALL) {
            astfn(AST_CHUD_ALL, myast);
        }
    }

    /* Is there a hook? */
    perfCallback fn = perfTrapHook;
    if (__improbable(fn != NULL)) {
        if (fn(type, saved_state, 0, 0) == KERN_SUCCESS)
            return; /* If it succeeds, we are done... */
    }

#if CONFIG_DTRACE
    /*
     * DTrace does not consume all user traps, only INT_3's for now.
     * Avoid needlessly calling tempDTraceTrapHook here, and let the
     * INT_3 case handle them.
     */
#endif

    DEBUG_KPRINT_SYSCALL_MASK(1,
        "user_trap: type=0x%x(%s) err=0x%x cr2=%p rip=%p\n",
        type, trap_type[type], err, (void *)(long) vaddr, (void *)(long) rip);

    switch (type) {

    case T_DIVIDE_ERROR:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_DIV;
        break;

    case T_DEBUG:
    {
        pcb_t pcb;
        /*
         * Update the PCB with this processor's DR6 value
         * in the event this was a debug register match.
         */
        pcb = THREAD_TO_PCB(thread);
        if (pcb->ids) {
            /*
             * We can get and set the status register
             * in 32-bit mode even on a 64-bit thread
             * because the high order bits are not
             * used on x86_64
             */
            if (thread_is_64bit(thread)) {
                x86_debug_state64_t *ids = pcb->ids;
                ids->dr6 = dr6;
            } else { /* 32 bit thread */
                x86_debug_state32_t *ids = pcb->ids;
                ids->dr6 = (uint32_t) dr6;
            }
        }
        exc = EXC_BREAKPOINT;
        code = EXC_I386_SGL;
        break;
    }
    case T_INT3:
#if CONFIG_DTRACE
        if (dtrace_user_probe(saved_state) == KERN_SUCCESS)
            return; /* If it succeeds, we are done... */
#endif
        exc = EXC_BREAKPOINT;
        code = EXC_I386_BPT;
        break;

    case T_OVERFLOW:
        exc = EXC_ARITHMETIC;
        code = EXC_I386_INTO;
        break;

    case T_OUT_OF_BOUNDS:
        exc = EXC_SOFTWARE;
        code = EXC_I386_BOUND;
        break;

    case T_INVALID_OPCODE:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOP;
        break;

    case T_NO_FPU:
        fpnoextflt();
        return;

    case T_FPU_FAULT:
        fpextovrflt(); /* Propagates exception directly, doesn't return */
        return;

    case T_INVALID_TSS: /* invalid TSS == iret with NT flag set */
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVTSSFLT;
        subcode = err;
        break;

    case T_SEGMENT_NOT_PRESENT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_SEGNPFLT;
        subcode = err;
        break;

    case T_STACK_FAULT:
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_STKFLT;
        subcode = err;
        break;

    case T_GENERAL_PROTECTION:
        /*
         * There's a wide range of circumstances which generate this
         * class of exception. From user-space, many involve bad
         * addresses (such as a non-canonical 64-bit address).
         * So we map this to EXC_BAD_ACCESS (and thereby SIGSEGV).
         * The trouble is cr2 doesn't contain the faulting address;
         * we'd need to decode the faulting instruction to really
         * determine this. We'll leave that to debuggers.
         * However, attempted execution of privileged instructions
         * (e.g. cli) also generate GP faults and so we map these
         * to EXC_BAD_ACCESS (and thence SIGSEGV) also - rather than
         * EXC_BAD_INSTRUCTION which is more accurate. We just can't
         * win!
         */
        exc = EXC_BAD_ACCESS;
        code = EXC_I386_GPFLT;
        subcode = err;
        break;

    case T_PAGE_FAULT:
    {
        prot = VM_PROT_READ;

        if (err & T_PF_WRITE)
            prot |= VM_PROT_WRITE;
        if (__improbable(err & T_PF_EXECUTE))
            prot |= VM_PROT_EXECUTE;
        kret = vm_fault(thread->map,
                        vaddr,
                        prot, FALSE,
                        THREAD_ABORTSAFE, NULL, 0);

        if (__probable((kret == KERN_SUCCESS) || (kret == KERN_ABORTED))) {
            thread_exception_return();
            /*NOTREACHED*/
        }

        user_page_fault_continue(kret);
    } /* NOTREACHED */
        break;

    case T_SSE_FLOAT_ERROR:
        fpSSEexterrflt(); /* Propagates exception directly, doesn't return */
        return;


    case T_FLOATING_POINT_ERROR:
        fpexterrflt(); /* Propagates exception directly, doesn't return */
        return;

    case T_DTRACE_RET:
#if CONFIG_DTRACE
        if (dtrace_user_probe(saved_state) == KERN_SUCCESS)
            return; /* If it succeeds, we are done... */
#endif
        /*
         * If we get an INT 0x7f when we do not expect to,
         * treat it as an illegal instruction
         */
        exc = EXC_BAD_INSTRUCTION;
        code = EXC_I386_INVOP;
        break;

    default:
        panic("Unexpected user trap, type %d", type);
        return;
    }
    /* Note: Codepaths that directly return from user_trap() have pending
     * ASTs processed in locore
     */
    i386_exception(exc, code, subcode);
    /* NOTREACHED */
}


/*
 * Handle AST traps for i386.
 */

extern void log_thread_action(thread_t, char *);

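/* If 'preemption' is set, handle only preemption ASTs; otherwise take all pending ASTs. */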
void
i386_astintr(int preemption)
{
    ast_t mask = AST_ALL;
    spl_t s;

    if (preemption)
        mask = AST_PREEMPTION;

    s = splsched();

    ast_taken(mask, s);

    splx(s);
}

/*
 * Handle exceptions for i386.
 *
 * If we are an AT bus machine, we must turn off the AST for a
 * delayed floating-point exception.
 *
 * If we are providing floating-point emulation, we may have
 * to retrieve the real register values from the floating point
 * emulator.
 */
void
i386_exception(
    int exc,
    mach_exception_code_t code,
    mach_exception_subcode_t subcode)
{
    mach_exception_data_type_t codes[EXCEPTION_CODE_MAX];

    DEBUG_KPRINT_SYSCALL_MACH("i386_exception: exc=%d code=0x%llx subcode=0x%llx\n",
        exc, code, subcode);
    codes[0] = code; /* new exception interface */
    codes[1] = subcode;
    exception_triage(exc, codes, 2);
    /*NOTREACHED*/
}


/* Synchronize a thread's x86_kernel_state (if any) with the given
 * x86_saved_state_t obtained from the trap/IPI handler; called in
 * kernel_trap() prior to entering the debugger, and when receiving
 * an "MP_KDP" IPI. Called with null saved_state if an incoming IPI
 * was detected from the kernel while spinning with interrupts masked.
 */

void
sync_iss_to_iks(x86_saved_state_t *saved_state)
{
    struct x86_kernel_state *iks;
    vm_offset_t kstack;
    boolean_t record_active_regs = FALSE;

    /* The PAL may have a special way to sync registers */
    if (saved_state && saved_state->flavor == THREAD_STATE_NONE)
        pal_get_kern_regs(saved_state);

    if ((kstack = current_thread()->kernel_stack) != 0) {
        x86_saved_state64_t *regs = saved_state64(saved_state);

        iks = STACK_IKS(kstack);

        /* Did we take the trap/interrupt in kernel mode? */
        if (saved_state == NULL || /* NULL => polling in kernel */
            regs == USER_REGS64(current_thread()))
            record_active_regs = TRUE;
        else {
            iks->k_rbx = regs->rbx;
            iks->k_rsp = regs->isf.rsp;
            iks->k_rbp = regs->rbp;
            iks->k_r12 = regs->r12;
            iks->k_r13 = regs->r13;
            iks->k_r14 = regs->r14;
            iks->k_r15 = regs->r15;
            iks->k_rip = regs->isf.rip;
        }
    }

    if (record_active_regs == TRUE) {
        /* Show the trap handler path */
        __asm__ volatile("movq %%rbx, %0" : "=m" (iks->k_rbx));
        __asm__ volatile("movq %%rsp, %0" : "=m" (iks->k_rsp));
        __asm__ volatile("movq %%rbp, %0" : "=m" (iks->k_rbp));
        __asm__ volatile("movq %%r12, %0" : "=m" (iks->k_r12));
        __asm__ volatile("movq %%r13, %0" : "=m" (iks->k_r13));
        __asm__ volatile("movq %%r14, %0" : "=m" (iks->k_r14));
        __asm__ volatile("movq %%r15, %0" : "=m" (iks->k_r15));
        /* "Current" instruction pointer */
        __asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:"
                         : "=m" (iks->k_rip)
                         :
                         : "rax");
    }
}

/*
 * This is used by the NMI interrupt handler (from mp.c) to
 * unconditionally sync the trap handler context to the IKS
 * irrespective of whether the NMI was fielded in kernel
 * or user space.
 */
void
sync_iss_to_iks_unconditionally(__unused x86_saved_state_t *saved_state) {
    struct x86_kernel_state *iks;
    vm_offset_t kstack;

    if ((kstack = current_thread()->kernel_stack) != 0) {
        iks = STACK_IKS(kstack);
        /* Display the trap handler path */
        __asm__ volatile("movq %%rbx, %0" : "=m" (iks->k_rbx));
        __asm__ volatile("movq %%rsp, %0" : "=m" (iks->k_rsp));
        __asm__ volatile("movq %%rbp, %0" : "=m" (iks->k_rbp));
        __asm__ volatile("movq %%r12, %0" : "=m" (iks->k_r12));
        __asm__ volatile("movq %%r13, %0" : "=m" (iks->k_r13));
        __asm__ volatile("movq %%r14, %0" : "=m" (iks->k_r14));
        __asm__ volatile("movq %%r15, %0" : "=m" (iks->k_r15));
        /* "Current" instruction pointer */
        __asm__ volatile("leaq 1f(%%rip), %%rax; mov %%rax, %0\n1:" : "=m" (iks->k_rip)::"rax");
    }
}