/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/asm.h>
#include <assym.s>
#include <debug.h>
#include <i386/eflags.h>
#include <i386/rtclock_asm.h>
#include <i386/trap.h>
#define _ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <mach/exception_types.h>

#if DEBUG
#define DEBUG_IDT64 1
#endif

/*
 * This is the low-level trap and interrupt handling code associated with
 * the IDT. It also includes system call handlers for sysenter/syscall.
 * The IDT itself is defined in mp_desc.c.
 *
 * Code here is structured as follows:
 *
 * stubs	Code called directly from an IDT vector.
 *		All entry points have the "idt64_" prefix and they are built
 *		using macros expanded by the inclusion of idt_table.h.
 *		This code performs vector-dependent identification and jumps
 *		into the dispatch code.
 *
 * dispatch	The dispatch code is responsible for saving the thread state
 *		(which is either 64-bit or 32-bit) and then jumping to the
 *		class handler identified by the stub.
 *
 * returns	Code to restore state and return to the previous context.
 *
 * handlers	There are several classes of handlers:
 *		  interrupt	- asynchronous events typically from external devices
 *		  trap		- synchronous events due to thread execution
 *		  syscall	- synchronous system call request
 *		  fatal		- fatal traps
 */
/*
 * Indices of handlers for each exception type.
 */
#define	HNDL_ALLINTRS		0
#define	HNDL_ALLTRAPS		1
#define	HNDL_SYSENTER		2
#define	HNDL_SYSCALL		3
#define	HNDL_UNIX_SCALL		4
#define	HNDL_MACH_SCALL		5
#define	HNDL_MDEP_SCALL		6
#define	HNDL_DOUBLE_FAULT	7
#define	HNDL_MACHINE_CHECK	8

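/*
 * Rough sketch of the indirection (see idt64_hndl_table1 and
 * L_common_dispatch below): each stub pushes one of the HNDL_* indices
 * above along with its vector number; the common dispatch path later
 * moves that index into %rdx and vectors through the handler table,
 * effectively "jmp *idt64_hndl_table1(, %rdx, 8)".
 */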
/* Begin double-mapped descriptor section */

	.section __HIB, __desc
	.globl EXT(idt64_hndl_table0)
EXT(idt64_hndl_table0):
/* 0x00 */	.quad EXT(ks_dispatch)
/* 0x08 */	.quad EXT(ks_64bit_return)
/* 0x10 */	.quad 0 /* Populated with CPU shadow displacement */
/* 0x18 */	.quad EXT(ks_return)
#define TBL0_OFF_DISP_USER_WITH_POPRAX	0x20
/* 0x20 */	.quad EXT(ks_dispatch_user_with_pop_rax)
#define TBL0_OFF_DISP_KERN_WITH_POPRAX	0x28
/* 0x28 */	.quad EXT(ks_dispatch_kernel_with_pop_rax)
#define TBL0_OFF_PTR_KERNEL_STACK_MASK	0x30
/* 0x30 */	.quad 0 /* &kernel_stack_mask */

EXT(idt64_hndl_table1):
	.quad	EXT(hndl_allintrs)
	.quad	EXT(hndl_alltraps)
	.quad	EXT(hndl_sysenter)
	.quad	EXT(hndl_syscall)
	.quad	EXT(hndl_unix_scall)
	.quad	EXT(hndl_mach_scall)
	.quad	EXT(hndl_mdep_scall)
	.quad	EXT(hndl_double_fault)
	.quad	EXT(hndl_machine_check)
	.text


/* The wrapper for all non-special traps/interrupts */
/* Everything before the handler/vector pushes exists only to output
 * the interrupt number to the postcode display
 */
#if DEBUG_IDT64
#define IDT_ENTRY_WRAPPER(n, f)			 \
	push	%rax				;\
	POSTCODE2(0x6400+n)			;\
	pop	%rax				;\
	pushq	$(f)				;\
	pushq	$(n)				;\
	jmp	L_dispatch
#else
#define IDT_ENTRY_WRAPPER(n, f)			 \
	pushq	$(f)				;\
	pushq	$(n)				;\
	jmp	L_dispatch
#endif

/* A trap that comes with an error code already on the stack */
#define TRAP_ERR(n, f)				 \
	Entry(f)				;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)

/* A normal trap */
#define TRAP(n, f)				 \
	Entry(f)				;\
	pushq	$0				;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)

#define USER_TRAP TRAP

/* An interrupt */
#define INTERRUPT(n)				 \
	Entry(_intr_ ## n)			;\
	pushq	$0				;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLINTRS)

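/*
 * For illustration (hypothetical vector/name): TRAP(0x0d, idt64_example)
 * would expand, in the non-DEBUG_IDT64 case, to
 *	Entry(idt64_example)
 *	pushq	$0			-- synthesized error code
 *	pushq	$(HNDL_ALLTRAPS)	-- handler index
 *	pushq	$(0x0d)			-- vector number
 *	jmp	L_dispatch
 */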
/* A trap with a special-case handler, hence we don't need to define anything */
#define TRAP_SPC(n, f)
#define TRAP_IST1(n, f)
#define TRAP_IST2(n, f)
#define USER_TRAP_SPC(n, f)

/* Begin double-mapped text section */
	.section __HIB, __text
/* Generate all the stubs */
#include "idt_table.h"

Entry(idt64_page_fault)
	pushq	$(HNDL_ALLTRAPS)
	push	$(T_PAGE_FAULT)
	jmp	L_dispatch

/*
 * #DB handler, which runs on IST1, will treat as spurious any #DB received while executing in the
 * kernel while not on the kernel's gsbase.
 */
Entry(idt64_debug)
	/* Synthesize common interrupt stack frame */
	push	$0			/* error code */
	pushq	$(HNDL_ALLTRAPS)
	pushq	$(T_DEBUG)
	/* Spill prior to RDMSR */
	push	%rax
	push	%rcx
	push	%rdx
	mov	$(MSR_IA32_GS_BASE), %ecx
	rdmsr				/* Check contents of GSBASE MSR */
	test	$0x80000000, %edx	/* MSB set? Already swapped to kernel's */
	jnz	1f
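	/*
	 * Note: RDMSR returns the 64-bit base in %edx:%eax, so bit 31 of
	 * %edx is bit 63 of GSBASE; kernel GS bases lie in the upper
	 * canonical half, hence a set MSB indicates the kernel's gsbase.
	 */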

	/*
	 * If we're not already swapped to the kernel's gsbase AND this #DB originated from kernel space,
	 * it must have happened within the very small window on entry or exit before or after (respectively)
	 * swapgs occurred. In those cases, consider the #DB spurious and immediately return.
	 */
	testb	$3, 8+8+8+ISF64_CS(%rsp)
	jnz	2f
	pop	%rdx
	pop	%rcx
	pop	%rax
	addq	$0x18, %rsp		/* Remove synthesized interrupt stack frame */
	jmp	EXT(ret64_iret)
2:
	swapgs				/* direct from user */
1:
	pop	%rdx

	leaq	EXT(idt64_hndl_table0)(%rip), %rax
	mov	16(%rax), %rax		/* Offset of per-CPU shadow */
	mov	%gs:CPU_TASK_CR3(%rax), %rax
	mov	%rax, %cr3

	pop	%rcx

	/* Note that %rax will be popped from the stack in ks_dispatch, below */

	leaq	EXT(idt64_hndl_table0)(%rip), %rax
	jmp	*(%rax)

/*
 * Legacy interrupt-gate system call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */

#ifndef UNIX_INT
#error NO UNIX INT!!!
#endif
Entry(idt64_unix_scall)
	pushq	%rax			/* save system call number */
	pushq	$(HNDL_UNIX_SCALL)
	pushq	$(UNIX_INT)
	jmp	L_u64bit_entry_check

Entry(idt64_mach_scall)
	pushq	%rax			/* save system call number */
	pushq	$(HNDL_MACH_SCALL)
	pushq	$(MACH_INT)
	jmp	L_u64bit_entry_check

Entry(idt64_mdep_scall)
	pushq	%rax			/* save system call number */
	pushq	$(HNDL_MDEP_SCALL)
	pushq	$(MACHDEP_INT)
	jmp	L_u64bit_entry_check

/*
 * For GP/NP/SS faults, we use the IST1 stack.
 * For faults from user-space, we have to copy the machine state to the
 * PCB stack and then dispatch as normal.
 * For faults in kernel-space, we need to scrub for kernel exit faults and
 * treat these as user-space faults. But for all other kernel-space faults
 * we continue to run on the IST1 stack and we dispatch to handle the fault
 * as fatal.
 */
Entry(idt64_gen_prot)
	pushq	$(HNDL_ALLTRAPS)
	pushq	$(T_GENERAL_PROTECTION)
	jmp	L_dispatch

Entry(idt64_stack_fault)
	pushq	$(HNDL_ALLTRAPS)
	pushq	$(T_STACK_FAULT)
	jmp	L_dispatch

Entry(idt64_segnp)
	pushq	$(HNDL_ALLTRAPS)
	pushq	$(T_SEGMENT_NOT_PRESENT)
	jmp	L_dispatch

/*
 * Fatal exception handlers:
 */
Entry(idt64_db_task_dbl_fault)
	pushq	$(HNDL_DOUBLE_FAULT)
	pushq	$(T_DOUBLE_FAULT)
	jmp	L_dispatch

Entry(idt64_db_task_stk_fault)
	pushq	$(HNDL_DOUBLE_FAULT)
	pushq	$(T_STACK_FAULT)
	jmp	L_dispatch

Entry(idt64_mc)
	push	$(0)			/* Error */
	pushq	$(HNDL_MACHINE_CHECK)
	pushq	$(T_MACHINE_CHECK)
	jmp	L_dispatch

/*
 * NMI
 * This may or may not be fatal, but extreme care is required
 * because it may arrive while control is already in another trampoline.
 *
 * We get here on the IST2 stack, which is used exclusively for NMIs.
 * Machine checks, double faults and similar use IST1.
 */
Entry(idt64_nmi)
	push	%rax
	push	%rcx
	push	%rdx
	testb	$3, ISF64_CS(%rsp)
	jz	1f

	/* From user-space: copy interrupt state to user PCB */
	swapgs

	leaq	EXT(idt64_hndl_table0)(%rip), %rax
	mov	16(%rax), %rax		/* Offset of per-CPU shadow */
	mov	%gs:CPU_TASK_CR3(%rax), %rax
	mov	%rax, %cr3		/* note that SMAP is enabled in L_common_dispatch (on Broadwell+) */

	mov	%gs:CPU_UBER_ISF, %rcx	/* PCB stack addr */
	add	$(ISF64_SIZE), %rcx	/* adjust to base of ISF */

	leaq	TBL0_OFF_DISP_USER_WITH_POPRAX+EXT(idt64_hndl_table0)(%rip), %rax /* ks_dispatch_user_with_pop_rax */
	jmp	4f			/* Copy state to PCB */

1:
	/*
	 * From kernel-space:
	 * Determine whether the kernel or user GS is set.
	 * Set the high 32 bits of the return CS to 1 to ensure that we'll swapgs back correctly at IRET.
	 */
	mov	$(MSR_IA32_GS_BASE), %ecx
	rdmsr				/* read kernel gsbase */
	test	$0x80000000, %edx	/* test MSB of address */
	jnz	2f
	swapgs				/* so swap */
	movl	$1, ISF64_CS+4(%rsp)	/* and set flag in CS slot */
2:

	leaq	EXT(idt64_hndl_table0)(%rip), %rax
	mov	16(%rax), %rax		/* Offset of per-CPU shadow */
	mov	%cr3, %rdx
	mov	%gs:CPU_TASK_CR3(%rax), %rax
	mov	%rax, %cr3		/* Unconditionally switch to primary kernel pagetables */

	/*
	 * Determine whether we're on the kernel or interrupt stack
	 * when the NMI hit.
	 */
	mov	ISF64_RSP(%rsp), %rcx
	mov	%gs:CPU_KERNEL_STACK, %rax
	xor	%rcx, %rax
	movq	TBL0_OFF_PTR_KERNEL_STACK_MASK+EXT(idt64_hndl_table0)(%rip), %rdx
	mov	(%rdx), %rdx		/* Load kernel_stack_mask */
	and	%rdx, %rax
	test	%rax, %rax		/* are we on the kernel stack? */
	jz	3f			/* yes */

	mov	%gs:CPU_INT_STACK_TOP, %rax
	cmp	%rcx, %rax		/* are we on the interrupt stack? */
	jb	5f			/* no */
	leaq	-INTSTACK_SIZE(%rax), %rax
	cmp	%rcx, %rax
	jb	3f			/* yes */
5:
	mov	%gs:CPU_KERNEL_STACK, %rcx
3:
	/* 16-byte-align kernel/interrupt stack for state push */
	and	$0xFFFFFFFFFFFFFFF0, %rcx

	leaq	TBL0_OFF_DISP_KERN_WITH_POPRAX+EXT(idt64_hndl_table0)(%rip), %rax /* ks_dispatch_kernel_with_pop_rax */
4:
	/*
	 * Copy state from NMI stack (RSP) to the save area (RCX) which is
	 * the PCB for user or kernel/interrupt stack from kernel.
	 *	ISF64_ERR(RSP)    saved RAX
	 *	ISF64_TRAPFN(RSP) saved RCX
	 *	ISF64_TRAPNO(RSP) saved RDX
	 */
	xchg	%rsp, %rcx		/* set for pushes */
	push	ISF64_SS(%rcx)
	push	ISF64_RSP(%rcx)
	push	ISF64_RFLAGS(%rcx)
	push	ISF64_CS(%rcx)
	push	ISF64_RIP(%rcx)
	/* Synthesize common interrupt stack frame */
	push	$(0)			/* error code 0 */
	push	$(HNDL_ALLINTRS)	/* trapfn allintrs */
	push	$(T_NMI)		/* trapno T_NMI */
	push	ISF64_ERR(%rcx)		/* saved %rax is popped in ks_dispatch_{kernel|user}_with_pop_rax */
	mov	ISF64_TRAPNO(%rcx), %rdx
	mov	ISF64_TRAPFN(%rcx), %rcx

	jmp	*(%rax)			/* ks_dispatch_{kernel|user}_with_pop_rax */

Entry(idt64_double_fault)
	pushq	$(HNDL_DOUBLE_FAULT)
	pushq	$(T_DOUBLE_FAULT)
	jmp	L_dispatch

Entry(hi64_syscall)
Entry(idt64_syscall)
	swapgs
	/*
	 * Use RAX as a temporary by shifting its contents into R11[32:63].
	 * The system call number is defined to be a 32-bit quantity, as is
	 * RFLAGS.
	 */
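	/*
	 * Note: the SYSCALL instruction deposits the user RIP in %rcx and
	 * the user RFLAGS in %r11, which is why those two registers carry
	 * the return context captured into the ISF below; only R11's low
	 * 32 bits matter, freeing its upper half to shelter RAX.
	 */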
	shlq	$32, %rax
	or	%rax, %r11
	.globl	EXT(dblsyscall_patch_point)
EXT(dblsyscall_patch_point):
//	movabsq	$0x12345678ABCDEFFFULL, %rax
	/* Generate offset to the double-mapped per-CPU data shadow
	 * into RAX
	 */
	leaq	EXT(idt64_hndl_table0)(%rip), %rax
	mov	16(%rax), %rax
	mov	%rsp, %gs:CPU_UBER_TMP(%rax)	/* save user stack */
	mov	%gs:CPU_ESTACK(%rax), %rsp	/* switch stack to per-cpu estack */
	sub	$(ISF64_SIZE), %rsp

	/*
	 * Synthesize an ISF frame on the exception stack
	 */
	movl	$(USER_DS), ISF64_SS(%rsp)
	mov	%rcx, ISF64_RIP(%rsp)		/* rip */

	mov	%gs:CPU_UBER_TMP(%rax), %rcx
	mov	%rcx, ISF64_RSP(%rsp)		/* user stack */

	mov	%r11, %rax
	shrq	$32, %rax		/* Restore RAX */
	mov	%r11d, %r11d		/* Clear r11[32:63] */

	mov	%r11, ISF64_RFLAGS(%rsp)	/* rflags */
	movl	$(SYSCALL_CS), ISF64_CS(%rsp)	/* cs - a pseudo-segment */
	mov	%rax, ISF64_ERR(%rsp)		/* err/rax - syscall code */
	movq	$(HNDL_SYSCALL), ISF64_TRAPFN(%rsp)
	movq	$(T_SYSCALL), ISF64_TRAPNO(%rsp)	/* trapno */
	swapgs
	jmp	L_dispatch		/* this can only be 64-bit */

Entry(hi64_sysenter)
Entry(idt64_sysenter)
	/* Synthesize an interrupt stack frame onto the
	 * exception stack.
	 */
	push	$(USER_DS)		/* ss */
	push	%rcx			/* uesp */
	pushf				/* flags */
	/*
	 * Clear, among others, the Nested Task (NT) flag bit;
	 * this is zeroed by INT, but not by SYSENTER.
	 */
	push	$0
	popf
	push	$(SYSENTER_CS)		/* cs */
L_sysenter_continue:
	push	%rdx			/* eip */
	push	%rax			/* err/eax - syscall code */
	pushq	$(HNDL_SYSENTER)
	pushq	$(T_SYSENTER)
	orl	$(EFL_IF), ISF64_RFLAGS(%rsp)
	jmp	L_u64bit_entry_check

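/*
 * Note: this entry convention mirrors SYSEXIT, which reloads %eip from
 * %edx and %esp from %ecx; user code therefore arrives at SYSENTER with
 * its return eip in %rdx and its user stack pointer in %rcx, and those
 * are exactly what the frame above records.
 */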
/*
 * Common dispatch point.
 * Determine what mode has been interrupted and save state accordingly.
 * Here with:
 *	rsp	from user-space:   interrupt state in PCB, or
 *		from kernel-space: interrupt state in kernel or interrupt stack
 *	GSBASE	from user-space:   pthread area, or
 *		from kernel-space: cpu_data
 */

L_dispatch:
	pushq	%rax
	testb	$3, 8+ISF64_CS(%rsp)
	jz	1f
	swapgs
	leaq	EXT(idt64_hndl_table0)(%rip), %rax
	mov	16(%rax), %rax
L_dispatch_kgsb:
	mov	%gs:CPU_TASK_CR3(%rax), %rax
	mov	%rax, %cr3
#if DEBUG
	mov	%rax, %gs:CPU_ENTRY_CR3
#endif
1:
	leaq	EXT(idt64_hndl_table0)(%rip), %rax
	/* The text/data relationship here must be preserved in the doublemap, and the contents must be remapped */
	/* Indirect branch to non-doublemapped trampolines */
	jmp	*(%rax)
/* User return: register restoration and address space switch sequence */
Entry(ks_64bit_return)
	mov	R64_R14(%r15), %r14
	mov	R64_R13(%r15), %r13
	mov	R64_R12(%r15), %r12
	mov	R64_R11(%r15), %r11
	mov	R64_R10(%r15), %r10
	mov	R64_R9(%r15),  %r9
	mov	R64_R8(%r15),  %r8
	mov	R64_RSI(%r15), %rsi
	mov	R64_RDI(%r15), %rdi
	mov	R64_RBP(%r15), %rbp
	mov	R64_RDX(%r15), %rdx
	mov	R64_RCX(%r15), %rcx
	mov	R64_RBX(%r15), %rbx
	mov	R64_RAX(%r15), %rax
	/* Switch to per-CPU exception stack */
	mov	%gs:CPU_ESTACK, %rsp

	/* Synthesize interrupt stack frame from PCB savearea to exception stack */
	push	R64_SS(%r15)
	push	R64_RSP(%r15)
	push	R64_RFLAGS(%r15)
	push	R64_CS(%r15)
	push	R64_RIP(%r15)

	mov	R64_R15(%r15), %r15
	cmpq	$(KERNEL64_CS), 8(%rsp)
	jz	1f
	/* Discover user cr3/ASID */
	push	%rax
	mov	%gs:CPU_UCR3, %rax
#if DEBUG
	mov	%rax, %gs:CPU_EXIT_CR3
#endif
	mov	%rax, %cr3
	/* Continue execution on the shared/doublemapped trampoline */
	pop	%rax
	swapgs
1:
	cmpl	$(SYSCALL_CS), 8(%rsp)	/* test for exit via SYSRET */
	je	L_sysret
EXT(ret64_iret):
	iretq				/* return from interrupt */
L_sysret:
	/*
	 * Here to restore rcx/r11/rsp and perform the sysret back to user-space.
	 *	rcx	user rip
	 *	r11	user rflags
	 *	rsp	user stack pointer
	 */
	pop	%rcx
	add	$8, %rsp
	pop	%r11
	pop	%rsp
	sysretq				/* return from system call */
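	/*
	 * Note: SYSRETQ reloads %rip from %rcx and %rflags from %r11 and
	 * drops to CPL 3 with selectors derived from IA32_STAR, which is
	 * why only rcx, r11 and the user rsp need staging above.
	 */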

L_u64bit_entry_check:
	/*
	 * Check we're not a confused 64-bit user.
	 */
	pushq	%rax
	swapgs
	leaq	EXT(idt64_hndl_table0)(%rip), %rax
	mov	16(%rax), %rax

	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP(%rax)
	jne	L_64bit_entry_reject
	jmp	L_dispatch_kgsb

L_64bit_entry_reject:
	/*
	 * Here for a 64-bit user attempting an invalid kernel entry.
	 */
	movq	$(HNDL_ALLTRAPS), 8+ISF64_TRAPFN(%rsp)
	movq	$(T_INVALID_OPCODE), 8+ISF64_TRAPNO(%rsp)
	jmp	L_dispatch_kgsb
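	/*
	 * The rejected entry is recast as a T_INVALID_OPCODE trap, so the
	 * generic trap path delivers an exception to the 64-bit task
	 * instead of honoring a 32-bit-only entry point.
	 */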

/* End of double-mapped TEXT */
	.text

Entry(ks_dispatch)
	popq	%rax
	cmpl	$(KERNEL64_CS), ISF64_CS(%rsp)
	je	EXT(ks_dispatch_kernel)

	mov	%rax, %gs:CPU_UBER_TMP
	mov	%gs:CPU_UBER_ISF, %rax
	add	$(ISF64_SIZE), %rax

	xchg	%rsp, %rax
	/* Memory to memory moves (ain't x86 wonderful):
	 * Transfer the exception frame from the per-CPU exception stack to the
	 * 'PCB' stack programmed at cswitch.
	 */
	push	ISF64_SS(%rax)
	push	ISF64_RSP(%rax)
	push	ISF64_RFLAGS(%rax)
	push	ISF64_CS(%rax)
	push	ISF64_RIP(%rax)
	push	ISF64_ERR(%rax)
	push	ISF64_TRAPFN(%rax)
	push	ISF64_TRAPNO(%rax)
	mov	%gs:CPU_UBER_TMP, %rax
	jmp	EXT(ks_dispatch_user)

Entry(ks_dispatch_user_with_pop_rax)
	pop	%rax
	jmp	EXT(ks_dispatch_user)

Entry(ks_return)
	jmp	.

Entry(ks_dispatch_user)
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	je	L_dispatch_U32		/* 32-bit user task */

L_dispatch_U64:
	subq	$(ISS64_OFFSET), %rsp
	mov	%r15, R64_R15(%rsp)
	mov	%rsp, %r15
	mov	%gs:CPU_KERNEL_STACK, %rsp
	jmp	L_dispatch_64bit

Entry(ks_dispatch_kernel_with_pop_rax)
	pop	%rax
	jmp	EXT(ks_dispatch_kernel)

Entry(ks_dispatch_kernel)
	subq	$(ISS64_OFFSET), %rsp
	mov	%r15, R64_R15(%rsp)
	mov	%rsp, %r15

/*
 * Here for 64-bit user task or kernel
 */
L_dispatch_64bit:
	movl	$(SS_64), SS_FLAVOR(%r15)

	/*
	 * Save segment regs - for completeness since they're not used.
	 */
	mov	%fs, R64_FS(%r15)
	mov	%gs, R64_GS(%r15)

	/* Save general-purpose registers */
	mov	%rax, R64_RAX(%r15)
	mov	%rbx, R64_RBX(%r15)
	mov	%rcx, R64_RCX(%r15)
	mov	%rdx, R64_RDX(%r15)
	mov	%rbp, R64_RBP(%r15)
	mov	%rdi, R64_RDI(%r15)
	mov	%rsi, R64_RSI(%r15)
	mov	%r8,  R64_R8(%r15)
	mov	%r9,  R64_R9(%r15)
	mov	%r10, R64_R10(%r15)
	mov	%r11, R64_R11(%r15)
	mov	%r12, R64_R12(%r15)
	mov	%r13, R64_R13(%r15)
	mov	%r14, R64_R14(%r15)

	/* Zero unused GPRs. BX/DX/SI are clobbered elsewhere across the exception handler, and are skipped. */
	xor	%ecx, %ecx
	xor	%edi, %edi
	xor	%r8, %r8
	xor	%r9, %r9
	xor	%r10, %r10
	xor	%r11, %r11
	xor	%r12, %r12
	xor	%r13, %r13
	xor	%r14, %r14

	/* cr2 is significant only for page-faults */
	mov	%cr2, %rax
	mov	%rax, R64_CR2(%r15)

	mov	R64_TRAPNO(%r15), %ebx	/* %ebx := trapno for later */
	mov	R64_TRAPFN(%r15), %rdx	/* %rdx := trapfn for later */
	mov	R64_CS(%r15), %esi	/* %esi := cs for later */

	jmp	L_common_dispatch

L_dispatch_U32: /* 32-bit user task */
	subq	$(ISS64_OFFSET), %rsp
	mov	%rsp, %r15
	mov	%gs:CPU_KERNEL_STACK, %rsp
	movl	$(SS_32), SS_FLAVOR(%r15)

	/*
	 * Save segment regs
	 */
	mov	%ds, R32_DS(%r15)
	mov	%es, R32_ES(%r15)
	mov	%fs, R32_FS(%r15)
	mov	%gs, R32_GS(%r15)

	/*
	 * Save general 32-bit registers
	 */
	mov	%eax, R32_EAX(%r15)
	mov	%ebx, R32_EBX(%r15)
	mov	%ecx, R32_ECX(%r15)
	mov	%edx, R32_EDX(%r15)
	mov	%ebp, R32_EBP(%r15)
	mov	%esi, R32_ESI(%r15)
	mov	%edi, R32_EDI(%r15)

	/* Unconditionally save cr2; only meaningful on page faults */
	mov	%cr2, %rax
	mov	%eax, R32_CR2(%r15)
	/* Zero unused GPRs. BX/DX/SI/R15 are clobbered elsewhere across the exception handler, and are skipped. */
	xor	%ecx, %ecx
	xor	%edi, %edi
	xor	%r8, %r8
	xor	%r9, %r9
	xor	%r10, %r10
	xor	%r11, %r11
	xor	%r12, %r12
	xor	%r13, %r13
	xor	%r14, %r14

	/*
	 * Copy registers already saved in the machine state
	 * (in the interrupt stack frame) into the compat save area.
	 */
	mov	R64_RIP(%r15), %eax
	mov	%eax, R32_EIP(%r15)
	mov	R64_RFLAGS(%r15), %eax
	mov	%eax, R32_EFLAGS(%r15)
	mov	R64_RSP(%r15), %eax
	mov	%eax, R32_UESP(%r15)
	mov	R64_SS(%r15), %eax
	mov	%eax, R32_SS(%r15)
L_dispatch_U32_after_fault:
	mov	R64_CS(%r15), %esi	/* %esi := %cs for later */
	mov	%esi, R32_CS(%r15)
	mov	R64_TRAPNO(%r15), %ebx	/* %ebx := trapno for later */
	mov	%ebx, R32_TRAPNO(%r15)
	mov	R64_ERR(%r15), %eax
	mov	%eax, R32_ERR(%r15)
	mov	R64_TRAPFN(%r15), %rdx	/* %rdx := trapfn for later */

L_common_dispatch:
	cld				/* Ensure the direction flag is clear in the kernel */
	cmpl	$0, EXT(pmap_smap_enabled)(%rip)
	je	1f
	clac				/* Clear EFLAGS.AC if SMAP is present/enabled */
1:
	/*
	 * On entering the kernel, we typically don't switch CR3
	 * because the kernel shares the user's address space.
	 * But we mark the kernel's cr3 as "active" for TLB coherency evaluation.
	 * If, however, the CPU's invalid TLB flag is set, we have to invalidate the TLB
	 * since the kernel pagetables were changed while we were in userspace.
	 *
	 * For threads with a mapped pagezero (some WINE games) on non-SMAP platforms,
	 * we switch to the kernel's address space on entry. Also,
	 * if the global no_shared_cr3 is TRUE we do switch to the kernel's cr3
	 * so that illicit accesses to userspace can be trapped.
	 */
	mov	%gs:CPU_KERNEL_CR3, %rcx
	mov	%rcx, %gs:CPU_ACTIVE_CR3
	test	$3, %esi		/* user/kernel? */
	jz	2f			/* skip cr3 reload from kernel */
	xor	%rbp, %rbp
	cmpl	$0, %gs:CPU_PAGEZERO_MAPPED
	jnz	11f
	cmpl	$0, EXT(no_shared_cr3)(%rip)
	je	2f
11:
	xor	%eax, %eax
	movw	%gs:CPU_KERNEL_PCID, %ax
	or	%rax, %rcx
	mov	%rcx, %cr3		/* load kernel cr3 */
	jmp	4f			/* and skip tlb flush test */
2:
	mov	%gs:CPU_ACTIVE_CR3+4, %rcx
	shr	$32, %rcx
	testl	%ecx, %ecx
	jz	4f
	movl	$0, %gs:CPU_TLB_INVALID
	mov	%cr4, %rcx		/* RMWW CR4, for lack of an alternative */
	and	$(~CR4_PGE), %rcx
	mov	%rcx, %cr4
	or	$(CR4_PGE), %rcx
	mov	%rcx, %cr4
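	/*
	 * Note: toggling CR4.PGE off and on invalidates all TLB entries,
	 * including global ones; a bare MOV to CR3 would leave global
	 * kernel mappings cached, so this is the full-flush idiom.
	 */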
4:
	mov	%gs:CPU_ACTIVE_THREAD, %rcx	/* Get the active thread */
	testq	%rcx, %rcx
	je	5f
	movl	$-1, TH_IOTIER_OVERRIDE(%rcx)	/* Reset IO tier override to -1 before handling trap */
	cmpq	$0, TH_PCB_IDS(%rcx)	/* Is there a debug register state? */
	je	5f
	xor	%ecx, %ecx		/* If so, reset DR7 (the control) */
	mov	%rcx, %dr7
5:
	incl	%gs:hwIntCnt(,%ebx,4)	// Bump the trap/intr count
	/* Dispatch the designated handler */
	cmp	EXT(dblmap_base)(%rip), %rsp
	jb	66f
	cmp	EXT(dblmap_max)(%rip), %rsp
	jge	66f
	subq	EXT(dblmap_dist)(%rip), %rsp
	subq	EXT(dblmap_dist)(%rip), %r15
66:
	leaq	EXT(idt64_hndl_table1)(%rip), %rax
	jmp	*(%rax, %rdx, 8)

/*
 * Control is passed here to return to user.
 */
Entry(return_to_user)
	TIME_TRAP_UEXIT

Entry(ret_to_user)
	// XXX It'd be nice to tidy up this debug register restore sequence...
	mov	%gs:CPU_ACTIVE_THREAD, %rdx
	movq	TH_PCB_IDS(%rdx), %rax	/* Obtain this thread's debug state */

	test	%rax, %rax		/* Is there a debug register context? */
	je	2f			/* branch if not */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP	/* Are we a 32-bit task? */
	jne	1f
	movl	DS_DR0(%rax), %ecx	/* If so, load the 32 bit DRs */
	movq	%rcx, %dr0
	movl	DS_DR1(%rax), %ecx
	movq	%rcx, %dr1
	movl	DS_DR2(%rax), %ecx
	movq	%rcx, %dr2
	movl	DS_DR3(%rax), %ecx
	movq	%rcx, %dr3
	movl	DS_DR7(%rax), %ecx
	movq	%rcx, %gs:CPU_DR7
	jmp	2f
1:
	mov	DS64_DR0(%rax), %rcx	/* Load the full width DRs */
	mov	%rcx, %dr0
	mov	DS64_DR1(%rax), %rcx
	mov	%rcx, %dr1
	mov	DS64_DR2(%rax), %rcx
	mov	%rcx, %dr2
	mov	DS64_DR3(%rax), %rcx
	mov	%rcx, %dr3
	mov	DS64_DR7(%rax), %rcx
	mov	%rcx, %gs:CPU_DR7
2:
	/*
	 * On exiting the kernel there's typically no need to switch cr3 since we're
	 * already running in the user's address space which includes the
	 * kernel. We now mark the task's cr3 as active, for TLB coherency.
	 * If the target address space has a pagezero mapping present, or
	 * if no_shared_cr3 is set, we do need to switch cr3 at this point.
	 */
	mov	%gs:CPU_TASK_CR3, %rcx
	mov	%rcx, %gs:CPU_ACTIVE_CR3
	cmpl	$0, %gs:CPU_PAGEZERO_MAPPED
	jnz	L_cr3_switch_island
	movl	EXT(no_shared_cr3)(%rip), %eax
	test	%eax, %eax		/* -no_shared_cr3 */
	jnz	L_cr3_switch_island

L_cr3_switch_return:
	mov	%gs:CPU_DR7, %rax	/* Is there a debug control register? */
	cmp	$0, %rax
	je	4f
	mov	%rax, %dr7		/* Set DR7 */
	movq	$0, %gs:CPU_DR7
4:
	cmpl	$(SS_64), SS_FLAVOR(%r15)	/* 64-bit state? */
	je	L_64bit_return

L_32bit_return:
#if DEBUG_IDT64
	cmpl	$(SS_32), SS_FLAVOR(%r15)	/* 32-bit state? */
	je	1f
	cli
	POSTCODE2(0x6432)
	CCALL1(panic_idt64, %r15)
1:
#endif /* DEBUG_IDT64 */

	/*
	 * Restore registers into the machine state for iret.
	 * Here on fault stack and PCB address in R15.
	 */
	movl	R32_EIP(%r15), %eax
	movl	%eax, R64_RIP(%r15)
	movl	R32_EFLAGS(%r15), %eax
	movl	%eax, R64_RFLAGS(%r15)
	movl	R32_CS(%r15), %eax
	movl	%eax, R64_CS(%r15)
	movl	R32_UESP(%r15), %eax
	movl	%eax, R64_RSP(%r15)
	movl	R32_SS(%r15), %eax
	movl	%eax, R64_SS(%r15)

	/* Validate DS/ES/FS/GS segment selectors with the Load Access Rights instruction prior to restoration */
	/* Exempt "known good" statically configured selectors, e.g. USER_DS and 0 */
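	/* (LAR sets ZF when the selector is present and accessible; a selector
	 * that fails the check is replaced with a known-good default below.) */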
	cmpl	$(USER_DS), R32_DS(%r15)
	jz	22f
	cmpl	$0, R32_DS(%r15)
	jz	22f
	larw	R32_DS(%r15), %ax
	jz	22f
	movl	$(USER_DS), R32_DS(%r15)
22:
	cmpl	$(USER_DS), R32_ES(%r15)
	jz	33f
	cmpl	$0, R32_ES(%r15)
	jz	33f
	larw	R32_ES(%r15), %ax
	jz	33f
	movl	$(USER_DS), R32_ES(%r15)
33:
	cmpl	$(USER_DS), R32_FS(%r15)
	jz	44f
	cmpl	$0, R32_FS(%r15)
	jz	44f
	larw	R32_FS(%r15), %ax
	jz	44f
	movl	$(USER_DS), R32_FS(%r15)
44:
	cmpl	$(USER_CTHREAD), R32_GS(%r15)
	jz	55f
	cmpl	$0, R32_GS(%r15)
	jz	55f
	larw	R32_GS(%r15), %ax
	jz	55f
	movl	$(USER_CTHREAD), R32_GS(%r15)
55:
	/*
	 * Restore general 32-bit registers
	 */
	movl	R32_EAX(%r15), %eax
	movl	R32_EBX(%r15), %ebx
	movl	R32_ECX(%r15), %ecx
	movl	R32_EDX(%r15), %edx
	movl	R32_EBP(%r15), %ebp
	movl	R32_ESI(%r15), %esi
	movl	R32_EDI(%r15), %edi

	/*
	 * Restore segment registers. A segment exception taken here will
	 * push state on the IST1 stack and will not affect the "PCB stack".
	 */
	mov	%r15, %rsp		/* Set the PCB as the stack */
	swapgs

	/* Zero 64-bit-exclusive GPRs to prevent data leaks */
	xor	%r8, %r8
	xor	%r9, %r9
	xor	%r10, %r10
	xor	%r11, %r11
	xor	%r12, %r12
	xor	%r13, %r13
	xor	%r14, %r14
	xor	%r15, %r15

EXT(ret32_set_ds):
	movw	R32_DS(%rsp), %ds
EXT(ret32_set_es):
	movw	R32_ES(%rsp), %es
EXT(ret32_set_fs):
	movw	R32_FS(%rsp), %fs
EXT(ret32_set_gs):
	movw	R32_GS(%rsp), %gs

	/* pop compat frame + trapno, trapfn and error */
	add	$(ISS64_OFFSET)+8+8+8, %rsp
	cmpl	$(SYSENTER_CS), ISF64_CS-8-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_fast_exit
EXT(ret32_iret):
	iretq				/* return from interrupt */

L_fast_exit:
	pop	%rdx			/* user return eip */
	pop	%rcx			/* pop and toss cs */
	andl	$(~EFL_IF), (%rsp)	/* clear interrupts enable, sti below */
	popf				/* flags - carry denotes failure */
	pop	%rcx			/* user return esp */
	sti				/* interrupts enabled after sysexit */
	sysexitl			/* 32-bit sysexit */
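	/*
	 * Note: SYSEXIT reloads %eip from %edx and %esp from %ecx, hence
	 * the pops above; the STI immediately before takes effect only
	 * after the following instruction, so no interrupt can arrive
	 * between enabling and the sysexit itself.
	 */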

L_cr3_switch_island:
	xor	%eax, %eax
	movw	%gs:CPU_ACTIVE_PCID, %ax
	or	%rax, %rcx
	mov	%rcx, %cr3
	jmp	L_cr3_switch_return

ret_to_kernel:
#if DEBUG_IDT64
	cmpl	$(SS_64), SS_FLAVOR(%r15)	/* 64-bit state? */
	je	1f
	cli
	POSTCODE2(0x6464)
	CCALL1(panic_idt64, %r15)
	hlt
1:
	cmpl	$(KERNEL64_CS), R64_CS(%r15)
	je	2f
	CCALL1(panic_idt64, %r15)
	hlt
2:
#endif

L_64bit_return:
	/*
	 * Restore general 64-bit registers.
	 * Here on fault stack and PCB address in R15.
	 */
	leaq	EXT(idt64_hndl_table0)(%rip), %rax
	jmp	*8(%rax)

Entry(ks_idt64_debug_kernel)
	/*
	 * trap came from kernel mode
	 */

	push	%rax			/* save %rax temporarily */
	lea	EXT(idt64_sysenter)(%rip), %rax
	cmp	%rax, ISF64_RIP+8(%rsp)
	pop	%rax
	jne	EXT(ks_dispatch_kernel)
	/*
	 * Interrupt stack frame has been pushed on the temporary stack.
	 * We have to switch to pcb stack and patch up the saved state.
	 */
	mov	%rcx, ISF64_ERR(%rsp)	/* save %rcx in error slot */
	mov	ISF64_SS+8(%rsp), %rcx	/* top of temp stack -> pcb stack */
	xchg	%rcx, %rsp		/* switch to pcb stack */
	push	$(USER_DS)		/* ss */
	push	ISF64_ERR(%rcx)		/* saved %rcx into rsp slot */
	push	ISF64_RFLAGS(%rcx)	/* rflags */
	push	$(SYSENTER_TF_CS)	/* cs - not SYSENTER_CS for iret path */
	mov	ISF64_ERR(%rcx), %rcx	/* restore %rcx */
	jmp	L_sysenter_continue	/* continue sysenter entry */

Entry(ks_trap_check_kernel_exit)
	testb	$3, ISF64_CS(%rsp)
	jz	L_kernel_gpf

	/* Here for fault from user-space. Copy interrupt state to PCB. */
	swapgs
	push	%rax
	mov	%rcx, %gs:CPU_UBER_TMP	/* save user RCX */
	mov	%gs:CPU_UBER_ISF, %rcx	/* PCB stack addr */
	mov	ISF64_SS+8(%rsp), %rax
	mov	%rax, ISF64_SS(%rcx)
	mov	ISF64_RSP+8(%rsp), %rax
	mov	%rax, ISF64_RSP(%rcx)
	mov	ISF64_RFLAGS+8(%rsp), %rax
	mov	%rax, ISF64_RFLAGS(%rcx)
	mov	ISF64_CS+8(%rsp), %rax
	mov	%rax, ISF64_CS(%rcx)
	mov	ISF64_RIP+8(%rsp), %rax
	mov	%rax, ISF64_RIP(%rcx)
	mov	ISF64_ERR+8(%rsp), %rax
	mov	%rax, ISF64_ERR(%rcx)
	mov	ISF64_TRAPFN+8(%rsp), %rax
	mov	%rax, ISF64_TRAPFN(%rcx)
	mov	ISF64_TRAPNO+8(%rsp), %rax
	mov	%rax, ISF64_TRAPNO(%rcx)
	pop	%rax
	mov	%gs:CPU_UBER_TMP, %rsp	/* user RCX into RSP */
	xchg	%rcx, %rsp		/* to PCB stack with user RCX */
	jmp	EXT(ks_dispatch_user)

L_kernel_gpf:
	/* Here for GPF from kernel_space. Check for recoverable cases. */
	push	%rax
	leaq	EXT(ret32_iret)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_fault_iret
	leaq	EXT(ret64_iret)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_fault_iret
	leaq	EXT(ret32_set_ds)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_es)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_fs)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_gs)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	jmp	EXT(ks_kernel_trap)
	/* Fall through */

Entry(ks_kernel_trap)
	/*
	 * Here after taking an unexpected trap from kernel mode - perhaps
	 * while running in the trampolines hereabouts.
	 * Note: %rax has been pushed on stack.
	 * Make sure we're not on the PCB stack; if so, move to the kernel stack.
	 * This is likely a fatal condition.
	 * But first, ensure we have the kernel gs base active...
	 */
	push	%rcx
	push	%rdx
	mov	$(MSR_IA32_GS_BASE), %ecx
	rdmsr				/* read kernel gsbase */
	test	$0x80000000, %edx	/* test MSB of address */
	jne	1f
	swapgs				/* so swap */
1:
	pop	%rdx
	pop	%rcx

	movq	%gs:CPU_UBER_ISF, %rax	/* PCB stack addr */
	subq	%rsp, %rax
	cmpq	$(PAGE_SIZE), %rax	/* current stack in PCB? */
	jb	2f			/* - yes, deal with it */
	pop	%rax			/* - no, restore %rax */
	jmp	EXT(ks_dispatch_kernel)
2:
	/*
	 * Here if %rsp is in the PCB
	 * Copy the interrupt stack frame from PCB stack to kernel stack
	 */
	movq	%gs:CPU_KERNEL_STACK, %rax
	xchgq	%rax, %rsp
	pushq	8+ISF64_SS(%rax)
	pushq	8+ISF64_RSP(%rax)
	pushq	8+ISF64_RFLAGS(%rax)
	pushq	8+ISF64_CS(%rax)
	pushq	8+ISF64_RIP(%rax)
	pushq	8+ISF64_ERR(%rax)
	pushq	8+ISF64_TRAPFN(%rax)
	pushq	8+ISF64_TRAPNO(%rax)
	movq	(%rax), %rax
	jmp	EXT(ks_dispatch_kernel)


/*
 * GP/NP fault on IRET: CS or SS is in error.
 * User GSBASE is active.
 * On IST1 stack containing:
 *	(rax saved above, which is immediately popped)
 *	 0	ISF64_TRAPNO:	trap code (NP or GP)
 *	 8	ISF64_TRAPFN:	trap function
 *	16	ISF64_ERR:	segment number in error (error code)
 *	24	ISF64_RIP:	kernel RIP
 *	32	ISF64_CS:	kernel CS
 *	40	ISF64_RFLAGS:	kernel RFLAGS
 *	48	ISF64_RSP:	kernel RSP
 *	56	ISF64_SS:	kernel SS
 * On the PCB stack, pointed to by the kernel's RSP is:
 *	 0	user RIP
 *	 8	user CS
 *	16	user RFLAGS
 *	24	user RSP
 *	32	user SS
 *
 * We need to move the kernel's TRAPNO, TRAPFN and ERR to the PCB and handle
 * as a user fault with:
 *	 0	ISF64_TRAPNO:	trap code (NP or GP)
 *	 8	ISF64_TRAPFN:	trap function
 *	16	ISF64_ERR:	segment number in error (error code)
 *	24	user RIP
 *	32	user CS
 *	40	user RFLAGS
 *	48	user RSP
 *	56	user SS
 */
L_fault_iret:
	pop	%rax			/* recover saved %rax */
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don't need saved rip) */
	mov	ISF64_RSP(%rsp), %rax
	xchg	%rax, %rsp		/* switch to PCB stack */
	push	ISF64_ERR(%rax)
	push	ISF64_TRAPFN(%rax)
	push	ISF64_TRAPNO(%rax)
	mov	ISF64_RIP(%rax), %rax	/* restore rax */
					/* now treat as fault from user */
	jmp	L_dispatch

/*
 * Fault restoring a segment register. All of the saved state is still
 * on the stack untouched since we haven't yet moved the stack pointer.
 * On IST1 stack containing:
 *	(rax saved above, which is immediately popped)
 *	 0	ISF64_TRAPNO:	trap code (NP or GP)
 *	 8	ISF64_TRAPFN:	trap function
 *	16	ISF64_ERR:	segment number in error (error code)
 *	24	ISF64_RIP:	kernel RIP
 *	32	ISF64_CS:	kernel CS
 *	40	ISF64_RFLAGS:	kernel RFLAGS
 *	48	ISF64_RSP:	kernel RSP
 *	56	ISF64_SS:	kernel SS
 * On the PCB stack, pointed to by the kernel's RSP is:
 *	 0	user trap code
 *	 8	user trap function
 *	16	user err
 *	24	user RIP
 *	32	user CS
 *	40	user RFLAGS
 *	48	user RSP
 *	56	user SS
 */
L_32bit_fault_set_seg:
	swapgs
	pop	%rax			/* toss saved %rax from stack */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	ISF64_TRAPFN(%rsp), %rcx
	mov	ISF64_ERR(%rsp), %rdx
	mov	ISF64_RSP(%rsp), %rsp	/* reset stack to saved state */
	mov	%rax, R64_TRAPNO(%rsp)
	mov	%rcx, R64_TRAPFN(%rsp)
	mov	%rdx, R64_ERR(%rsp)
					/* now treat as fault from user */
					/* except that all the state is */
					/* already saved - we just have to */
					/* move the trapno and error into */
					/* the compatibility frame */
	jmp	L_dispatch_U32_after_fault


/* All 'exceptions' enter hndl_alltraps, with:
 *	r15	x86_saved_state_t address
 *	rsp	kernel stack if user-space, otherwise interrupt or kernel stack
 *	esi	cs at trap
 *
 * The rest of the state is set up as:
 *	both rsp and r15 are 16-byte aligned
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(hndl_alltraps)
	mov	%esi, %eax
	testb	$3, %al
	jz	trap_from_kernel

	TIME_TRAP_UENTRY

	/* Check for active vtimers in the current task */
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	movl	$-1, TH_IOTIER_OVERRIDE(%rcx)	/* Reset IO tier override to -1 before handling trap/exception */
	mov	TH_TASK(%rcx), %rbx
	TASK_VTIMER_CHECK(%rbx, %rcx)

	CCALL1(user_trap, %r15)		/* call user trap routine */
	/* user_trap() unmasks interrupts */
	cli				/* hold off intrs - critical section */
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */


Entry(return_from_trap)
	movq	%gs:CPU_ACTIVE_THREAD, %r15	/* Get current thread */
	movl	$-1, TH_IOTIER_OVERRIDE(%r15)	/* Reset IO tier override to -1 before returning to userspace */
	cmpl	$0, TH_RWLOCK_COUNT(%r15)	/* Check if current thread has pending RW locks held */
	jz	1f
	xorq	%rbp, %rbp		/* clear framepointer */
	mov	%r15, %rdi		/* Set RDI to current thread */
	CCALL(lck_rw_clear_promotions_x86)	/* Clear promotions if needed */
1:
	movq	TH_PCB_ISS(%r15), %r15	/* PCB stack */
	movl	%gs:CPU_PENDING_AST, %eax
	testl	%eax, %eax
	je	EXT(return_to_user)	/* branch if no AST */

L_return_from_trap_with_ast:
	testl	%ecx, %ecx		/* see if we need to check for an EIP in the PFZ */
	je	2f			/* no, go handle the AST */
	cmpl	$(SS_64), SS_FLAVOR(%r15)	/* are we a 64-bit task? */
	je	1f
	/* no... 32-bit user mode */
	movl	R32_EIP(%r15), %edi
	xorq	%rbp, %rbp		/* clear framepointer */
	CCALL(commpage_is_in_pfz32)
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R32_EBX(%r15)	/* let the PFZ know we've pended an AST */
	jmp	EXT(return_to_user)
1:
	movq	R64_RIP(%r15), %rdi
	xorq	%rbp, %rbp		/* clear framepointer */
	CCALL(commpage_is_in_pfz64)
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R64_RBX(%r15)	/* let the PFZ know we've pended an AST */
	jmp	EXT(return_to_user)
2:

	xorq	%rbp, %rbp		/* clear framepointer */
	CCALL(ast_taken_user)		/* handle all ASTs (enables interrupts, may return via continuation) */

	cli
	mov	%rsp, %r15		/* AST changes stack, saved state */
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)	/* and check again (rare) */

/*
 * Trap from kernel mode. No need to switch stacks.
 * Interrupts must be off here - we will set them to state at time of trap
 * as soon as it's safe for us to do so and not recurse doing preemption
 *
 */
trap_from_kernel:
	movq	%r15, %rdi		/* saved state addr */
	pushq	R64_RIP(%r15)		/* Simulate a CALL from fault point */
	pushq	%rbp			/* Extend framepointer chain */
	movq	%rsp, %rbp
	CCALLWITHSP(kernel_trap)	/* to kernel trap routine */
	popq	%rbp
	addq	$8, %rsp
	mov	%rsp, %r15		/* DTrace slides stack/saved-state */
	cli

	movl	%gs:CPU_PENDING_AST, %eax	/* get pending asts */
	testl	$(AST_URGENT), %eax	/* any urgent preemption? */
	je	ret_to_kernel		/* no, nothing to do */
	cmpl	$(T_PREEMPT), R64_TRAPNO(%r15)
	je	ret_to_kernel		/* T_PREEMPT handled in kernel_trap() */
	testl	$(EFL_IF), R64_RFLAGS(%r15)	/* interrupts disabled? */
	je	ret_to_kernel
	cmpl	$0, %gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel
	movq	%gs:CPU_KERNEL_STACK, %rax
	movq	%rsp, %rcx
	xorq	%rax, %rcx
	andq	EXT(kernel_stack_mask)(%rip), %rcx
	testq	%rcx, %rcx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	CCALL(ast_taken_kernel)		/* take the AST */

	mov	%rsp, %r15		/* AST changes stack, saved state */
	jmp	ret_to_kernel


/*
 * All interrupts on all tasks enter here with:
 *	r15	x86_saved_state_t
 *	rsp	kernel or interrupt stack
 *	esi	cs at trap
 *
 *	both rsp and r15 are 16-byte aligned
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(hndl_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movq	%gs:CPU_INT_STACK_TOP, %rcx
	cmpq	%rsp, %rcx
	jb	1f
	leaq	-INTSTACK_SIZE(%rcx), %rdx
	cmpq	%rsp, %rdx
	jb	int_from_intstack
1:
	xchgq	%rcx, %rsp		/* switch to interrupt stack */

	mov	%cr0, %rax		/* get cr0 */
	orl	$(CR0_TS), %eax		/* or in TS bit */
	mov	%rax, %cr0		/* set cr0 */

	pushq	%rcx			/* save pointer to old stack */
	pushq	%gs:CPU_INT_STATE	/* save previous intr state */
	movq	%r15, %gs:CPU_INT_STATE	/* set intr state */

	TIME_INT_ENTRY			/* do timing */

	/* Check for active vtimers in the current task */
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	mov	TH_TASK(%rcx), %rbx
	TASK_VTIMER_CHECK(%rbx, %rcx)

	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	CCALL1(interrupt, %r15)		/* call generic interrupt routine */

	.globl	EXT(return_to_iret)
LEXT(return_to_iret)			/* (label for kdb_kintr and hardclock) */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */

	popq	%gs:CPU_INT_STATE	/* reset/clear intr state pointer */
	popq	%rsp			/* switch back to old stack */

	movq	%gs:CPU_ACTIVE_THREAD, %rax
	movq	TH_PCB_FPS(%rax), %rax	/* get pcb's ifps */
	cmpq	$0, %rax		/* Is there a context */
	je	1f			/* Branch if not */
	movl	FP_VALID(%rax), %eax	/* Load fp_valid */
	cmpl	$0, %eax		/* Check if valid */
	jne	1f			/* Branch if valid */
	clts				/* Clear TS */
	jmp	2f
1:
	mov	%cr0, %rax		/* get cr0 */
	orl	$(CR0_TS), %eax		/* or in TS bit */
	mov	%rax, %cr0		/* set cr0 */
2:
	/* Load interrupted code segment into %eax */
	movl	R32_CS(%r15), %eax	/* assume 32-bit state */
	cmpl	$(SS_64), SS_FLAVOR(%r15)	/* 64-bit? */
#if DEBUG_IDT64
	jne	4f
	movl	R64_CS(%r15), %eax	/* 64-bit user mode */
	jmp	3f
4:
	cmpl	$(SS_32), SS_FLAVOR(%r15)
	je	3f
	POSTCODE2(0x6431)
	CCALL1(panic_idt64, %r15)
	hlt
#else
	jne	3f
	movl	R64_CS(%r15), %eax	/* 64-bit user mode */
#endif
3:
	testb	$3, %al			/* user mode, */
	jnz	ast_from_interrupt_user	/* go handle potential ASTs */
	/*
	 * we only want to handle preemption requests if
	 * the interrupt fell in the kernel context
	 * and preemption isn't disabled
	 */
	movl	%gs:CPU_PENDING_AST, %eax
	testl	$(AST_URGENT), %eax	/* any urgent requests? */
	je	ret_to_kernel		/* no, nothing to do */

	cmpl	$0, %gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel		/* yes, skip it */

	/*
	 * Take an AST from kernel space. We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
	CCALL(ast_taken_kernel)

	mov	%rsp, %r15		/* AST changes stack, saved state */
	jmp	ret_to_kernel


/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL
	incl	%gs:CPU_NESTED_ISTACK

	push	%gs:CPU_INT_STATE
	mov	%r15, %gs:CPU_INT_STATE

	CCALL1(interrupt, %r15)

	pop	%gs:CPU_INT_STATE

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL
	decl	%gs:CPU_NESTED_ISTACK

	jmp	ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST, %eax
	testl	%eax, %eax		/* pending ASTs? */
	je	EXT(ret_to_user)	/* no, nothing to do */

	TIME_TRAP_UENTRY

	movl	$1, %ecx		/* check if we're in the PFZ */
	jmp	L_return_from_trap_with_ast	/* return */


/* Syscall dispatch routines! */

/*
 *
 * 32-bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *	r15	x86_saved_state32_t
 *	rsp	kernel stack
 *
 *	both rsp and r15 are 16-byte aligned
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(hndl_sysenter)
	/*
	 * We can be here either for a mach syscall or a unix syscall,
	 * as indicated by the sign of the code:
	 */
	movl	R32_EAX(%r15), %eax
	testl	%eax, %eax
	js	EXT(hndl_mach_scall)	/* < 0 => mach */
					/* > 0 => unix */

Entry(hndl_unix_scall)

	TIME_TRAP_UENTRY

	movq	%gs:CPU_ACTIVE_THREAD, %rcx	/* get current thread */
	movq	TH_TASK(%rcx), %rbx	/* point to current task */
	incl	TH_SYSCALLS_UNIX(%rcx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx, %rcx)

	sti

	CCALL1(unix_syscall, %r15)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mach_scall)
	TIME_TRAP_UENTRY

	movq	%gs:CPU_ACTIVE_THREAD, %rcx	/* get current thread */
	movq	TH_TASK(%rcx), %rbx	/* point to current task */
	incl	TH_SYSCALLS_MACH(%rcx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx, %rcx)

	sti

	CCALL1(mach_call_munger, %r15)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mdep_scall)
	TIME_TRAP_UENTRY

	/* Check for active vtimers in the current task */
	movq	%gs:CPU_ACTIVE_THREAD, %rcx	/* get current thread */
	movq	TH_TASK(%rcx), %rbx	/* point to current task */
	TASK_VTIMER_CHECK(%rbx, %rcx)

	sti

	CCALL1(machdep_syscall, %r15)
	/*
	 * always returns through thread_exception_return
	 */

/*
 * 64-bit Tasks
 * System call entries via syscall only:
 *
 *	r15	x86_saved_state64_t
 *	rsp	kernel stack
 *
 *	both rsp and r15 are 16-byte aligned
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(hndl_syscall)
	TIME_TRAP_UENTRY

	movq	%gs:CPU_ACTIVE_THREAD, %rcx	/* get current thread */
	movl	$-1, TH_IOTIER_OVERRIDE(%rcx)	/* Reset IO tier override to -1 before handling syscall */
	movq	TH_TASK(%rcx), %rbx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx, %rcx)

	/*
	 * We can be here either for a mach, unix, machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
	movl	R64_RAX(%r15), %eax	/* syscall number/class */
	movl	%eax, %edx
	andl	$(SYSCALL_CLASS_MASK), %edx	/* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_mach_scall64)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_unix_scall64)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_mdep_scall64)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_diag_scall64)

	/* Syscall class unknown */
	sti
	CCALL3(i386_exception, $(EXC_SYSCALL), %rax, $1)
	/* no return */

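/*
 * For example (assuming XNU's convention of SYSCALL_CLASS_SHIFT == 24),
 * a 64-bit Unix syscall arrives in %rax as (SYSCALL_CLASS_UNIX << 24) | number
 * and a Mach trap as (SYSCALL_CLASS_MACH << 24) | number, so only the class
 * bits isolated by SYSCALL_CLASS_MASK select among the handlers above.
 */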

Entry(hndl_unix_scall64)
	incl	TH_SYSCALLS_UNIX(%rcx)	/* increment call count */
	sti

	CCALL1(unix_syscall64, %r15)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mach_scall64)
	incl	TH_SYSCALLS_MACH(%rcx)	/* increment call count */
	sti

	CCALL1(mach_call_munger64, %r15)
	/*
	 * always returns through thread_exception_return
	 */



Entry(hndl_mdep_scall64)
	sti

	CCALL1(machdep_syscall64, %r15)
	/*
	 * always returns through thread_exception_return
	 */

Entry(hndl_diag_scall64)
	CCALL1(diagCall64, %r15)	// Call diagnostics
	test	%eax, %eax		// What kind of return is this?
	je	1f			// - branch if bad (zero)
	jmp	EXT(return_to_user)	// Normal return, do not check asts...
1:
	sti
	CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
	/* no return */
/* TODO: assert at all 'C' entry points that we're never operating on the fault stack's alias mapping */
Entry(hndl_machine_check)
	/* Adjust SP and savearea to their canonical, non-aliased addresses */
	CCALL1(panic_machine_check64, %r15)
	hlt

Entry(hndl_double_fault)
	CCALL1(panic_double_fault64, %r15)
	hlt