b0d623f7 1/*
0a7de745 2 * Copyright (c) 2010-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <i386/asm.h>
29#include <assym.s>
39236c6e 30#include <debug.h>
b0d623f7 31#include <i386/eflags.h>
6d2010ae 32#include <i386/rtclock_asm.h>
33#include <i386/trap.h>
34#define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
35#include <mach/i386/syscall_sw.h>
36#include <i386/postcode.h>
37#include <i386/proc_reg.h>
38#include <mach/exception_types.h>
39
40#if DEBUG
41#define DEBUG_IDT64 1
42#endif
43
44/*
45 * This is the low-level trap and interrupt handling code associated with
46 * the IDT. It also includes system call handlers for sysenter/syscall.
47 * The IDT itself is defined in mp_desc.c.
48 *
49 * Code here is structured as follows:
50 *
51 * stubs Code called directly from an IDT vector.
52 * All entry points have the "idt64_" prefix and they are built
53 * using macros expanded by the inclusion of idt_table.h.
54 * This code performs vector-dependent identification and jumps
55 * into the dispatch code.
56 *
57 * dispatch The dispatch code is responsible for saving the thread state
58 * (which is either 64-bit or 32-bit) and then jumping to the
59 * class handler identified by the stub.
60 *
61 * returns Code to restore state and return to the previous context.
62 *
63 * handlers There are several classes of handlers:
64 * interrupt - asynchronous events typically from external devices
65 * trap - synchronous events due to thread execution
66 * syscall - synchronous system call request
67 * fatal - fatal traps
68 */
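/*
 * Rough flow, for orientation (illustrative only):
 *
 *	idt64_<vector>:				stub generated from idt_table.h
 *		push error code (if the CPU did not)
 *		push HNDL_* handler index
 *		push vector number
 *		jmp L_dispatch
 *	L_dispatch -> ks_dispatch*		save state to the PCB/kernel stack
 *		-> hndl_*			via idt64_hndl_table1[HNDL_*]
 *	return_to_user / ret_to_kernel		restore state; iretq/sysretq/sysexit
 */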
b0d623f7 69/*
5c9f4661 70 * Indices of handlers for each exception type.
b0d623f7 71 */
72#define HNDL_ALLINTRS 0
73#define HNDL_ALLTRAPS 1
74#define HNDL_SYSENTER 2
75#define HNDL_SYSCALL 3
76#define HNDL_UNIX_SCALL 4
77#define HNDL_MACH_SCALL 5
78#define HNDL_MDEP_SCALL 6
79#define HNDL_DOUBLE_FAULT 7
80#define HNDL_MACHINE_CHECK 8
81
82/* Begin double-mapped descriptor section */
83
84.section __HIB, __desc
85.globl EXT(idt64_hndl_table0)
86EXT(idt64_hndl_table0):
87/* 0x00 */ .quad EXT(ks_dispatch)
88/* 0x08 */ .quad EXT(ks_64bit_return)
89/* 0x10 */ .quad 0 /* Populated with CPU shadow displacement*/
cb323159 90/* 0x18 */ .quad EXT(ks_32bit_return)
91#define TBL0_OFF_DISP_USER_WITH_POPRAX 0x20
92/* 0x20 */ .quad EXT(ks_dispatch_user_with_pop_rax)
93#define TBL0_OFF_DISP_KERN_WITH_POPRAX 0x28
94/* 0x28 */ .quad EXT(ks_dispatch_kernel_with_pop_rax)
95#define TBL0_OFF_PTR_KERNEL_STACK_MASK 0x30
96/* 0x30 */ .quad 0 /* &kernel_stack_mask */
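/*
 * This table is reached from the double-mapped trampolines with RIP-relative
 * addressing; its entries are used as indirect branch targets, e.g.
 * "jmp *(%rax)" for ks_dispatch, "jmp *8(%rax)" for ks_64bit_return and
 * "jmp *0x18(%rax)" for ks_32bit_return.
 */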
97
98EXT(idt64_hndl_table1):
99 .quad EXT(hndl_allintrs)
100 .quad EXT(hndl_alltraps)
101 .quad EXT(hndl_sysenter)
102 .quad EXT(hndl_syscall)
103 .quad EXT(hndl_unix_scall)
104 .quad EXT(hndl_mach_scall)
105 .quad EXT(hndl_mdep_scall)
106 .quad EXT(hndl_double_fault)
107 .quad EXT(hndl_machine_check)
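/*
 * The order of idt64_hndl_table1 entries must match the HNDL_* indices above:
 * L_common_dispatch selects the handler with "jmp *(%rax, %rdx, 8)", where
 * %rdx holds the HNDL_* value pushed by the entry stub.
 */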
108.text
109
110
111/* The wrapper for all non-special traps/interrupts */
 112/* Everything before the push of the handler index is just to output
 113 * the interrupt number to the postcode display
114 */
115#if DEBUG_IDT64
116#define IDT_ENTRY_WRAPPER(n, f) \
117 push %rax ;\
118 POSTCODE2(0x6400+n) ;\
119 pop %rax ;\
5c9f4661 120 pushq $(f) ;\
121 pushq $(n) ;\
122 jmp L_dispatch
123#else
124#define IDT_ENTRY_WRAPPER(n, f) \
5c9f4661 125 pushq $(f) ;\
126 pushq $(n) ;\
127 jmp L_dispatch
128#endif
129
130/* A trap that comes with an error code already on the stack */
131#define TRAP_ERR(n, f) \
132 Entry(f) ;\
133 IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)
134
135/* A normal trap */
136#define TRAP(n, f) \
137 Entry(f) ;\
138 pushq $0 ;\
139 IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)
140
141#define USER_TRAP TRAP
142
143/* An interrupt */
144#define INTERRUPT(n) \
145 Entry(_intr_ ## n) ;\
146 pushq $0 ;\
147 IDT_ENTRY_WRAPPER(n, HNDL_ALLINTRS)
148
149/* A trap with a special-case handler, hence we don't need to define anything */
150#define TRAP_SPC(n, f)
151#define TRAP_IST1(n, f)
152#define TRAP_IST2(n, f)
153#define USER_TRAP_SPC(n, f)
154
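/*
 * For example, an idt_table.h entry such as TRAP(0x00, idt64_zero_div)
 * expands (non-DEBUG) to roughly:
 *
 *	Entry(idt64_zero_div)
 *		pushq	$0			// error code
 *		pushq	$(HNDL_ALLTRAPS)	// handler index
 *		pushq	$(0x00)			// vector number
 *		jmp	L_dispatch
 */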
155/* Begin double-mapped text section */
156.section __HIB, __text
157/* Generate all the stubs */
158#include "idt_table.h"
159
160Entry(idt64_page_fault)
161 pushq $(HNDL_ALLTRAPS)
162 push $(T_PAGE_FAULT)
163 jmp L_dispatch
164
165/*
166 * #DB handler, which runs on IST1, will treat as spurious any #DB received while executing in the
 167 * kernel without the kernel's gsbase being active.
168 */
5c9f4661 169Entry(idt64_debug)
9d749ea3 170 /* Synthesize common interrupt stack frame */
171 push $0 /* error code */
172 pushq $(HNDL_ALLTRAPS)
173 pushq $(T_DEBUG)
174 /* Spill prior to RDMSR */
175 push %rax
176 push %rcx
177 push %rdx
178 mov $(MSR_IA32_GS_BASE), %ecx
179 rdmsr /* Check contents of GSBASE MSR */
180 test $0x80000000, %edx /* MSB set? Already swapped to kernel's */
181 jnz 1f
182
183 /*
184 * If we're not already swapped to the kernel's gsbase AND this #DB originated from kernel space,
185 * it must have happened within the very small window on entry or exit before or after (respectively)
186 * swapgs occurred. In those cases, consider the #DB spurious and immediately return.
187 */
188 testb $3, 8+8+8+ISF64_CS(%rsp)
189 jnz 2f
190 pop %rdx
191 pop %rcx
192 pop %rax
193 addq $0x18, %rsp /* Remove synthesized interrupt stack frame */
194 jmp EXT(ret64_iret)
1952:
196 swapgs /* direct from user */
1971:
198 pop %rdx
199
200 leaq EXT(idt64_hndl_table0)(%rip), %rax
201 mov 16(%rax), %rax /* Offset of per-CPU shadow */
202
203 mov %gs:CPU_SHADOWTASK_CR3(%rax), %rax
204 mov %rax, %cr3
205
206 pop %rcx
207
208 /* Note that %rax will be popped from the stack in ks_dispatch, below */
209
210 leaq EXT(idt64_hndl_table0)(%rip), %rax
211 jmp *(%rax)
212
213/*
214 * Legacy interrupt gate System call handlers.
215 * These are entered via a syscall interrupt. The system call number in %rax
216 * is saved to the error code slot in the stack frame. We then branch to the
217 * common state saving code.
218 */
219
220#ifndef UNIX_INT
221#error NO UNIX INT!!!
222#endif
223Entry(idt64_unix_scall)
224 pushq %rax /* save system call number */
225 pushq $(HNDL_UNIX_SCALL)
226 pushq $(UNIX_INT)
d9a64523 227 jmp L_u64bit_entry_check
228
229Entry(idt64_mach_scall)
230 pushq %rax /* save system call number */
231 pushq $(HNDL_MACH_SCALL)
232 pushq $(MACH_INT)
d9a64523 233 jmp L_u64bit_entry_check
234
235Entry(idt64_mdep_scall)
236 pushq %rax /* save system call number */
237 pushq $(HNDL_MDEP_SCALL)
238 pushq $(MACHDEP_INT)
d9a64523 239 jmp L_u64bit_entry_check
240
241/*
242 * For GP/NP/SS faults, we use the IST1 stack.
243 * For faults from user-space, we have to copy the machine state to the
244 * PCB stack and then dispatch as normal.
245 * For faults in kernel-space, we need to scrub for kernel exit faults and
246 * treat these as user-space faults. But for all other kernel-space faults
cb323159 247 * we continue to run on the IST1 stack as we dispatch to handle the fault
248 * as fatal.
249 */
250Entry(idt64_segnp)
251 pushq $(HNDL_ALLTRAPS)
252 pushq $(T_SEGMENT_NOT_PRESENT)
253 jmp L_check_for_kern_flt
254
255Entry(idt64_gen_prot)
256 pushq $(HNDL_ALLTRAPS)
257 pushq $(T_GENERAL_PROTECTION)
0a7de745 258 jmp L_check_for_kern_flt
259
260Entry(idt64_stack_fault)
261 pushq $(HNDL_ALLTRAPS)
262 pushq $(T_STACK_FAULT)
263 jmp L_check_for_kern_flt
264
265L_check_for_kern_flt:
266 /*
267 * If we took a #GP or #SS from the kernel, check if we took them
268 * from either ret32_iret or ret64_iret. If we did, we need to
269 * jump into L_dispatch at the swapgs so that the code in L_dispatch
270 * can proceed with the correct GSbase.
271 */
272 pushq %rax
273 testb $3, 8+ISF64_CS(%rsp)
274 jnz L_dispatch_from_user_no_push_rax /* Fault from user, go straight to dispatch */
275
276 /* Check if the fault occurred in the 32-bit segment restoration window (which executes with user gsb) */
277 leaq L_32bit_seg_restore_begin(%rip), %rax
278 cmpq %rax, 8+ISF64_RIP(%rsp)
279 jb L_not_32bit_segrestores
280 leaq L_32bit_seg_restore_done(%rip), %rax
281 cmpq %rax, 8+ISF64_RIP(%rsp)
282 jae L_not_32bit_segrestores
283 jmp 1f
284L_not_32bit_segrestores:
285 leaq EXT(ret32_iret)(%rip), %rax
286 cmpq %rax, 8+ISF64_RIP(%rsp)
287 je 1f
288 leaq EXT(ret64_iret)(%rip), %rax
289 cmpq %rax, 8+ISF64_RIP(%rsp)
290 je 1f
291 jmp L_dispatch_from_kernel_no_push_rax
292 /*
293 * We hit the fault on iretq, so check the original return %cs. If
 294 * it's a user %cs, fix up the stack and then jump to dispatch.
295 *
 296 * With this type of fault, the stack is laid out as follows:
297 *
298 *
299 * orig %ss saved_rsp+32
300 * orig %rsp saved_rsp+24
301 * orig %rflags saved_rsp+16
302 * orig %cs saved_rsp+8
303 * orig %rip saved_rsp
304 * ^^^^^^^^^ (maybe on another stack, since we switched to IST1)
305 * %ss +64 -8
306 * saved_rsp +56 -16
307 * %rflags +48 -24
308 * %cs +40 -32
309 * %rip +32 -40
310 * error code +24 -48
 311 * handler +16 -56
312 * trap number +8 -64
313 * <saved %rax> <== %rsp -72
314 */
3151:
316 pushq %rbx
317 movq 16+ISF64_RSP(%rsp), %rbx
318 movq ISF64_CS-24(%rbx), %rax
319 testb $3, %al /* If the original return destination was to user */
320 jnz 2f
321 popq %rbx
322 jmp L_dispatch_from_kernel_no_push_rax /* Fault occurred when trying to return to kernel */
3232:
324 /*
325 * Fix the stack so the original trap frame is current, then jump to dispatch
326 */
cb323159 327
328 movq %rax, 16+ISF64_CS(%rsp)
329
330 movq ISF64_RSP-24(%rbx), %rax
331 movq %rax, 16+ISF64_RSP(%rsp)
332
333 movq ISF64_RIP-24(%rbx), %rax
334 movq %rax, 16+ISF64_RIP(%rsp)
335
336 movq ISF64_SS-24(%rbx), %rax
337 movq %rax, 16+ISF64_SS(%rsp)
338
339 movq ISF64_RFLAGS-24(%rbx), %rax
340 movq %rax, 16+ISF64_RFLAGS(%rsp)
341
342 popq %rbx
343 jmp L_dispatch_from_user_no_push_rax
5c9f4661 344
345
346/*
347 * Fatal exception handlers:
348 */
349Entry(idt64_db_task_dbl_fault)
350 pushq $(HNDL_DOUBLE_FAULT)
351 pushq $(T_DOUBLE_FAULT)
352 jmp L_dispatch
353
354Entry(idt64_db_task_stk_fault)
355 pushq $(HNDL_DOUBLE_FAULT)
356 pushq $(T_STACK_FAULT)
357 jmp L_dispatch
358
359Entry(idt64_mc)
360 push $(0) /* Error */
361 pushq $(HNDL_MACHINE_CHECK)
362 pushq $(T_MACHINE_CHECK)
363 jmp L_dispatch
364
365/*
366 * NMI
367 * This may or may not be fatal but extreme care is required
 368 * because it may arrive while control is already in another trampoline.
369 *
370 * We get here on IST2 stack which is used exclusively for NMIs.
371 * Machine checks, doublefaults and similar use IST1
372 */
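/*
 * Three cases are distinguished below:
 *  1. NMI from user space: switch to the kernel gsbase/CR3 and copy the
 *     interrupt state to the user PCB.
 *  2. NMI from kernel space while the user gsbase is still live (entry/exit
 *     window): swapgs and flag the saved CS so the return path swaps back.
 *  3. NMI from kernel space with the kernel gsbase: copy state onto the
 *     kernel or interrupt stack, whichever was in use.
 */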
373Entry(idt64_nmi)
374 push %rax
375 push %rcx
376 push %rdx
377 testb $3, ISF64_CS(%rsp)
378 jz 1f
379
380 /* From user-space: copy interrupt state to user PCB */
381 swapgs
382
383 leaq EXT(idt64_hndl_table0)(%rip), %rax
384 mov 16(%rax), %rax /* Offset of per-CPU shadow */
0a7de745 385 mov %gs:CPU_SHADOWTASK_CR3(%rax), %rax
386 mov %rax, %cr3 /* note that SMAP is enabled in L_common_dispatch (on Broadwell+) */
387
388 mov %gs:CPU_UBER_ISF, %rcx /* PCB stack addr */
389 add $(ISF64_SIZE), %rcx /* adjust to base of ISF */
390
391 leaq TBL0_OFF_DISP_USER_WITH_POPRAX+EXT(idt64_hndl_table0)(%rip), %rax /* ks_dispatch_user_with_pop_rax */
392 jmp 4f /* Copy state to PCB */
393
3941:
395 /*
396 * From kernel-space:
397 * Determine whether the kernel or user GS is set.
398 * Sets the high 32 bits of the return CS to 1 to ensure that we'll swapgs back correctly at IRET.
399 */
a39ff7e2 400 mov $(MSR_IA32_GS_BASE), %ecx
401 rdmsr /* read kernel gsbase */
402 test $0x80000000, %edx /* test MSB of address */
403 jnz 2f
404 swapgs /* so swap */
405 movl $1, ISF64_CS+4(%rsp) /* and set flag in CS slot */
4062:
407
408 leaq EXT(idt64_hndl_table0)(%rip), %rax
409 mov 16(%rax), %rax /* Offset of per-CPU shadow */
d9a64523 410 mov %cr3, %rdx
0a7de745 411 mov %gs:CPU_SHADOWTASK_CR3(%rax), %rax
a39ff7e2 412 mov %rax, %cr3 /* Unconditionally switch to primary kernel pagetables */
413
414 /*
415 * Determine whether we're on the kernel or interrupt stack
416 * when the NMI hit.
417 */
418 mov ISF64_RSP(%rsp), %rcx
419 mov %gs:CPU_KERNEL_STACK, %rax
420 xor %rcx, %rax
421 movq TBL0_OFF_PTR_KERNEL_STACK_MASK+EXT(idt64_hndl_table0)(%rip), %rdx
422 mov (%rdx), %rdx /* Load kernel_stack_mask */
423 and %rdx, %rax
424 test %rax, %rax /* are we on the kernel stack? */
425 jz 3f /* yes */
426
427 mov %gs:CPU_INT_STACK_TOP, %rax
428 cmp %rcx, %rax /* are we on the interrupt stack? */
429 jb 5f /* no */
430 leaq -INTSTACK_SIZE(%rax), %rax
431 cmp %rcx, %rax
432 jb 3f /* yes */
4335:
434 mov %gs:CPU_KERNEL_STACK, %rcx
4353:
436 /* 16-byte-align kernel/interrupt stack for state push */
437 and $0xFFFFFFFFFFFFFFF0, %rcx
438
439 leaq TBL0_OFF_DISP_KERN_WITH_POPRAX+EXT(idt64_hndl_table0)(%rip), %rax /* ks_dispatch_kernel_with_pop_rax */
4404:
441 /*
442 * Copy state from NMI stack (RSP) to the save area (RCX) which is
443 * the PCB for user or kernel/interrupt stack from kernel.
444 * ISF64_ERR(RSP) saved RAX
445 * ISF64_TRAPFN(RSP) saved RCX
446 * ISF64_TRAPNO(RSP) saved RDX
447 */
448 xchg %rsp, %rcx /* set for pushes */
449 push ISF64_SS(%rcx)
450 push ISF64_RSP(%rcx)
451 push ISF64_RFLAGS(%rcx)
452 push ISF64_CS(%rcx)
453 push ISF64_RIP(%rcx)
454 /* Synthesize common interrupt stack frame */
455 push $(0) /* error code 0 */
456 push $(HNDL_ALLINTRS) /* trapfn allintrs */
457 push $(T_NMI) /* trapno T_NMI */
458 push ISF64_ERR(%rcx) /* saved %rax is popped in ks_dispatch_{kernel|user}_with_pop_rax */
459 mov ISF64_TRAPNO(%rcx), %rdx
460 mov ISF64_TRAPFN(%rcx), %rcx
461
462 jmp *(%rax) /* ks_dispatch_{kernel|user}_with_pop_rax */
463
464Entry(idt64_double_fault)
465 pushq $(HNDL_DOUBLE_FAULT)
466 pushq $(T_DOUBLE_FAULT)
467 jmp L_dispatch
468
469Entry(hi64_syscall)
470Entry(idt64_syscall)
471 swapgs
 472 /* Use RAX as a temporary by shifting its contents into R11[32:63].
 473 * The system call number is defined to be a 32-bit quantity, as is
474 * RFLAGS.
475 */
476 shlq $32, %rax
477 or %rax, %r11
478.globl EXT(dblsyscall_patch_point)
479EXT(dblsyscall_patch_point):
480// movabsq $0x12345678ABCDEFFFULL, %rax
481 /* Generate offset to the double-mapped per-CPU data shadow
482 * into RAX
483 */
484 leaq EXT(idt64_hndl_table0)(%rip), %rax
485 mov 16(%rax), %rax
486 mov %rsp, %gs:CPU_UBER_TMP(%rax) /* save user stack */
487 mov %gs:CPU_ESTACK(%rax), %rsp /* switch stack to per-cpu estack */
488 sub $(ISF64_SIZE), %rsp
489
490 /*
491 * Synthesize an ISF frame on the exception stack
492 */
493 movl $(USER_DS), ISF64_SS(%rsp)
494 mov %rcx, ISF64_RIP(%rsp) /* rip */
495
496 mov %gs:CPU_UBER_TMP(%rax), %rcx
497 mov %rcx, ISF64_RSP(%rsp) /* user stack --changed */
498
499 mov %r11, %rax
500 shrq $32, %rax /* Restore RAX */
501 mov %r11d, %r11d /* Clear r11[32:63] */
502
503 mov %r11, ISF64_RFLAGS(%rsp) /* rflags */
504 movl $(SYSCALL_CS), ISF64_CS(%rsp) /* cs - a pseudo-segment */
505 mov %rax, ISF64_ERR(%rsp) /* err/rax - syscall code */
506 movq $(HNDL_SYSCALL), ISF64_TRAPFN(%rsp)
507 movq $(T_SYSCALL), ISF64_TRAPNO(%rsp) /* trapno */
508 swapgs
509 jmp L_dispatch /* this can only be 64-bit */
510
511Entry(hi64_sysenter)
512Entry(idt64_sysenter)
513 /* Synthesize an interrupt stack frame onto the
514 * exception stack.
515 */
516 push $(USER_DS) /* ss */
517 push %rcx /* uesp */
518 pushf /* flags */
519 /*
520 * Clear, among others, the Nested Task (NT) flags bit;
521 * this is zeroed by INT, but not by SYSENTER.
522 */
523 push $0
524 popf
525 push $(SYSENTER_CS) /* cs */
526L_sysenter_continue:
527 push %rdx /* eip */
528 push %rax /* err/eax - syscall code */
529 pushq $(HNDL_SYSENTER)
530 pushq $(T_SYSENTER)
531 orl $(EFL_IF), ISF64_RFLAGS(%rsp)
d9a64523 532 jmp L_u64bit_entry_check
5c9f4661 533
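/*
 * The legacy INT/sysenter entries above are valid only for 32-bit tasks;
 * L_u64bit_entry_check (below) redirects a 64-bit task that uses them into a
 * T_INVALID_OPCODE trap.
 */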
534/*
535 * Common dispatch point.
536 * Determine what mode has been interrupted and save state accordingly.
537 * Here with:
538 * rsp from user-space: interrupt state in PCB, or
539 * from kernel-space: interrupt state in kernel or interrupt stack
540 * GSBASE from user-space: pthread area, or
541 * from kernel-space: cpu_data
b0d623f7 542 */
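/*
 * L_dispatch_from_user_no_push_rax and L_dispatch_from_kernel_no_push_rax are
 * entered from L_check_for_kern_flt (above) with %rax already pushed; the
 * kernel variant additionally skips the swapgs and the kernel CR3 switch.
 */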
5c9f4661 543
b0d623f7 544L_dispatch:
545 pushq %rax
546 testb $3, 8+ISF64_CS(%rsp)
547 jz 1f
0a7de745 548L_dispatch_from_user_no_push_rax:
549 swapgs
550 leaq EXT(idt64_hndl_table0)(%rip), %rax
551 mov 16(%rax), %rax
d9a64523 552L_dispatch_kgsb:
0a7de745 553 mov %gs:CPU_SHADOWTASK_CR3(%rax), %rax
554 mov %rax, %cr3
555#if DEBUG
556 mov %rax, %gs:CPU_ENTRY_CR3
557#endif
0a7de745 558L_dispatch_from_kernel_no_push_rax:
5c9f4661 5591:
5c9f4661 560 leaq EXT(idt64_hndl_table0)(%rip), %rax
d9a64523 561 /* The text/data relationship here must be preserved in the doublemap, and the contents must be remapped */
562 /* Indirect branch to non-doublemapped trampolines */
 563 jmp *(%rax) /* ks_dispatch */
564/* User return: register restoration and address space switch sequence */
565Entry(ks_64bit_return)
cb323159 566
567 mov R64_R14(%r15), %r14
568 mov R64_R13(%r15), %r13
569 mov R64_R12(%r15), %r12
570 mov R64_R11(%r15), %r11
571 mov R64_R10(%r15), %r10
572 mov R64_R9(%r15), %r9
573 mov R64_R8(%r15), %r8
574 mov R64_RSI(%r15), %rsi
575 mov R64_RDI(%r15), %rdi
576 mov R64_RBP(%r15), %rbp
577 mov R64_RDX(%r15), %rdx
578 mov R64_RCX(%r15), %rcx
579 mov R64_RBX(%r15), %rbx
580 mov R64_RAX(%r15), %rax
581 /* Switch to per-CPU exception stack */
582 mov %gs:CPU_ESTACK, %rsp
583
584 /* Synthesize interrupt stack frame from PCB savearea to exception stack */
585 push R64_SS(%r15)
586 push R64_RSP(%r15)
587 push R64_RFLAGS(%r15)
588 push R64_CS(%r15)
589 push R64_RIP(%r15)
590
5c9f4661 591 cmpq $(KERNEL64_CS), 8(%rsp)
592 jne 1f /* Returning to user (%r15 will be restored after the segment checks) */
593 mov R64_R15(%r15), %r15
594 jmp L_64b_kernel_return /* Returning to kernel */
595
5961:
597 push %rax /* [A] */
598 movl %gs:CPU_NEED_SEGCHK, %eax
599 push %rax /* [B] */
600
601 /* Returning to user */
602 cmpl $0, %gs:CPU_CURTASK_HAS_LDT /* If the current task has an LDT, check and restore segment regs */
603 jne L_64b_segops_island
604
605 /*
606 * Restore %r15, since we're now done accessing saved state
607 * and (%r15) won't be accessible after the %cr3 load anyway.
608 * Note that %r15 is restored below for the segment-restore
609 * case, just after we no longer need to access register state
610 * relative to %r15.
611 */
612 mov R64_R15(%r15), %r15
613
614 /*
615 * Note that this %cr3 sequence is duplicated here to save
616 * [at least] a load and comparison that would be required if
617 * this block were shared.
618 */
5c9f4661 619 /* Discover user cr3/ASID */
620 mov %gs:CPU_UCR3, %rax
621#if DEBUG
622 mov %rax, %gs:CPU_EXIT_CR3
623#endif
624 mov %rax, %cr3
625 /* Continue execution on the shared/doublemapped trampoline */
5c9f4661 626 swapgs
627
628L_chk_sysret:
629 pop %rax /* Matched to [B], above (segchk required) */
630
631 /*
632 * At this point, the stack contains:
633 *
634 * +--------------+
635 * | Return SS | +40
636 * | Return RSP | +32
637 * | Return RFL | +24
638 * | Return CS | +16
639 * | Return RIP | +8
640 * | Saved RAX | <-- rsp
641 * +--------------+
642 */
643 cmpl $(SYSCALL_CS), 16(%rsp) /* test for exit via SYSRET */
5c9f4661 644 je L_sysret
645
646 cmpl $1, %eax
647 je L_verw_island_2
648
649 pop %rax /* Matched to [A], above */
650
651L_64b_kernel_return:
652.globl EXT(ret64_iret)
5c9f4661 653EXT(ret64_iret):
654 iretq /* return from interrupt */
655
656
5c9f4661 657L_sysret:
658 cmpl $1, %eax
659 je L_verw_island_3
660
661 pop %rax /* Matched to [A], above */
662 /*
663 * Here to restore rcx/r11/rsp and perform the sysret back to user-space.
664 * rcx user rip
665 * r11 user rflags
666 * rsp user stack pointer
667 */
668 pop %rcx
669 add $8, %rsp
670 pop %r11
671 pop %rsp
672 sysretq /* return from system call */
673
674
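/*
 * The L_verw_island* blocks execute VERW on the return-to-user path when
 * CPU_NEED_SEGCHK was set; on affected CPUs VERW also clears microarchitectural
 * buffers (used here as a speculative-execution mitigation), with the saved %ss
 * already on the stack serving as its operand.
 */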
675L_verw_island_2:
676
677 pop %rax /* Matched to [A], above */
678 verw 40(%rsp) /* verw operates on the %ss value already on the stack */
679 jmp EXT(ret64_iret)
680
681
682L_verw_island_3:
683
684 pop %rax /* Matched to [A], above */
685
686 /*
687 * Here to restore rcx/r11/rsp and perform the sysret back to user-space.
688 * rcx user rip
689 * r11 user rflags
690 * rsp user stack pointer
691 */
692 pop %rcx
693 add $8, %rsp
694 pop %r11
695 verw 8(%rsp) /* verw operates on the %ss value already on the stack */
696 pop %rsp
697 sysretq /* return from system call */
698
699
700L_64b_segops_island:
701
702 /* Validate CS/DS/ES/FS/GS segment selectors with the Load Access Rights instruction prior to restoration */
703 /* Exempt "known good" statically configured selectors, e.g. USER64_CS and 0 */
704 cmpl $(USER64_CS), R64_CS(%r15)
705 jz 11f
706 larw R64_CS(%r15), %ax
707 jnz L_64_reset_cs
708 /* Ensure that the segment referenced by CS in the saved state is a code segment (bit 11 == 1) */
709 testw $0x800, %ax
710 jz L_64_reset_cs /* Update stored %cs with known-good selector if ZF == 1 */
711 jmp 11f
712L_64_reset_cs:
713 movl $(USER64_CS), R64_CS(%r15)
71411:
715 cmpl $0, R64_DS(%r15)
716 jz 22f
717 larw R64_DS(%r15), %ax
718 jz 22f
719 movl $0, R64_DS(%r15)
72022:
721 cmpl $0, R64_ES(%r15)
722 jz 33f
723 larw R64_ES(%r15), %ax
724 jz 33f
725 movl $0, R64_ES(%r15)
72633:
727 cmpl $0, R64_FS(%r15)
728 jz 44f
729 larw R64_FS(%r15), %ax
730 jz 44f
731 movl $0, R64_FS(%r15)
73244:
733 cmpl $0, R64_GS(%r15)
734 jz 55f
735 larw R64_GS(%r15), %ax
736 jz 55f
737 movl $0, R64_GS(%r15)
73855:
739 /*
740 * Pack the segment registers in %rax since (%r15) will not
741 * be accessible after the %cr3 switch.
 742 * Only restore %gs if cthread_self is zero (indicate
743 * this to the code below with a value of 0xffff)
744 */
745 mov %gs:CPU_ACTIVE_THREAD, %rax /* Get the active thread */
746 cmpq $0, TH_CTH_SELF(%rax)
747 je L_restore_gs
748 movw $0xFFFF, %ax
749 jmp 1f
750L_restore_gs:
751 movw R64_GS(%r15), %ax
7521:
753 shlq $16, %rax
754 movw R64_FS(%r15), %ax
755 shlq $16, %rax
756 movw R64_ES(%r15), %ax
757 shlq $16, %rax
758 movw R64_DS(%r15), %ax
759
760 /*
761 * Restore %r15, since we're done accessing saved state
762 * and (%r15) won't be accessible after the %cr3 switch.
763 */
764 mov R64_R15(%r15), %r15
765
766 /* Discover user cr3/ASID */
767 push %rax
768 mov %gs:CPU_UCR3, %rax
769#if DEBUG
770 mov %rax, %gs:CPU_EXIT_CR3
771#endif
772 mov %rax, %cr3
773 /* Continue execution on the shared/doublemapped trampoline */
774 pop %rax
775 swapgs
776
777 /*
778 * Returning to user; restore segment registers that might be used
779 * by compatibility-mode code in a 64-bit user process.
780 *
781 * Note that if we take a fault here, it's OK that we haven't yet
782 * popped %rax from the stack, because %rsp will be reset to
783 * the value pushed onto the exception stack (above).
784 */
785 movw %ax, %ds
786 shrq $16, %rax
787
788 movw %ax, %es
789 shrq $16, %rax
790
791 movw %ax, %fs
792 shrq $16, %rax
793
794 /*
795 * 0xFFFF is the sentinel set above that indicates we should
796 * not restore %gs (because GS.base was already set elsewhere
797 * (e.g.: in act_machine_set_pcb or machine_thread_set_tsd_base))
798 */
799 cmpw $0xFFFF, %ax
800 je L_chk_sysret
801 movw %ax, %gs /* Restore %gs to user-set value */
802 jmp L_chk_sysret
803
804
805L_u64bit_entry_check:
806 /*
807 * Check we're not a confused 64-bit user.
808 */
809 pushq %rax
810 swapgs
811 leaq EXT(idt64_hndl_table0)(%rip), %rax
812 mov 16(%rax), %rax
813
814 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP(%rax)
815 jne L_64bit_entry_reject
816 jmp L_dispatch_kgsb
817
818L_64bit_entry_reject:
819 /*
820 * Here for a 64-bit user attempting an invalid kernel entry.
821 */
822 movq $(HNDL_ALLTRAPS), 8+ISF64_TRAPFN(%rsp)
823 movq $(T_INVALID_OPCODE), 8+ISF64_TRAPNO(%rsp)
824 jmp L_dispatch_kgsb
825
826Entry(ks_32bit_return)
827
828 /* Validate CS/DS/ES/FS/GS segment selectors with the Load Access Rights instruction prior to restoration */
829 /* Exempt "known good" statically configured selectors, e.g. USER_CS, USER_DS and 0 */
830 cmpl $(USER_CS), R32_CS(%r15)
831 jz 11f
832 larw R32_CS(%r15), %ax
833 jnz L_32_reset_cs
834 /* Ensure that the segment referenced by CS in the saved state is a code segment (bit 11 == 1) */
835 testw $0x800, %ax
836 jz L_32_reset_cs /* Update stored %cs with known-good selector if ZF == 1 */
837 jmp 11f
838L_32_reset_cs:
839 movl $(USER_CS), R32_CS(%r15)
84011:
841 cmpl $(USER_DS), R32_DS(%r15)
842 jz 22f
843 cmpl $0, R32_DS(%r15)
844 jz 22f
845 larw R32_DS(%r15), %ax
846 jz 22f
847 movl $(USER_DS), R32_DS(%r15)
84822:
849 cmpl $(USER_DS), R32_ES(%r15)
850 jz 33f
851 cmpl $0, R32_ES(%r15)
852 jz 33f
853 larw R32_ES(%r15), %ax
854 jz 33f
855 movl $(USER_DS), R32_ES(%r15)
85633:
857 cmpl $(USER_DS), R32_FS(%r15)
858 jz 44f
859 cmpl $0, R32_FS(%r15)
860 jz 44f
861 larw R32_FS(%r15), %ax
862 jz 44f
863 movl $(USER_DS), R32_FS(%r15)
86444:
865 cmpl $(USER_CTHREAD), R32_GS(%r15)
866 jz 55f
867 cmpl $0, R32_GS(%r15)
868 jz 55f
869 larw R32_GS(%r15), %ax
870 jz 55f
871 movl $(USER_CTHREAD), R32_GS(%r15)
87255:
873
874 /*
875 * Restore general 32-bit registers
876 */
877 movl R32_EAX(%r15), %eax
878 movl R32_EBX(%r15), %ebx
879 movl R32_ECX(%r15), %ecx
880 movl R32_EDX(%r15), %edx
881 movl R32_EBP(%r15), %ebp
882 movl R32_ESI(%r15), %esi
883 movl R32_EDI(%r15), %edi
884 movl R32_DS(%r15), %r8d
885 movl R32_ES(%r15), %r9d
886 movl R32_FS(%r15), %r10d
887 movl R32_GS(%r15), %r11d
888
889 /* Switch to the per-cpu (doublemapped) exception stack */
890 mov %gs:CPU_ESTACK, %rsp
891
892 /* Now transfer the ISF to the exception stack in preparation for iret, below */
893 movl R32_SS(%r15), %r12d
894 push %r12
895 movl R32_UESP(%r15), %r12d
896 push %r12
897 movl R32_EFLAGS(%r15), %r12d
898 push %r12
899 movl R32_CS(%r15), %r12d
900 push %r12
901 movl R32_EIP(%r15), %r12d
902 push %r12
903
904 movl %gs:CPU_NEED_SEGCHK, %r14d /* %r14 will be zeroed just before we return */
905
906 /*
907 * Finally, switch to the user pagetables. After this, all %gs-relative
908 * accesses MUST be to cpu shadow data ONLY. Note that after we restore %gs
909 * (after the swapgs), no %gs-relative accesses should be performed.
910 */
911 /* Discover user cr3/ASID */
912 mov %gs:CPU_UCR3, %r13
913#if DEBUG
914 mov %r13, %gs:CPU_EXIT_CR3
915#endif
916 mov %r13, %cr3
917
918 swapgs
919
920 /*
921 * Restore segment registers. A #GP taken here will push state onto IST1,
922 * not the exception stack. Note that the placement of the labels here
923 * corresponds to the fault address-detection logic (so do not change them
924 * without also changing that code).
925 */
926L_32bit_seg_restore_begin:
927 mov %r8, %ds
928 mov %r9, %es
929 mov %r10, %fs
930 mov %r11, %gs
931L_32bit_seg_restore_done:
932
933 /* Zero 64-bit-exclusive GPRs to prevent data leaks */
934 xor %r8, %r8
935 xor %r9, %r9
936 xor %r10, %r10
937 xor %r11, %r11
938 xor %r12, %r12
939 xor %r13, %r13
940 xor %r15, %r15
941
942 /*
943 * At this point, the stack contains:
944 *
945 * +--------------+
946 * | Return SS | +32
947 * | Return RSP | +24
948 * | Return RFL | +16
949 * | Return CS | +8
950 * | Return RIP | <-- rsp
951 * +--------------+
952 */
953
954 cmpl $(SYSENTER_CS), 8(%rsp)
955 /* test for sysexit */
956 je L_rtu_via_sysexit
957
958 cmpl $1, %r14d
959 je L_verw_island
960
961L_after_verw:
962 xor %r14, %r14
963
964.globl EXT(ret32_iret)
965EXT(ret32_iret):
966 iretq /* return from interrupt */
967
968L_verw_island:
969 verw 32(%rsp)
970 jmp L_after_verw
971
972L_verw_island_1:
973 verw 16(%rsp)
974 jmp L_after_verw_1
975
976L_rtu_via_sysexit:
977 pop %rdx /* user return eip */
978 pop %rcx /* pop and toss cs */
979 andl $(~EFL_IF), (%rsp) /* clear interrupts enable, sti below */
980
981 /*
982 * %ss is now at 16(%rsp)
983 */
984 cmpl $1, %r14d
985 je L_verw_island_1
986L_after_verw_1:
987 xor %r14, %r14
988
989 popf /* flags - carry denotes failure */
990 pop %rcx /* user return esp */
991
992
993 sti /* interrupts enabled after sysexit */
994 sysexitl /* 32-bit sysexit */
995
996/* End of double-mapped TEXT */
997.text
998
999Entry(ks_dispatch)
1000 popq %rax
6d2010ae 1001 cmpl $(KERNEL64_CS), ISF64_CS(%rsp)
5c9f4661 1002 je EXT(ks_dispatch_kernel)
b0d623f7 1003
1004 mov %rax, %gs:CPU_UBER_TMP
1005 mov %gs:CPU_UBER_ISF, %rax
1006 add $(ISF64_SIZE), %rax
1007
1008 xchg %rsp, %rax
 1009/* Memory to memory moves (ain't x86 wonderful):
1010 * Transfer the exception frame from the per-CPU exception stack to the
1011 * 'PCB' stack programmed at cswitch.
1012 */
1013 push ISF64_SS(%rax)
1014 push ISF64_RSP(%rax)
1015 push ISF64_RFLAGS(%rax)
1016 push ISF64_CS(%rax)
1017 push ISF64_RIP(%rax)
1018 push ISF64_ERR(%rax)
1019 push ISF64_TRAPFN(%rax)
1020 push ISF64_TRAPNO(%rax)
1021 mov %gs:CPU_UBER_TMP, %rax
1022 jmp EXT(ks_dispatch_user)
b0d623f7 1023
1024Entry(ks_dispatch_user_with_pop_rax)
1025 pop %rax
1026 jmp EXT(ks_dispatch_user)
1027
5c9f4661 1028Entry(ks_dispatch_user)
060df5ea 1029 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
1030 je L_dispatch_U32 /* 32-bit user task */
1031
1032L_dispatch_U64:
1033 subq $(ISS64_OFFSET), %rsp
1034 mov %r15, R64_R15(%rsp)
1035 mov %rsp, %r15
1036 mov %gs:CPU_KERNEL_STACK, %rsp
1037 jmp L_dispatch_64bit
1038
1039Entry(ks_dispatch_kernel_with_pop_rax)
1040 pop %rax
1041 jmp EXT(ks_dispatch_kernel)
1042
5c9f4661 1043Entry(ks_dispatch_kernel)
1044 subq $(ISS64_OFFSET), %rsp
1045 mov %r15, R64_R15(%rsp)
1046 mov %rsp, %r15
1047
1048/*
1049 * Here for 64-bit user task or kernel
1050 */
1051L_dispatch_64bit:
1052 movl $(SS_64), SS_FLAVOR(%r15)
1053
1054 /*
1055 * Save segment regs if a 64-bit task has
1056 * installed customized segments in the LDT
b0d623f7 1057 */
1058 cmpl $0, %gs:CPU_CURTASK_HAS_LDT
1059 je L_skip_save_extra_segregs
1060
1061 mov %ds, R64_DS(%r15)
1062 mov %es, R64_ES(%r15)
1063
1064L_skip_save_extra_segregs:
1065 mov %fs, R64_FS(%r15)
1066 mov %gs, R64_GS(%r15)
b0d623f7 1067
0a7de745 1068
b0d623f7 1069 /* Save general-purpose registers */
1070 mov %rax, R64_RAX(%r15)
1071 mov %rbx, R64_RBX(%r15)
1072 mov %rcx, R64_RCX(%r15)
1073 mov %rdx, R64_RDX(%r15)
1074 mov %rbp, R64_RBP(%r15)
1075 mov %rdi, R64_RDI(%r15)
1076 mov %rsi, R64_RSI(%r15)
1077 mov %r8, R64_R8(%r15)
1078 mov %r9, R64_R9(%r15)
1079 mov %r10, R64_R10(%r15)
1080 mov %r11, R64_R11(%r15)
1081 mov %r12, R64_R12(%r15)
1082 mov %r13, R64_R13(%r15)
1083 mov %r14, R64_R14(%r15)
b0d623f7 1084
1085 /* Zero unused GPRs. BX/DX/SI are clobbered elsewhere across the exception handler, and are skipped. */
1086 xor %ecx, %ecx
1087 xor %edi, %edi
1088 xor %r8, %r8
1089 xor %r9, %r9
1090 xor %r10, %r10
1091 xor %r11, %r11
1092 xor %r12, %r12
1093 xor %r13, %r13
1094 xor %r14, %r14
1095
1096 /* cr2 is significant only for page-faults */
1097 mov %cr2, %rax
39236c6e 1098 mov %rax, R64_CR2(%r15)
b0d623f7 1099
0a7de745 1100L_dispatch_U64_after_fault:
1101 mov R64_TRAPNO(%r15), %ebx /* %ebx := trapno for later */
1102 mov R64_TRAPFN(%r15), %rdx /* %rdx := trapfn for later */
1103 mov R64_CS(%r15), %esi /* %esi := cs for later */
b0d623f7 1104
39236c6e 1105 jmp L_common_dispatch
b0d623f7 1106
1107L_dispatch_U32: /* 32-bit user task */
1108 subq $(ISS64_OFFSET), %rsp
1109 mov %rsp, %r15
1110 mov %gs:CPU_KERNEL_STACK, %rsp
1111 movl $(SS_32), SS_FLAVOR(%r15)
1112
1113 /*
1114 * Save segment regs
1115 */
1116 mov %ds, R32_DS(%r15)
1117 mov %es, R32_ES(%r15)
1118 mov %fs, R32_FS(%r15)
1119 mov %gs, R32_GS(%r15)
1120
1121 /*
1122 * Save general 32-bit registers
1123 */
1124 mov %eax, R32_EAX(%r15)
1125 mov %ebx, R32_EBX(%r15)
1126 mov %ecx, R32_ECX(%r15)
1127 mov %edx, R32_EDX(%r15)
1128 mov %ebp, R32_EBP(%r15)
1129 mov %esi, R32_ESI(%r15)
1130 mov %edi, R32_EDI(%r15)
1131
1132 /* Unconditionally save cr2; only meaningful on page faults */
1133 mov %cr2, %rax
39236c6e 1134 mov %eax, R32_CR2(%r15)
1135 /* Zero unused GPRs. BX/DX/SI/R15 are clobbered elsewhere across the exception handler, and are skipped. */
1136 xor %ecx, %ecx
1137 xor %edi, %edi
1138 xor %r8, %r8
1139 xor %r9, %r9
1140 xor %r10, %r10
1141 xor %r11, %r11
1142 xor %r12, %r12
1143 xor %r13, %r13
1144 xor %r14, %r14
1145
1146 /*
1147 * Copy registers already saved in the machine state
1148 * (in the interrupt stack frame) into the compat save area.
1149 */
1150 mov R64_RIP(%r15), %eax
1151 mov %eax, R32_EIP(%r15)
1152 mov R64_RFLAGS(%r15), %eax
1153 mov %eax, R32_EFLAGS(%r15)
1154 mov R64_RSP(%r15), %eax
1155 mov %eax, R32_UESP(%r15)
1156 mov R64_SS(%r15), %eax
1157 mov %eax, R32_SS(%r15)
1158L_dispatch_U32_after_fault:
1159 mov R64_CS(%r15), %esi /* %esi := %cs for later */
1160 mov %esi, R32_CS(%r15)
1161 mov R64_TRAPNO(%r15), %ebx /* %ebx := trapno for later */
1162 mov %ebx, R32_TRAPNO(%r15)
1163 mov R64_ERR(%r15), %eax
1164 mov %eax, R32_ERR(%r15)
1165 mov R64_TRAPFN(%r15), %rdx /* %rdx := trapfn for later */
1166
1167L_common_dispatch:
1168 cld /* Ensure the direction flag is clear in the kernel */
1169 cmpl $0, EXT(pmap_smap_enabled)(%rip)
1170 je 1f
1171 clac /* Clear EFLAGS.AC if SMAP is present/enabled */
11721:
b0d623f7 1173 /*
0a7de745 1174 * We mark the kernel's cr3 as "active" for TLB coherency evaluation.
1175 * For threads with a mapped pagezero (some WINE games) on non-SMAP platforms,
1176 * we switch to the kernel's address space on entry. Also,
1177 * if the global no_shared_cr3 is TRUE we do switch to the kernel's cr3
1178 * so that illicit accesses to userspace can be trapped.
1179 */
1180 mov %gs:CPU_KERNEL_CR3, %rcx
1181 mov %rcx, %gs:CPU_ACTIVE_CR3
1182 test $3, %esi /* CS: user/kernel? */
1183 jz 2f /* skip CR3 reload if from kernel */
1184 xor %ebp, %ebp
1185 cmpl $0, %gs:CPU_PAGEZERO_MAPPED
1186 jnz 11f
b0d623f7 1187 cmpl $0, EXT(no_shared_cr3)(%rip)
fe8ab488 1188 je 2f
118911:
1190 xor %eax, %eax
1191 movw %gs:CPU_KERNEL_PCID, %ax
1192 or %rax, %rcx
b0d623f7 1193 mov %rcx, %cr3 /* load kernel cr3 */
0a7de745 1194 jmp 4f
fe8ab488 11952:
1196 /* Deferred processing of pending kernel address space TLB invalidations */
1197 mov %gs:CPU_ACTIVE_CR3+4, %rcx
1198 shr $32, %rcx
1199 testl %ecx, %ecx
1200 jz 4f
1201 movl $0, %gs:CPU_TLB_INVALID
1202 cmpb $0, EXT(invpcid_enabled)(%rip)
1203 jz L_cr4_island
 1204 movl $2, %ecx /* INVPCID type 2: invalidate all PCIDs, including global translations */
 1205 invpcid %gs:CPU_IP_DESC, %rcx
fe8ab488 12064:
0a7de745 1207L_set_act:
b0d623f7 1208 mov %gs:CPU_ACTIVE_THREAD, %rcx /* Get the active thread */
5ba3f43e 1209 testq %rcx, %rcx
0a7de745 1210 je L_intcnt
fe8ab488 1211 movl $-1, TH_IOTIER_OVERRIDE(%rcx) /* Reset IO tier override to -1 before handling trap */
6d2010ae 1212 cmpq $0, TH_PCB_IDS(%rcx) /* Is there a debug register state? */
1213 jnz L_dr7_island
1214L_intcnt:
6d2010ae 1215 incl %gs:hwIntCnt(,%ebx,4) // Bump the trap/intr count
b0d623f7 1216 /* Dispatch the designated handler */
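	/*
	 * If %rsp/%r15 still point into the double-mapped alias of the
	 * stack/savearea, slide them back to their canonical kernel addresses
	 * (by subtracting dblmap_dist) before entering C code.
	 */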
1217 cmp EXT(dblmap_base)(%rip), %rsp
1218 jb 66f
1219 cmp EXT(dblmap_max)(%rip), %rsp
1220 jge 66f
1221 subq EXT(dblmap_dist)(%rip), %rsp
1222 subq EXT(dblmap_dist)(%rip), %r15
122366:
1224 leaq EXT(idt64_hndl_table1)(%rip), %rax
1225 jmp *(%rax, %rdx, 8)
b0d623f7 1226
1227L_cr4_island:
 1228 mov %cr4, %rcx /* RMWW CR4: toggling PGE flushes the TLB (incl. globals), for lack of an alternative */
1229 and $(~CR4_PGE), %rcx
1230 mov %rcx, %cr4
1231 or $(CR4_PGE), %rcx
1232 mov %rcx, %cr4
1233 jmp L_set_act
1234L_dr7_island:
1235 xor %ecx, %ecx /* If so, reset DR7 (the control) */
1236 mov %rcx, %dr7
1237 jmp L_intcnt
1238/*
1239 * Control is passed here to return to user.
1240 */
1241Entry(return_to_user)
1242 TIME_TRAP_UEXIT
1243
1244Entry(ret_to_user)
b0d623f7 1245 mov %gs:CPU_ACTIVE_THREAD, %rdx
1246 cmpq $0, TH_PCB_IDS(%rdx) /* Is there a debug register context? */
1247 jnz L_dr_restore_island
1248L_post_dr_restore:
b0d623f7 1249 /*
1250 * We now mark the task's address space as active for TLB coherency.
1251 * Handle special cases such as pagezero-less tasks here.
1252 */
1253 mov %gs:CPU_TASK_CR3, %rcx
1254 mov %rcx, %gs:CPU_ACTIVE_CR3
1255 cmpl $0, %gs:CPU_PAGEZERO_MAPPED
1256 jnz L_cr3_switch_island
1257 movl EXT(no_shared_cr3)(%rip), %eax
1258 test %eax, %eax /* -no_shared_cr3 */
1259 jnz L_cr3_switch_island
1260
1261L_cr3_switch_return:
1262 mov %gs:CPU_DR7, %rax /* Is there a debug control register?*/
1263 cmp $0, %rax
1264 je 4f
1265 mov %rax, %dr7 /* Set DR7 */
1266 movq $0, %gs:CPU_DR7
12674:
39236c6e 1268 cmpl $(SS_64), SS_FLAVOR(%r15) /* 64-bit state? */
1269 jne L_32bit_return
1270
1271 /*
1272 * Restore general 64-bit registers.
1273 * Here on fault stack and PCB address in R15.
1274 */
1275 leaq EXT(idt64_hndl_table0)(%rip), %rax
 1276 jmp *8(%rax) /* ks_64bit_return */
1277
1278
1279L_32bit_return:
1280#if DEBUG_IDT64
39236c6e 1281 cmpl $(SS_32), SS_FLAVOR(%r15) /* 32-bit state? */
1282 je 1f
1283 cli
1284 POSTCODE2(0x6432)
fe8ab488 1285 CCALL1(panic_idt64, %r15)
12861:
1287#endif /* DEBUG_IDT64 */
1288
1289 leaq EXT(idt64_hndl_table0)(%rip), %rax
 1290 jmp *0x18(%rax) /* ks_32bit_return */
0a7de745 1291
b0d623f7 1292
1293L_dr_restore_island:
1294 movq TH_PCB_IDS(%rdx),%rax /* Obtain this thread's debug state */
1295 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
1296 jne 1f
1297 movl DS_DR0(%rax), %ecx /* If so, load the 32 bit DRs */
1298 movq %rcx, %dr0
1299 movl DS_DR1(%rax), %ecx
1300 movq %rcx, %dr1
1301 movl DS_DR2(%rax), %ecx
1302 movq %rcx, %dr2
1303 movl DS_DR3(%rax), %ecx
1304 movq %rcx, %dr3
1305 movl DS_DR7(%rax), %ecx
1306 movq %rcx, %gs:CPU_DR7
1307 jmp 2f
13081:
1309 mov DS64_DR0(%rax), %rcx /* Load the full width DRs*/
1310 mov %rcx, %dr0
1311 mov DS64_DR1(%rax), %rcx
1312 mov %rcx, %dr1
1313 mov DS64_DR2(%rax), %rcx
1314 mov %rcx, %dr2
1315 mov DS64_DR3(%rax), %rcx
1316 mov %rcx, %dr3
1317 mov DS64_DR7(%rax), %rcx
1318 mov %rcx, %gs:CPU_DR7
13192:
1320 jmp L_post_dr_restore
1321L_cr3_switch_island:
1322 xor %eax, %eax
1323 movw %gs:CPU_ACTIVE_PCID, %ax
1324 or %rax, %rcx
1325 mov %rcx, %cr3
1326 jmp L_cr3_switch_return
1327
1328ret_to_kernel:
1329#if DEBUG_IDT64
39236c6e 1330 cmpl $(SS_64), SS_FLAVOR(%r15) /* 64-bit state? */
1331 je 1f
1332 cli
1333 POSTCODE2(0x6464)
39236c6e 1334 CCALL1(panic_idt64, %r15)
1335 hlt
13361:
39236c6e 1337 cmpl $(KERNEL64_CS), R64_CS(%r15)
b0d623f7 1338 je 2f
39236c6e 1339 CCALL1(panic_idt64, %r15)
1340 hlt
13412:
1342#endif
1343 /*
1344 * Restore general 64-bit registers.
1345 * Here on fault stack and PCB address in R15.
1346 */
1347 leaq EXT(idt64_hndl_table0)(%rip), %rax
 1348 jmp *8(%rax) /* ks_64bit_return */
b0d623f7 1349
1350/* All 'exceptions' enter hndl_alltraps, with:
1351 * r15 x86_saved_state_t address
1352 * rsp kernel stack if user-space, otherwise interrupt or kernel stack
1353 * esi cs at trap
1354 *
1355 * The rest of the state is set up as:
39236c6e 1356 * both rsp and r15 are 16-byte aligned
1357 * interrupts disabled
1358 * direction flag cleared
1359 */
1360Entry(hndl_alltraps)
1361 mov %esi, %eax
1362 testb $3, %al
1363 jz trap_from_kernel
1364
1365 TIME_TRAP_UENTRY
1366
1367 /* Check for active vtimers in the current task */
1368 mov %gs:CPU_ACTIVE_THREAD, %rcx
fe8ab488 1369 movl $-1, TH_IOTIER_OVERRIDE(%rcx) /* Reset IO tier override to -1 before handling trap/exception */
1370 mov TH_TASK(%rcx), %rbx
1371 TASK_VTIMER_CHECK(%rbx, %rcx)
1372
39236c6e 1373 CCALL1(user_trap, %r15) /* call user trap routine */
6d2010ae 1374 /* user_trap() unmasks interrupts */
b0d623f7 1375 cli /* hold off intrs - critical section */
1376 xorl %ecx, %ecx /* don't check if we're in the PFZ */
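	/*
	 * %ecx acts as a flag for return_from_trap: 0 means don't check whether
	 * the user RIP lies in the commpage preemption-free zone (PFZ);
	 * ast_from_interrupt_user sets it to 1 so that an AST pending while user
	 * code is inside the PFZ is deferred and signalled via EBX/RBX instead.
	 */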
1377
1378
1379Entry(return_from_trap)
39236c6e 1380 movq %gs:CPU_ACTIVE_THREAD,%r15 /* Get current thread */
fe8ab488 1381 movl $-1, TH_IOTIER_OVERRIDE(%r15) /* Reset IO tier override to -1 before returning to userspace */
1382 cmpl $0, TH_RWLOCK_COUNT(%r15) /* Check if current thread has pending RW locks held */
1383 jz 1f
1384 xorq %rbp, %rbp /* clear framepointer */
1385 mov %r15, %rdi /* Set RDI to current thread */
1386 CCALL(lck_rw_clear_promotions_x86) /* Clear promotions if needed */
13871:
1388 movq TH_PCB_ISS(%r15), %r15 /* PCB stack */
1389 movl %gs:CPU_PENDING_AST,%eax
1390 testl %eax,%eax
39236c6e 1391 je EXT(return_to_user) /* branch if no AST */
1392
1393L_return_from_trap_with_ast:
1394 testl %ecx, %ecx /* see if we need to check for an EIP in the PFZ */
1395 je 2f /* no, go handle the AST */
39236c6e 1396 cmpl $(SS_64), SS_FLAVOR(%r15) /* are we a 64-bit task? */
1397 je 1f
1398 /* no... 32-bit user mode */
39236c6e 1399 movl R32_EIP(%r15), %edi
6d2010ae 1400 xorq %rbp, %rbp /* clear framepointer */
1401 CCALL(commpage_is_in_pfz32)
1402 testl %eax, %eax
1403 je 2f /* not in the PFZ... go service AST */
39236c6e 1404 movl %eax, R32_EBX(%r15) /* let the PFZ know we've pended an AST */
1405 jmp EXT(return_to_user)
14061:
39236c6e 1407 movq R64_RIP(%r15), %rdi
6d2010ae 1408 xorq %rbp, %rbp /* clear framepointer */
1409 CCALL(commpage_is_in_pfz64)
1410 testl %eax, %eax
1411 je 2f /* not in the PFZ... go service AST */
39236c6e 1412 movl %eax, R64_RBX(%r15) /* let the PFZ know we've pended an AST */
b0d623f7 1413 jmp EXT(return_to_user)
5c9f4661 14142:
b0d623f7 1415
6d2010ae 1416 xorq %rbp, %rbp /* clear framepointer */
5ba3f43e 1417 CCALL(ast_taken_user) /* handle all ASTs (enables interrupts, may return via continuation) */
b0d623f7 1418
3e170ce0 1419 cli
39236c6e 1420 mov %rsp, %r15 /* AST changes stack, saved state */
1421 xorl %ecx, %ecx /* don't check if we're in the PFZ */
1422 jmp EXT(return_from_trap) /* and check again (rare) */
1423
1424/*
1425 * Trap from kernel mode. No need to switch stacks.
 1426 * Interrupts must be off here - we will restore them to their state at the
 1427 * time of the trap as soon as it's safe to do so without recursing on preemption
39236c6e 1428 *
b0d623f7 1429 */
b0d623f7 1430trap_from_kernel:
1431 movq %r15, %rdi /* saved state addr */
1432 pushq R64_RIP(%r15) /* Simulate a CALL from fault point */
1433 pushq %rbp /* Extend framepointer chain */
1434 movq %rsp, %rbp
6d2010ae 1435 CCALLWITHSP(kernel_trap) /* to kernel trap routine */
1436 popq %rbp
1437 addq $8, %rsp
39236c6e 1438 mov %rsp, %r15 /* DTrace slides stack/saved-state */
1439 cli
1440
1441 movl %gs:CPU_PENDING_AST,%eax /* get pending asts */
1442 testl $(AST_URGENT),%eax /* any urgent preemption? */
1443 je ret_to_kernel /* no, nothing to do */
39236c6e 1444 cmpl $(T_PREEMPT),R64_TRAPNO(%r15)
b0d623f7 1445 je ret_to_kernel /* T_PREEMPT handled in kernel_trap() */
39236c6e 1446 testl $(EFL_IF),R64_RFLAGS(%r15) /* interrupts disabled? */
1447 je ret_to_kernel
1448 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
1449 jne ret_to_kernel
1450 movq %gs:CPU_KERNEL_STACK,%rax
1451 movq %rsp,%rcx
1452 xorq %rax,%rcx
1453 andq EXT(kernel_stack_mask)(%rip),%rcx
1454 testq %rcx,%rcx /* are we on the kernel stack? */
1455 jne ret_to_kernel /* no, skip it */
1456
5ba3f43e 1457 CCALL(ast_taken_kernel) /* take the AST */
1458
1459 mov %rsp, %r15 /* AST changes stack, saved state */
1460 jmp ret_to_kernel
1461
1462
1463/*
1464 * All interrupts on all tasks enter here with:
1465 * r15 x86_saved_state_t
1466 * rsp kernel or interrupt stack
1467 * esi cs at trap
1468 *
39236c6e 1469 * both rsp and r15 are 16-byte aligned
1470 * interrupts disabled
1471 * direction flag cleared
1472 */
1473Entry(hndl_allintrs)
1474 /*
1475 * test whether already on interrupt stack
1476 */
1477 movq %gs:CPU_INT_STACK_TOP,%rcx
1478 cmpq %rsp,%rcx
1479 jb 1f
1480 leaq -INTSTACK_SIZE(%rcx),%rdx
1481 cmpq %rsp,%rdx
1482 jb int_from_intstack
060df5ea 14831:
1484 xchgq %rcx,%rsp /* switch to interrupt stack */
1485
1486 mov %cr0,%rax /* get cr0 */
1487 orl $(CR0_TS),%eax /* or in TS bit */
1488 mov %rax,%cr0 /* set cr0 */
1489
b0d623f7 1490 pushq %rcx /* save pointer to old stack */
1491 pushq %gs:CPU_INT_STATE /* save previous intr state */
1492 movq %r15,%gs:CPU_INT_STATE /* set intr state */
1493
1494 TIME_INT_ENTRY /* do timing */
1495
1496 /* Check for active vtimers in the current task */
1497 mov %gs:CPU_ACTIVE_THREAD, %rcx
1498 mov TH_TASK(%rcx), %rbx
1499 TASK_VTIMER_CHECK(%rbx, %rcx)
1500
1501 incl %gs:CPU_PREEMPTION_LEVEL
1502 incl %gs:CPU_INTERRUPT_LEVEL
1503
39236c6e 1504 CCALL1(interrupt, %r15) /* call generic interrupt routine */
b0d623f7 1505
5c9f4661 1506.globl EXT(return_to_iret)
1507LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */
1508
1509 decl %gs:CPU_INTERRUPT_LEVEL
1510 decl %gs:CPU_PREEMPTION_LEVEL
1511
1512 TIME_INT_EXIT /* do timing */
1513
1514 popq %gs:CPU_INT_STATE /* reset/clear intr state pointer */
1515 popq %rsp /* switch back to old stack */
1516
b0d623f7 1517 movq %gs:CPU_ACTIVE_THREAD,%rax
6d2010ae 1518 movq TH_PCB_FPS(%rax),%rax /* get pcb's ifps */
1519 cmpq $0,%rax /* Is there a context */
1520 je 1f /* Branch if not */
1521 movl FP_VALID(%rax),%eax /* Load fp_valid */
1522 cmpl $0,%eax /* Check if valid */
1523 jne 1f /* Branch if valid */
1524 clts /* Clear TS */
1525 jmp 2f
15261:
1527 mov %cr0,%rax /* get cr0 */
1528 orl $(CR0_TS),%eax /* or in TS bit */
1529 mov %rax,%cr0 /* set cr0 */
15302:
b0d623f7 1531 /* Load interrupted code segment into %eax */
1532 movl R32_CS(%r15),%eax /* assume 32-bit state */
1533 cmpl $(SS_64),SS_FLAVOR(%r15)/* 64-bit? */
1534#if DEBUG_IDT64
1535 jne 4f
39236c6e 1536 movl R64_CS(%r15),%eax /* 64-bit user mode */
1537 jmp 3f
15384:
39236c6e 1539 cmpl $(SS_32),SS_FLAVOR(%r15)
1540 je 3f
1541 POSTCODE2(0x6431)
39236c6e 1542 CCALL1(panic_idt64, %r15)
1543 hlt
1544#else
1545 jne 3f
39236c6e 1546 movl R64_CS(%r15),%eax /* 64-bit user mode */
1547#endif
15483:
1549 testb $3,%al /* user mode, */
1550 jnz ast_from_interrupt_user /* go handle potential ASTs */
1551 /*
1552 * we only want to handle preemption requests if
1553 * the interrupt fell in the kernel context
1554 * and preemption isn't disabled
1555 */
1556 movl %gs:CPU_PENDING_AST,%eax
1557 testl $(AST_URGENT),%eax /* any urgent requests? */
1558 je ret_to_kernel /* no, nothing to do */
1559
1560 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
1561 jne ret_to_kernel /* yes, skip it */
1562
1563 /*
1564 * Take an AST from kernel space. We don't need (and don't want)
1565 * to do as much as the case where the interrupt came from user
1566 * space.
1567 */
5ba3f43e 1568 CCALL(ast_taken_kernel)
b0d623f7 1569
39236c6e 1570 mov %rsp, %r15 /* AST changes stack, saved state */
1571 jmp ret_to_kernel
1572
1573
1574/*
1575 * nested int - simple path, can't preempt etc on way out
1576 */
1577int_from_intstack:
1578 incl %gs:CPU_PREEMPTION_LEVEL
1579 incl %gs:CPU_INTERRUPT_LEVEL
060df5ea 1580 incl %gs:CPU_NESTED_ISTACK
1581
1582 push %gs:CPU_INT_STATE
1583 mov %r15, %gs:CPU_INT_STATE
1584
1585 CCALL1(interrupt, %r15)
1586
1587 pop %gs:CPU_INT_STATE
1588
1589 decl %gs:CPU_INTERRUPT_LEVEL
1590 decl %gs:CPU_PREEMPTION_LEVEL
060df5ea 1591 decl %gs:CPU_NESTED_ISTACK
39236c6e 1592
1593 jmp ret_to_kernel
1594
1595/*
1596 * Take an AST from an interrupted user
1597 */
1598ast_from_interrupt_user:
1599 movl %gs:CPU_PENDING_AST,%eax
1600 testl %eax,%eax /* pending ASTs? */
1601 je EXT(ret_to_user) /* no, nothing to do */
1602
1603 TIME_TRAP_UENTRY
1604
1605 movl $1, %ecx /* check if we're in the PFZ */
1606 jmp L_return_from_trap_with_ast /* return */
1607
1608
1609/* Syscall dispatch routines! */
1610
1611/*
1612 *
1613 * 32bit Tasks
1614 * System call entries via INTR_GATE or sysenter:
1615 *
1616 * r15 x86_saved_state32_t
1617 * rsp kernel stack
1618 *
1619 * both rsp and r15 are 16-byte aligned
1620 * interrupts disabled
1621 * direction flag cleared
1622 */
1623
1624Entry(hndl_sysenter)
1625 /*
1626 * We can be here either for a mach syscall or a unix syscall,
1627 * as indicated by the sign of the code:
1628 */
39236c6e 1629 movl R32_EAX(%r15),%eax
1630 testl %eax,%eax
1631 js EXT(hndl_mach_scall) /* < 0 => mach */
1632 /* > 0 => unix */
1633
1634Entry(hndl_unix_scall)
1635
1636 TIME_TRAP_UENTRY
1637
b0d623f7 1638 movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
1639 movq TH_TASK(%rcx),%rbx /* point to current task */
1640 incl TH_SYSCALLS_UNIX(%rcx) /* increment call count */
1641
1642 /* Check for active vtimers in the current task */
1643 TASK_VTIMER_CHECK(%rbx,%rcx)
1644
1645 sti
1646
39236c6e 1647 CCALL1(unix_syscall, %r15)
1648 /*
1649 * always returns through thread_exception_return
1650 */
1651
1652
1653Entry(hndl_mach_scall)
1654 TIME_TRAP_UENTRY
1655
b0d623f7 1656 movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
1657 movq TH_TASK(%rcx),%rbx /* point to current task */
1658 incl TH_SYSCALLS_MACH(%rcx) /* increment call count */
1659
1660 /* Check for active vtimers in the current task */
1661 TASK_VTIMER_CHECK(%rbx,%rcx)
1662
1663 sti
1664
39236c6e 1665 CCALL1(mach_call_munger, %r15)
1666 /*
1667 * always returns through thread_exception_return
1668 */
1669
1670
1671Entry(hndl_mdep_scall)
1672 TIME_TRAP_UENTRY
1673
1674 /* Check for active vtimers in the current task */
1675 movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
6d2010ae 1676 movq TH_TASK(%rcx),%rbx /* point to current task */
1677 TASK_VTIMER_CHECK(%rbx,%rcx)
1678
1679 sti
1680
39236c6e 1681 CCALL1(machdep_syscall, %r15)
1682 /*
1683 * always returns through thread_exception_return
1684 */
1685
1686/*
1687 * 64bit Tasks
1688 * System call entries via syscall only:
1689 *
1690 * r15 x86_saved_state64_t
1691 * rsp kernel stack
1692 *
1693 * both rsp and r15 are 16-byte aligned
1694 * interrupts disabled
1695 * direction flag cleared
1696 */
1697
1698Entry(hndl_syscall)
1699 TIME_TRAP_UENTRY
1700
b0d623f7 1701 movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
fe8ab488 1702 movl $-1, TH_IOTIER_OVERRIDE(%rcx) /* Reset IO tier override to -1 before handling syscall */
6d2010ae 1703 movq TH_TASK(%rcx),%rbx /* point to current task */
1704
1705 /* Check for active vtimers in the current task */
1706 TASK_VTIMER_CHECK(%rbx,%rcx)
1707
1708 /*
1709 * We can be here either for a mach, unix machdep or diag syscall,
1710 * as indicated by the syscall class:
1711 */
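	/*
	 * The class is encoded in the upper bits of the syscall number (see
	 * mach/i386/syscall_sw.h); e.g. a Mach trap is issued as roughly
	 * (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT) | trap_number.
	 */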
39236c6e 1712 movl R64_RAX(%r15), %eax /* syscall number/class */
1713 movl %eax, %edx
1714 andl $(SYSCALL_CLASS_MASK), %edx /* syscall class */
1715 cmpl $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
1716 je EXT(hndl_mach_scall64)
1717 cmpl $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
1718 je EXT(hndl_unix_scall64)
1719 cmpl $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
1720 je EXT(hndl_mdep_scall64)
1721 cmpl $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
1722 je EXT(hndl_diag_scall64)
1723
1724 /* Syscall class unknown */
316670eb 1725 sti
1726 CCALL3(i386_exception, $(EXC_SYSCALL), %rax, $1)
1727 /* no return */
1728
1729
1730Entry(hndl_unix_scall64)
6d2010ae 1731 incl TH_SYSCALLS_UNIX(%rcx) /* increment call count */
1732 sti
1733
39236c6e 1734 CCALL1(unix_syscall64, %r15)
1735 /*
1736 * always returns through thread_exception_return
1737 */
1738
1739
1740Entry(hndl_mach_scall64)
6d2010ae 1741 incl TH_SYSCALLS_MACH(%rcx) /* increment call count */
1742 sti
1743
39236c6e 1744 CCALL1(mach_call_munger64, %r15)
1745 /*
1746 * always returns through thread_exception_return
1747 */
1748
1749
1750
1751Entry(hndl_mdep_scall64)
1752 sti
1753
39236c6e 1754 CCALL1(machdep_syscall64, %r15)
1755 /*
1756 * always returns through thread_exception_return
1757 */
1758
b0d623f7 1759Entry(hndl_diag_scall64)
39236c6e 1760 CCALL1(diagCall64, %r15) // Call diagnostics
316670eb 1761 test %eax, %eax // What kind of return is this?
060df5ea 1762 je 1f // - branch if bad (zero)
1763 jmp EXT(return_to_user) // Normal return, do not check asts...
17641:
316670eb 1765 sti
1766 CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
1767 /* no return */
5c9f4661 1768/* TODO assert at all 'C' entry points that we're never operating on the fault stack's alias mapping */
b0d623f7 1769Entry(hndl_machine_check)
5c9f4661 1770 /* Adjust SP and savearea to their canonical, non-aliased addresses */
39236c6e 1771 CCALL1(panic_machine_check64, %r15)
1772 hlt
1773
1774Entry(hndl_double_fault)
39236c6e 1775 CCALL1(panic_double_fault64, %r15)
b0d623f7 1776 hlt