/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/asm.h>
#include <assym.s>
#include <debug.h>
#include <i386/eflags.h>
#include <i386/rtclock_asm.h>
#include <i386/trap.h>
#define _ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <mach/exception_types.h>

#if DEBUG
#define DEBUG_IDT64 1
#endif

/*
 * This is the low-level trap and interrupt handling code associated with
 * the IDT. It also includes system call handlers for sysenter/syscall.
 * The IDT itself is defined in mp_desc.c.
 *
 * Code here is structured as follows:
 *
 * stubs	Code called directly from an IDT vector.
 *		All entry points have the "idt64_" prefix and they are built
 *		using macros expanded by the inclusion of idt_table.h.
 *		This code performs vector-dependent identification and jumps
 *		into the dispatch code.
 *
 * dispatch	The dispatch code is responsible for saving the thread state
 *		(which is either 64-bit or 32-bit) and then jumping to the
 *		class handler identified by the stub.
 *
 * returns	Code to restore state and return to the previous context.
 *
 * handlers	There are several classes of handlers:
 *		interrupt	- asynchronous events typically from external devices
 *		trap		- synchronous events due to thread execution
 *		syscall		- synchronous system call request
 *		fatal		- fatal traps
 */

/*
 * Handlers:
 */
#define	HNDL_ALLINTRS		EXT(hndl_allintrs)
#define	HNDL_ALLTRAPS		EXT(hndl_alltraps)
#define	HNDL_SYSENTER		EXT(hndl_sysenter)
#define	HNDL_SYSCALL		EXT(hndl_syscall)
#define	HNDL_UNIX_SCALL		EXT(hndl_unix_scall)
#define	HNDL_MACH_SCALL		EXT(hndl_mach_scall)
#define	HNDL_MDEP_SCALL		EXT(hndl_mdep_scall)
#define	HNDL_DOUBLE_FAULT	EXT(hndl_double_fault)
#define	HNDL_MACHINE_CHECK	EXT(hndl_machine_check)

#if 1
#define PUSH_FUNCTION(func)			 \
	sub	$8, %rsp			;\
	push	%rax				;\
	leaq	func(%rip), %rax		;\
	movq	%rax, 8(%rsp)			;\
	pop	%rax
#else
#define PUSH_FUNCTION(func) pushq func
#endif
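
/*
 * N.B. The open-coded sequence above pushes the *address* of func;
 * a plain "pushq func" would push the 8 bytes stored at func, and
 * x86-64 has no push-immediate form that can take a 64-bit
 * RIP-relative address, hence the lea/store via a scratch register.
 */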

/* The wrapper for all non-special traps/interrupts */
/* Everything up to PUSH_FUNCTION is just to output
 * the interrupt number to the postcode display
 */
#if DEBUG_IDT64
#define IDT_ENTRY_WRAPPER(n, f)			 \
	push	%rax				;\
	POSTCODE2(0x6400+n)			;\
	pop	%rax				;\
	PUSH_FUNCTION(f)			;\
	pushq	$(n)				;\
	jmp	L_dispatch
#else
#define IDT_ENTRY_WRAPPER(n, f)			 \
	PUSH_FUNCTION(f)			;\
	pushq	$(n)				;\
	jmp	L_dispatch
#endif

/* A trap that comes with an error code already on the stack */
#define TRAP_ERR(n, f)				 \
	Entry(f)				;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)

/* A normal trap */
#define TRAP(n, f)				 \
	Entry(f)				;\
	pushq	$0				;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)

#define USER_TRAP TRAP

/* An interrupt */
#define INTERRUPT(n)				 \
	Entry(_intr_ ## n)			;\
	pushq	$0				;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLINTRS)

/* A trap with a special-case handler, hence we don't need to define anything */
#define TRAP_SPC(n, f)
#define TRAP_IST1(n, f)
#define TRAP_IST2(n, f)
#define USER_TRAP_SPC(n, f)

/* Generate all the stubs */
#include "idt_table.h"
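
/*
 * For illustration, a vector defined in idt_table.h as TRAP(n, f)
 * expands (via the macros above, non-DEBUG case) to roughly:
 *
 *	Entry(f)
 *		pushq	$0			// dummy error code
 *		PUSH_FUNCTION(HNDL_ALLTRAPS)	// trap function
 *		pushq	$(n)			// trap number
 *		jmp	L_dispatch
 *
 * so L_dispatch always finds trapno/trapfn/err below the hardware frame.
 */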

/*
 * Common dispatch point.
 * Determine what mode has been interrupted and save state accordingly.
 * Here with:
 *	rsp	from user-space:   interrupt state in PCB, or
 *		from kernel-space: interrupt state in kernel or interrupt stack
 *	GSBASE	from user-space:   pthread area, or
 *		from kernel-space: cpu_data
 */
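/*
 * On arrival here the stub and the CPU have together built an
 * interrupt stack frame (ISF64) of the shape documented later in
 * this file for the IST1 faults:
 *	 0	ISF64_TRAPNO	trap number	(pushed by stub)
 *	 8	ISF64_TRAPFN	trap function	(pushed by stub)
 *	16	ISF64_ERR	error code	(CPU-pushed or dummy 0)
 *	24	ISF64_RIP	interrupted RIP	(pushed by CPU)
 *	32	ISF64_CS	interrupted CS
 *	40	ISF64_RFLAGS	interrupted RFLAGS
 *	48	ISF64_RSP	interrupted RSP
 *	56	ISF64_SS	interrupted SS
 */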
L_dispatch:
	cmpl	$(KERNEL64_CS), ISF64_CS(%rsp)
	je	L_dispatch_kernel

	swapgs

L_dispatch_user:
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	je	L_dispatch_U32		/* 32-bit user task */

L_dispatch_U64:
	subq	$(ISS64_OFFSET), %rsp
	mov	%r15, R64_R15(%rsp)
	mov	%rsp, %r15
	mov	%gs:CPU_KERNEL_STACK, %rsp
	jmp	L_dispatch_64bit

L_dispatch_kernel:
	subq	$(ISS64_OFFSET), %rsp
	mov	%r15, R64_R15(%rsp)
	mov	%rsp, %r15

/*
 * Here for 64-bit user task or kernel
 */
L_dispatch_64bit:
	movl	$(SS_64), SS_FLAVOR(%r15)

	/*
	 * Save segment regs - for completeness since they're not used.
	 */
	movl	%fs, R64_FS(%r15)
	movl	%gs, R64_GS(%r15)

	/* Save general-purpose registers */
	mov	%rax, R64_RAX(%r15)
	mov	%rbx, R64_RBX(%r15)
	mov	%rcx, R64_RCX(%r15)
	mov	%rdx, R64_RDX(%r15)
	mov	%rbp, R64_RBP(%r15)
	mov	%rdi, R64_RDI(%r15)
	mov	%rsi, R64_RSI(%r15)
	mov	%r8, R64_R8(%r15)
	mov	%r9, R64_R9(%r15)
	mov	%r10, R64_R10(%r15)
	mov	%r11, R64_R11(%r15)
	mov	%r12, R64_R12(%r15)
	mov	%r13, R64_R13(%r15)
	mov	%r14, R64_R14(%r15)

	/* cr2 is significant only for page-faults */
	mov	%cr2, %rax
	mov	%rax, R64_CR2(%r15)

	mov	R64_TRAPNO(%r15), %ebx	/* %ebx := trapno for later */
	mov	R64_TRAPFN(%r15), %rdx	/* %rdx := trapfn for later */
	mov	R64_CS(%r15), %esi	/* %esi := cs for later */

	jmp	L_common_dispatch

L_64bit_entry_reject:
	/*
	 * Here for a 64-bit user attempting an invalid kernel entry.
	 */
	pushq	%rax
	leaq	HNDL_ALLTRAPS(%rip), %rax
	movq	%rax, ISF64_TRAPFN+8(%rsp)
	popq	%rax
	movq	$(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
	jmp	L_dispatch_U64

L_32bit_entry_check:
	/*
	 * Check we're not a confused 64-bit user.
	 */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
	/* fall through to 32-bit handler: */

L_dispatch_U32: /* 32-bit user task */
	subq	$(ISS64_OFFSET), %rsp
	mov	%rsp, %r15
	mov	%gs:CPU_KERNEL_STACK, %rsp
	movl	$(SS_32), SS_FLAVOR(%r15)

	/*
	 * Save segment regs
	 */
	movl	%ds, R32_DS(%r15)
	movl	%es, R32_ES(%r15)
	movl	%fs, R32_FS(%r15)
	movl	%gs, R32_GS(%r15)

	/*
	 * Save general 32-bit registers
	 */
	mov	%eax, R32_EAX(%r15)
	mov	%ebx, R32_EBX(%r15)
	mov	%ecx, R32_ECX(%r15)
	mov	%edx, R32_EDX(%r15)
	mov	%ebp, R32_EBP(%r15)
	mov	%esi, R32_ESI(%r15)
	mov	%edi, R32_EDI(%r15)

	/* Unconditionally save cr2; only meaningful on page faults */
	mov	%cr2, %rax
	mov	%eax, R32_CR2(%r15)

	/*
	 * Copy registers already saved in the machine state
	 * (in the interrupt stack frame) into the compat save area.
	 */
	mov	R64_RIP(%r15), %eax
	mov	%eax, R32_EIP(%r15)
	mov	R64_RFLAGS(%r15), %eax
	mov	%eax, R32_EFLAGS(%r15)
	mov	R64_RSP(%r15), %eax
	mov	%eax, R32_UESP(%r15)
	mov	R64_SS(%r15), %eax
	mov	%eax, R32_SS(%r15)
L_dispatch_U32_after_fault:
	mov	R64_CS(%r15), %esi	/* %esi := %cs for later */
	mov	%esi, R32_CS(%r15)
	mov	R64_TRAPNO(%r15), %ebx	/* %ebx := trapno for later */
	mov	%ebx, R32_TRAPNO(%r15)
	mov	R64_ERR(%r15), %eax
	mov	%eax, R32_ERR(%r15)
	mov	R64_TRAPFN(%r15), %rdx	/* %rdx := trapfn for later */

L_common_dispatch:
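	/*
	 * At this point the register contract is:
	 *	%r15	x86_saved_state_t (PCB for user, in place for kernel)
	 *	%rsp	kernel stack if from user, else unchanged
	 *	%ebx	trap number
	 *	%rdx	trap handler function
	 *	%esi	CS at the time of the trap
	 */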
	cld		/* Ensure the direction flag is clear in the kernel */

	/*
	 * On entering the kernel, we don't need to switch cr3
	 * because the kernel shares the user's address space.
	 * But we mark the kernel's cr3 as "active".
	 * If, however, the invalid cr3 flag is set, we have to flush tlbs
	 * since the kernel's mapping was changed while we were in userspace.
	 *
	 * But: if global no_shared_cr3 is TRUE we do switch to the kernel's cr3
	 * so that illicit accesses to userspace can be trapped.
	 */
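	/*
	 * A sketch of the logic below, in pseudo-C:
	 *
	 *	active_cr3 = kernel_cr3;
	 *	if (trapped from user && no_shared_cr3)
	 *		cr3 = kernel_cr3;	// and skip the flush test
	 *	else if (cpu_tlb_invalid) {
	 *		cpu_tlb_invalid = 0;
	 *		if (global pages need flushing)
	 *			toggle CR4.PGE;	// flushes all TLB entries
	 *		else
	 *			reload CR3;	// flushes non-global entries
	 *	}
	 */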
	mov	%gs:CPU_KERNEL_CR3, %rcx
	mov	%rcx, %gs:CPU_ACTIVE_CR3
	test	$3, %esi			/* user/kernel? */
	jz	1f				/* skip cr3 reload from kernel */
	xor	%rbp, %rbp
	cmpl	$0, EXT(no_shared_cr3)(%rip)
	je	1f
	mov	%rcx, %cr3			/* load kernel cr3 */
	jmp	2f				/* and skip tlb flush test */
1:
	mov	%gs:CPU_ACTIVE_CR3+4, %rcx
	shr	$32, %rcx
	testl	%ecx, %ecx
	jz	2f
	movl	$0, %gs:CPU_TLB_INVALID
	testl	$(1<<16), %ecx			/* Global? */
	jz	11f
	mov	%cr4, %rcx	/* RMWW CR4, for lack of an alternative*/
	and	$(~CR4_PGE), %rcx
	mov	%rcx, %cr4
	or	$(CR4_PGE), %rcx
	mov	%rcx, %cr4
	jmp	2f

11:	mov	%cr3, %rcx
	mov	%rcx, %cr3
2:
	mov	%gs:CPU_ACTIVE_THREAD, %rcx	/* Get the active thread */
	cmpq	$0, TH_PCB_IDS(%rcx)	/* Is there a debug register state? */
	je	3f
	xor	%ecx, %ecx		/* If so, reset DR7 (the control) */
	mov	%rcx, %dr7
3:
	incl	%gs:hwIntCnt(,%ebx,4)	// Bump the trap/intr count
	/* Dispatch the designated handler */
	jmp	*%rdx

/*
 * Control is passed here to return to user.
 */
Entry(return_to_user)
	TIME_TRAP_UEXIT

Entry(ret_to_user)
// XXX It would be nice to tidy up this debug register restore sequence...
	mov	%gs:CPU_ACTIVE_THREAD, %rdx
	movq	TH_PCB_IDS(%rdx),%rax	/* Obtain this thread's debug state */

	test	%rax, %rax		/* Is there a debug register context? */
	je	2f			/* branch if not */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
	jne	1f
	movl	DS_DR0(%rax), %ecx	/* If so, load the 32 bit DRs */
	movq	%rcx, %dr0
	movl	DS_DR1(%rax), %ecx
	movq	%rcx, %dr1
	movl	DS_DR2(%rax), %ecx
	movq	%rcx, %dr2
	movl	DS_DR3(%rax), %ecx
	movq	%rcx, %dr3
	movl	DS_DR7(%rax), %ecx
	movq	%rcx, %gs:CPU_DR7
	jmp	2f
1:
	mov	DS64_DR0(%rax), %rcx	/* Load the full width DRs*/
	mov	%rcx, %dr0
	mov	DS64_DR1(%rax), %rcx
	mov	%rcx, %dr1
	mov	DS64_DR2(%rax), %rcx
	mov	%rcx, %dr2
	mov	DS64_DR3(%rax), %rcx
	mov	%rcx, %dr3
	mov	DS64_DR7(%rax), %rcx
	mov	%rcx, %gs:CPU_DR7
2:
	/*
	 * On exiting the kernel there's no need to switch cr3 since we're
	 * already running in the user's address space which includes the
	 * kernel. Nevertheless, we now mark the task's cr3 as active.
	 * But, if no_shared_cr3 is set, we do need to switch cr3 at this point.
	 */
	mov	%gs:CPU_TASK_CR3, %rcx
	mov	%rcx, %gs:CPU_ACTIVE_CR3
	movl	EXT(no_shared_cr3)(%rip), %eax
	test	%eax, %eax		/* -no_shared_cr3 */
	jz	3f
	mov	%rcx, %cr3
3:
	mov	%gs:CPU_DR7, %rax	/* Is there a debug control register?*/
	cmp	$0, %rax
	je	4f
	mov	%rax, %dr7		/* Set DR7 */
	movq	$0, %gs:CPU_DR7
4:
	cmpl	$(SS_64), SS_FLAVOR(%r15)	/* 64-bit state? */
	je	L_64bit_return

L_32bit_return:
#if DEBUG_IDT64
	cmpl	$(SS_32), SS_FLAVOR(%r15)	/* 32-bit state? */
	je	1f
	cli
	POSTCODE2(0x6432)
	CCALL1(panic_idt64, %rsp)
1:
#endif /* DEBUG_IDT64 */

	/*
	 * Restore registers into the machine state for iret.
	 * Here on fault stack and PCB address in R15.
	 */
	movl	R32_EIP(%r15), %eax
	movl	%eax, R64_RIP(%r15)
	movl	R32_EFLAGS(%r15), %eax
	movl	%eax, R64_RFLAGS(%r15)
	movl	R32_CS(%r15), %eax
	movl	%eax, R64_CS(%r15)
	movl	R32_UESP(%r15), %eax
	movl	%eax, R64_RSP(%r15)
	movl	R32_SS(%r15), %eax
	movl	%eax, R64_SS(%r15)

	/*
	 * Restore general 32-bit registers
	 */
	movl	R32_EAX(%r15), %eax
	movl	R32_EBX(%r15), %ebx
	movl	R32_ECX(%r15), %ecx
	movl	R32_EDX(%r15), %edx
	movl	R32_EBP(%r15), %ebp
	movl	R32_ESI(%r15), %esi
	movl	R32_EDI(%r15), %edi

	/*
	 * Restore segment registers. A segment exception taken here will
	 * push state on the IST1 stack and will not affect the "PCB stack".
	 */
	mov	%r15, %rsp		/* Set the PCB as the stack */
	swapgs
EXT(ret32_set_ds):
	movl	R32_DS(%rsp), %ds
EXT(ret32_set_es):
	movl	R32_ES(%rsp), %es
EXT(ret32_set_fs):
	movl	R32_FS(%rsp), %fs
EXT(ret32_set_gs):
	movl	R32_GS(%rsp), %gs

	/* pop compat frame + trapno, trapfn and error */
	add	$(ISS64_OFFSET)+8+8+8, %rsp
	cmpl	$(SYSENTER_CS),ISF64_CS-8-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_fast_exit
EXT(ret32_iret):
	iretq			/* return from interrupt */

L_fast_exit:
	pop	%rdx		/* user return eip */
	pop	%rcx		/* pop and toss cs */
	andl	$(~EFL_IF), (%rsp)	/* clear interrupts enable, sti below */
	popf			/* flags - carry denotes failure */
	pop	%rcx		/* user return esp */
	sti			/* interrupts enabled after sysexit */
	sysexitl		/* 32-bit sysexit */
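	/*
	 * sysexitl resumes user mode with EIP <- EDX and ESP <- ECX
	 * (CS/SS are derived from the IA32_SYSENTER_CS MSR), which is
	 * why the fast path above loads the return eip and esp into
	 * %rdx and %rcx rather than letting iretq consume the frame.
	 */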

ret_to_kernel:
#if DEBUG_IDT64
	cmpl	$(SS_64), SS_FLAVOR(%r15)	/* 64-bit state? */
	je	1f
	cli
	POSTCODE2(0x6464)
	CCALL1(panic_idt64, %r15)
	hlt
1:
	cmpl	$(KERNEL64_CS), R64_CS(%r15)
	je	2f
	CCALL1(panic_idt64, %r15)
	hlt
2:
#endif

L_64bit_return:
	/*
	 * Restore general 64-bit registers.
	 * Here on fault stack and PCB address in R15.
	 */
	mov	R64_R14(%r15), %r14
	mov	R64_R13(%r15), %r13
	mov	R64_R12(%r15), %r12
	mov	R64_R11(%r15), %r11
	mov	R64_R10(%r15), %r10
	mov	R64_R9(%r15), %r9
	mov	R64_R8(%r15), %r8
	mov	R64_RSI(%r15), %rsi
	mov	R64_RDI(%r15), %rdi
	mov	R64_RBP(%r15), %rbp
	mov	R64_RDX(%r15), %rdx
	mov	R64_RCX(%r15), %rcx
	mov	R64_RBX(%r15), %rbx
	mov	R64_RAX(%r15), %rax

	/*
	 * We must swap GS base if we're returning to user-space,
	 * or we're returning from an NMI that occurred in a trampoline
	 * before the user GS had been swapped. In the latter case, the NMI
	 * handler will have flagged the high-order 32-bits of the CS.
	 */
	cmpq	$(KERNEL64_CS), R64_CS(%r15)
	jz	1f
	swapgs
1:
	mov	R64_R15(%r15), %rsp
	xchg	%r15, %rsp
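	/*
	 * The mov/xchg pair above restores the saved R15 while leaving
	 * RSP pointing at the save area, so the remaining frame can be
	 * popped directly off it.
	 */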
	add	$(ISS64_OFFSET)+24, %rsp	/* pop saved state */
						/* + trapno/trapfn/error */
	cmpl	$(SYSCALL_CS),ISF64_CS-24(%rsp)
						/* test for fast entry/exit */
	je	L_sysret
.globl _dump_iretq
EXT(ret64_iret):
	iretq			/* return from interrupt */

L_sysret:
	/*
	 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
	 *	rcx	user rip
	 *	r11	user rflags
	 *	rsp	user stack pointer
	 */
	mov	ISF64_RIP-24(%rsp), %rcx
	mov	ISF64_RFLAGS-24(%rsp), %r11
	mov	ISF64_RSP-24(%rsp), %rsp
	sysretq			/* return from system call */


/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */

#ifndef UNIX_INT
#error NO UNIX INT!!!
#endif
Entry(idt64_unix_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
	pushq	%rax			/* save system call number */
	PUSH_FUNCTION(HNDL_UNIX_SCALL)
	pushq	$(UNIX_INT)
	jmp	L_32bit_entry_check


Entry(idt64_mach_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
	pushq	%rax			/* save system call number */
	PUSH_FUNCTION(HNDL_MACH_SCALL)
	pushq	$(MACH_INT)
	jmp	L_32bit_entry_check


Entry(idt64_mdep_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
	pushq	%rax			/* save system call number */
	PUSH_FUNCTION(HNDL_MDEP_SCALL)
	pushq	$(MACHDEP_INT)
	jmp	L_32bit_entry_check

Entry(hi64_syscall)
Entry(idt64_syscall)
L_syscall_continue:
	swapgs				/* Kapow! get per-cpu data area */

	mov	%rsp, %gs:CPU_UBER_TMP	/* save user stack */
	mov	%gs:CPU_UBER_ISF, %rsp	/* switch stack to pcb */

	/*
	 * Save values in the ISF frame in the PCB
	 * to cons up the saved machine state.
	 */
	movl	$(USER_DS), ISF64_SS(%rsp)
	movl	$(SYSCALL_CS), ISF64_CS(%rsp)	/* cs - a pseudo-segment */
	mov	%r11, ISF64_RFLAGS(%rsp)	/* rflags */
	mov	%rcx, ISF64_RIP(%rsp)		/* rip */
	mov	%gs:CPU_UBER_TMP, %rcx
	mov	%rcx, ISF64_RSP(%rsp)		/* user stack */
	mov	%rax, ISF64_ERR(%rsp)		/* err/rax - syscall code */
	movq	$(T_SYSCALL), ISF64_TRAPNO(%rsp)	/* trapno */
	leaq	HNDL_SYSCALL(%rip), %r11;
	movq	%r11, ISF64_TRAPFN(%rsp)
	mov	ISF64_RFLAGS(%rsp), %r11	/* Avoid leak, restore R11 */
	jmp	L_dispatch_U64			/* this can only be 64-bit */

/*
 * sysenter entry point
 * Requires user code to set up:
 *	edx: user instruction pointer (return address)
 *	ecx: user stack pointer
 *		on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *	 or requiring ecx to be preserved.
 */
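/*
 * Background on the entry mechanics: SYSENTER itself saves no state.
 * The CPU loads CS/RIP/RSP from the IA32_SYSENTER_CS/EIP/ESP MSRs,
 * which is why the user's return address and stack must arrive by
 * convention in %edx/%ecx, and why the first instruction below can
 * assume %rsp already points at a kernel-provided location (one which,
 * in this scheme, holds the PCB stack pointer to switch to).
 */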
Entry(hi64_sysenter)
Entry(idt64_sysenter)
	movq	(%rsp), %rsp
	/*
	 * Push values on to the PCB stack
	 * to cons up the saved machine state.
	 */
	push	$(USER_DS)		/* ss */
	push	%rcx			/* uesp */
	pushf				/* flags */
	/*
	 * Clear, among others, the Nested Task (NT) flags bit;
	 * this is zeroed by INT, but not by SYSENTER.
	 */
	push	$0
	popf
	push	$(SYSENTER_CS)		/* cs */
L_sysenter_continue:
	swapgs				/* switch to kernel gs (cpu_data) */
	push	%rdx			/* eip */
	push	%rax			/* err/eax - syscall code */
	PUSH_FUNCTION(HNDL_SYSENTER)
	pushq	$(T_SYSENTER)
	orl	$(EFL_IF), ISF64_RFLAGS(%rsp)
	jmp	L_32bit_entry_check


Entry(idt64_page_fault)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	push	$(T_PAGE_FAULT)
	push	%rax			/* save %rax temporarily */
	testb	$3, 8+ISF64_CS(%rsp)	/* was trap from kernel? */
	jz	L_kernel_trap		/* - yes, handle with care */
	pop	%rax			/* restore %rax, swapgs, and continue */
	swapgs
	jmp	L_dispatch_user


/*
 * Debug trap.  Check for single-stepping across system call into
 * kernel.  If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 */
Entry(idt64_debug)
	push	$0			/* error code */
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_DEBUG)

	testb	$3, ISF64_CS(%rsp)
	jnz	L_dispatch

	/*
	 * trap came from kernel mode
	 */

	push	%rax			/* save %rax temporarily */
	lea	EXT(idt64_sysenter)(%rip), %rax
	cmp	%rax, ISF64_RIP+8(%rsp)
	pop	%rax
	jne	L_dispatch

	/*
	 * Interrupt stack frame has been pushed on the temporary stack.
	 * We have to switch to pcb stack and patch up the saved state.
	 */
	mov	%rcx, ISF64_ERR(%rsp)	/* save %rcx in error slot */
	mov	ISF64_SS+8(%rsp), %rcx	/* top of temp stack -> pcb stack */
	xchg	%rcx,%rsp		/* switch to pcb stack */
	push	$(USER_DS)		/* ss */
	push	ISF64_ERR(%rcx)		/* saved %rcx into rsp slot */
	push	ISF64_RFLAGS(%rcx)	/* rflags */
	push	$(SYSENTER_TF_CS)	/* cs - not SYSENTER_CS for iret path */
	mov	ISF64_ERR(%rcx),%rcx	/* restore %rcx */
	jmp	L_sysenter_continue	/* continue sysenter entry */


Entry(idt64_double_fault)
	PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
	pushq	$(T_DOUBLE_FAULT)

	push	%rax
	leaq	EXT(idt64_syscall)(%rip), %rax
	cmp	%rax, ISF64_RIP+8(%rsp)
	pop	%rax
	jne	L_dispatch_kernel

	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_syscall_continue


/*
 * For GP/NP/SS faults, we use the IST1 stack.
 * For faults from user-space, we have to copy the machine state to the
 * PCB stack and then dispatch as normal.
 * For faults in kernel-space, we need to scrub for kernel exit faults and
 * treat these as user-space faults. But for all other kernel-space faults
 * we continue to run on the IST1 stack and we dispatch to handle the fault
 * as fatal.
 */
Entry(idt64_gen_prot)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_GENERAL_PROTECTION)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(idt64_stack_fault)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_STACK_FAULT)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(idt64_segnp)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_SEGMENT_NOT_PRESENT)
					/* indicate fault type */
trap_check_kernel_exit:
	testb	$3,ISF64_CS(%rsp)
	jz	L_kernel_gpf

	/* Here for fault from user-space. Copy interrupt state to PCB. */
	swapgs
	push	%rax
	mov	%rcx, %gs:CPU_UBER_TMP		/* save user RCX */
	mov	%gs:CPU_UBER_ISF, %rcx		/* PCB stack addr */
	mov	ISF64_SS+8(%rsp), %rax
	mov	%rax, ISF64_SS(%rcx)
	mov	ISF64_RSP+8(%rsp), %rax
	mov	%rax, ISF64_RSP(%rcx)
	mov	ISF64_RFLAGS+8(%rsp), %rax
	mov	%rax, ISF64_RFLAGS(%rcx)
	mov	ISF64_CS+8(%rsp), %rax
	mov	%rax, ISF64_CS(%rcx)
	mov	ISF64_RIP+8(%rsp), %rax
	mov	%rax, ISF64_RIP(%rcx)
	mov	ISF64_ERR+8(%rsp), %rax
	mov	%rax, ISF64_ERR(%rcx)
	mov	ISF64_TRAPFN+8(%rsp), %rax
	mov	%rax, ISF64_TRAPFN(%rcx)
	mov	ISF64_TRAPNO+8(%rsp), %rax
	mov	%rax, ISF64_TRAPNO(%rcx)
	pop	%rax
	mov	%gs:CPU_UBER_TMP, %rsp		/* user RCX into RSP */
	xchg	%rcx, %rsp			/* to PCB stack with user RCX */
	jmp	L_dispatch_user

L_kernel_gpf:
	/* Here for GPF from kernel_space. Check for recoverable cases. */
	push	%rax
	leaq	EXT(ret32_iret)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_fault_iret
	leaq	EXT(ret64_iret)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_fault_iret
	leaq	EXT(ret32_set_ds)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_es)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_fs)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_gs)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg

	/* Fall through */

L_kernel_trap:
	/*
	 * Here after taking an unexpected trap from kernel mode - perhaps
	 * while running in the trampolines hereabouts.
	 * Note: %rax has been pushed on stack.
	 * Make sure we're not on the PCB stack, if so move to the kernel stack.
	 * This is likely a fatal condition.
	 * But first, ensure we have the kernel gs base active...
	 */
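	/*
	 * The MSB test below works because the kernel GS base is a
	 * kernel virtual address in the upper half of the address space
	 * (sign bit set), whereas a user GS base is not; rdmsr leaves
	 * the high 32 bits of the base in %edx.
	 */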
	push	%rcx
	push	%rdx
	mov	$(MSR_IA32_GS_BASE), %ecx
	rdmsr					/* read kernel gsbase */
	test	$0x80000000, %edx		/* test MSB of address */
	jne	1f
	swapgs					/* so swap */
1:
	pop	%rdx
	pop	%rcx

	movq	%gs:CPU_UBER_ISF, %rax		/* PCB stack addr */
	subq	%rsp, %rax
	cmpq	$(PAGE_SIZE), %rax		/* current stack in PCB? */
	jb	2f				/* - yes, deal with it */
	pop	%rax				/* - no, restore %rax */
	jmp	L_dispatch_kernel
2:
	/*
	 * Here if %rsp is in the PCB
	 * Copy the interrupt stack frame from PCB stack to kernel stack
	 */
	movq	%gs:CPU_KERNEL_STACK, %rax
	xchgq	%rax, %rsp
	pushq	8+ISF64_SS(%rax)
	pushq	8+ISF64_RSP(%rax)
	pushq	8+ISF64_RFLAGS(%rax)
	pushq	8+ISF64_CS(%rax)
	pushq	8+ISF64_RIP(%rax)
	pushq	8+ISF64_ERR(%rax)
	pushq	8+ISF64_TRAPFN(%rax)
	pushq	8+ISF64_TRAPNO(%rax)
	movq	(%rax), %rax
	jmp	L_dispatch_kernel


/*
 * GP/NP fault on IRET: CS or SS is in error.
 * User GSBASE is active.
 * On IST1 stack containing:
 *	(rax saved above, which is immediately popped)
 *	0	ISF64_TRAPNO:	trap code (NP or GP)
 *	8	ISF64_TRAPFN:	trap function
 *	16	ISF64_ERR:	segment number in error (error code)
 *	24	ISF64_RIP:	kernel RIP
 *	32	ISF64_CS:	kernel CS
 *	40	ISF64_RFLAGS:	kernel RFLAGS
 *	48	ISF64_RSP:	kernel RSP
 *	56	ISF64_SS:	kernel SS
 * On the PCB stack, pointed to by the kernel's RSP is:
 *	0	user RIP
 *	8	user CS
 *	16	user RFLAGS
 *	24	user RSP
 *	32	user SS
 *
 * We need to move the kernel's TRAPNO, TRAPFN and ERR to the PCB and handle
 * as a user fault with:
 *	0	ISF64_TRAPNO:	trap code (NP or GP)
 *	8	ISF64_TRAPFN:	trap function
 *	16	ISF64_ERR:	segment number in error (error code)
 *	24	user RIP
 *	32	user CS
 *	40	user RFLAGS
 *	48	user RSP
 *	56	user SS
 */
L_fault_iret:
	pop	%rax			/* recover saved %rax */
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don't need saved rip) */
	mov	ISF64_RSP(%rsp), %rax
	xchg	%rax, %rsp		/* switch to PCB stack */
	push	ISF64_ERR(%rax)
	push	ISF64_TRAPFN(%rax)
	push	ISF64_TRAPNO(%rax)
	mov	ISF64_RIP(%rax), %rax	/* restore rax */
					/* now treat as fault from user */
	jmp	L_dispatch

/*
 * Fault restoring a segment register.  All of the saved state is still
 * on the stack untouched since we haven't yet moved the stack pointer.
 * On IST1 stack containing:
 *	(rax saved above, which is immediately popped)
 *	0	ISF64_TRAPNO:	trap code (NP or GP)
 *	8	ISF64_TRAPFN:	trap function
 *	16	ISF64_ERR:	segment number in error (error code)
 *	24	ISF64_RIP:	kernel RIP
 *	32	ISF64_CS:	kernel CS
 *	40	ISF64_RFLAGS:	kernel RFLAGS
 *	48	ISF64_RSP:	kernel RSP
 *	56	ISF64_SS:	kernel SS
 * On the PCB stack, pointed to by the kernel's RSP is:
 *	0	user trap code
 *	8	user trap function
 *	16	user err
 *	24	user RIP
 *	32	user CS
 *	40	user RFLAGS
 *	48	user RSP
 *	56	user SS
 */
L_32bit_fault_set_seg:
	swapgs
	pop	%rax			/* toss saved %rax from stack */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	ISF64_TRAPFN(%rsp), %rcx
	mov	ISF64_ERR(%rsp), %rdx
	mov	ISF64_RSP(%rsp), %rsp	/* reset stack to saved state */
	mov	%rax,R64_TRAPNO(%rsp)
	mov	%rcx,R64_TRAPFN(%rsp)
	mov	%rdx,R64_ERR(%rsp)
					/* now treat as fault from user */
					/* except that all the state is */
					/* already saved - we just have to */
					/* move the trapno and error into */
					/* the compatibility frame */
	jmp	L_dispatch_U32_after_fault

/*
 * Fatal exception handlers:
 */
Entry(idt64_db_task_dbl_fault)
	PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
	pushq	$(T_DOUBLE_FAULT)
	jmp	L_dispatch

Entry(idt64_db_task_stk_fault)
	PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
	pushq	$(T_STACK_FAULT)
	jmp	L_dispatch

Entry(idt64_mc)
	push	$(0)			/* Error */
	PUSH_FUNCTION(HNDL_MACHINE_CHECK)
	pushq	$(T_MACHINE_CHECK)
	jmp	L_dispatch

/*
 * NMI
 * This may or may not be fatal but extreme care is required
 * because it may be taken when control is already in another trampoline.
 *
 * We get here on IST2 stack which is used for NMIs only.
 * We must be aware of the interrupted state:
 *  - from user-space, we
 *    - copy state to the PCB and continue;
 *  - from kernel-space, we
 *    - copy state to the kernel stack and continue, but
 *    - check what GSBASE was active, set the kernel base and
 *    - ensure that the active state is restored when the NMI is dismissed.
 */
Entry(idt64_nmi)
	push	%rax				/* save RAX to ISF64_ERR */
	push	%rcx				/* save RCX to ISF64_TRAPFN */
	push	%rdx				/* save RDX to ISF64_TRAPNO */
	testb	$3, ISF64_CS(%rsp)		/* NMI from user-space? */
	je	1f

	/* From user-space: copy interrupt state to user PCB */
	swapgs
	mov	%gs:CPU_UBER_ISF, %rcx		/* PCB stack addr */
	add	$(ISF64_SIZE), %rcx		/* adjust to base of ISF */
	swapgs					/* swap back for L_dispatch */
	jmp	4f				/* Copy state to PCB */

1:
	/*
	 * From kernel-space:
	 * Determine whether the kernel or user GS is set.
	 * Set the kernel and ensure that we'll swap back correctly at IRET.
	 */
	mov	$(MSR_IA32_GS_BASE), %ecx
	rdmsr					/* read kernel gsbase */
	test	$0x80000000, %edx		/* test MSB of address */
	jne	2f
	swapgs					/* so swap */
	movl	$1, ISF64_CS+4(%rsp)		/* and set flag in CS slot */
2:
	/*
	 * Determine whether we're on the kernel or interrupt stack
	 * when the NMI hit.
	 */
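	/*
	 * The xor/and tests below rely on kernel_stack_mask masking off
	 * the byte offset within a stack: (addr1 ^ addr2) & mask is zero
	 * exactly when both addresses lie within the same aligned stack.
	 */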
	mov	ISF64_RSP(%rsp), %rcx
	mov	%gs:CPU_KERNEL_STACK, %rax
	xor	%rcx, %rax
	and	EXT(kernel_stack_mask)(%rip), %rax
	test	%rax, %rax		/* are we on the kernel stack? */
	je	3f			/* yes */

	mov	%gs:CPU_INT_STACK_TOP, %rax
	dec	%rax			/* intr stack top is byte above max */
	xor	%rcx, %rax
	and	EXT(kernel_stack_mask)(%rip), %rax
	test	%rax, %rax		/* are we on the interrupt stack? */
	je	3f			/* yes */

	mov	%gs:CPU_KERNEL_STACK, %rcx
3:
	/* 16-byte-align kernel/interrupt stack for state push */
	and	$0xFFFFFFFFFFFFFFF0, %rcx

4:
	/*
	 * Copy state from NMI stack (RSP) to the save area (RCX) which is
	 * the PCB for user or kernel/interrupt stack from kernel.
	 *	ISF64_ERR(RSP)    saved RAX
	 *	ISF64_TRAPFN(RSP) saved RCX
	 *	ISF64_TRAPNO(RSP) saved RDX
	 */
	xchg	%rsp, %rcx		/* set for pushes */
	push	ISF64_SS(%rcx)
	push	ISF64_RSP(%rcx)
	push	ISF64_RFLAGS(%rcx)
	push	ISF64_CS(%rcx)
	push	ISF64_RIP(%rcx)
	push	$(0)			/* error code 0 */
	lea	HNDL_ALLINTRS(%rip), %rax
	push	%rax			/* trapfn allintrs */
	push	$(T_NMI)		/* trapno T_NMI */
	mov	ISF64_ERR(%rcx), %rax
	mov	ISF64_TRAPNO(%rcx), %rdx
	mov	ISF64_TRAPFN(%rcx), %rcx
	jmp	L_dispatch


/* All 'exceptions' enter hndl_alltraps, with:
 *	r15	x86_saved_state_t address
 *	rsp	kernel stack if user-space, otherwise interrupt or kernel stack
 *	esi	cs at trap
 *
 * The rest of the state is set up as:
 *	both rsp and r15 are 16-byte aligned
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(hndl_alltraps)
	mov	%esi, %eax
	testb	$3, %al
	jz	trap_from_kernel

	TIME_TRAP_UENTRY

	/* Check for active vtimers in the current task */
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	mov	TH_TASK(%rcx), %rbx
	TASK_VTIMER_CHECK(%rbx, %rcx)

	CCALL1(user_trap, %r15)		/* call user trap routine */
					/* user_trap() unmasks interrupts */
	cli				/* hold off intrs - critical section */
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */

#define CLI cli
#define STI sti

Entry(return_from_trap)
	movq	%gs:CPU_ACTIVE_THREAD,%r15	/* Get current thread */
	movl	$-1, TH_IOTIER_OVERRIDE(%r15)	/* Clear IO tier override before returning to userspace */
	cmpl	$0, TH_RWLOCK_COUNT(%r15)	/* Check if current thread has pending RW locks held */
	jz	1f
	xorq	%rbp, %rbp		/* clear framepointer */
	mov	%r15, %rdi		/* Set RDI to current thread */
	CCALL(lck_rw_clear_promotions_x86)	/* Clear promotions if needed */
1:
	movq	TH_PCB_ISS(%r15), %r15	/* PCB stack */
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax
	je	EXT(return_to_user)	/* branch if no AST */

L_return_from_trap_with_ast:
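	/*
	 * %ecx tells us whether the interrupted EIP/RIP needs to be
	 * checked against the commpage's preemption-free zone (PFZ).
	 * If the user was executing inside the PFZ, the AST is not taken
	 * now; instead the commpage routine is told (via EBX/RBX) that
	 * an AST is pending, so it can trap back when it leaves the zone.
	 */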
	testl	%ecx, %ecx	/* see if we need to check for an EIP in the PFZ */
	je	2f		/* no, go handle the AST */
	cmpl	$(SS_64), SS_FLAVOR(%r15)	/* are we a 64-bit task? */
	je	1f
				/* no... 32-bit user mode */
	movl	R32_EIP(%r15), %edi
	xorq	%rbp, %rbp	/* clear framepointer */
	CCALL(commpage_is_in_pfz32)
	testl	%eax, %eax
	je	2f		/* not in the PFZ... go service AST */
	movl	%eax, R32_EBX(%r15)	/* let the PFZ know we've pended an AST */
	jmp	EXT(return_to_user)
1:
	movq	R64_RIP(%r15), %rdi
	xorq	%rbp, %rbp	/* clear framepointer */
	CCALL(commpage_is_in_pfz64)
	testl	%eax, %eax
	je	2f		/* not in the PFZ... go service AST */
	movl	%eax, R64_RBX(%r15)	/* let the PFZ know we've pended an AST */
	jmp	EXT(return_to_user)
2:
	STI			/* interrupts always enabled on return to user mode */

	xor	%edi, %edi	/* zero %rdi */
	xorq	%rbp, %rbp	/* clear framepointer */
	CCALL(i386_astintr)	/* take the AST */

	CLI
	mov	%rsp, %r15	/* AST changes stack, saved state */
	xorl	%ecx, %ecx	/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)	/* and check again (rare) */

/*
 * Trap from kernel mode.  No need to switch stacks.
 * Interrupts must be off here - we will set them to state at time of trap
 * as soon as it's safe for us to do so and not recurse doing preemption.
 */
trap_from_kernel:
	movq	%r15, %rdi		/* saved state addr */
	pushq	R64_RIP(%r15)		/* Simulate a CALL from fault point */
	pushq	%rbp			/* Extend framepointer chain */
	movq	%rsp, %rbp
	CCALLWITHSP(kernel_trap)	/* to kernel trap routine */
	popq	%rbp
	addq	$8, %rsp
	mov	%rsp, %r15		/* DTrace slides stack/saved-state */
	cli

	movl	%gs:CPU_PENDING_AST,%eax	/* get pending asts */
	testl	$(AST_URGENT),%eax		/* any urgent preemption? */
	je	ret_to_kernel			/* no, nothing to do */
	cmpl	$(T_PREEMPT),R64_TRAPNO(%r15)
	je	ret_to_kernel	/* T_PREEMPT handled in kernel_trap() */
	testl	$(EFL_IF),R64_RFLAGS(%r15)	/* interrupts disabled? */
	je	ret_to_kernel
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel
	movq	%gs:CPU_KERNEL_STACK,%rax
	movq	%rsp,%rcx
	xorq	%rax,%rcx
	andq	EXT(kernel_stack_mask)(%rip),%rcx
	testq	%rcx,%rcx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	CCALL1(i386_astintr, $1)	/* take the AST */

	mov	%rsp, %r15		/* AST changes stack, saved state */
	jmp	ret_to_kernel


/*
 * All interrupts on all tasks enter here with:
 *	r15	x86_saved_state_t
 *	rsp	kernel or interrupt stack
 *	esi	cs at trap
 *
 *	both rsp and r15 are 16-byte aligned
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(hndl_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movq	%gs:CPU_INT_STACK_TOP,%rcx
	cmpq	%rsp,%rcx
	jb	1f
	leaq	-INTSTACK_SIZE(%rcx),%rdx
	cmpq	%rsp,%rdx
	jb	int_from_intstack
1:
	xchgq	%rcx,%rsp		/* switch to interrupt stack */

	mov	%cr0,%rax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	mov	%rax,%cr0		/* set cr0 */
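	/*
	 * Setting CR0.TS here means any floating-point/SSE use in
	 * interrupt context will raise a device-not-available trap
	 * rather than silently clobbering the interrupted thread's
	 * FP state; it is undone on exit based on fp_valid below.
	 */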

	pushq	%rcx			/* save pointer to old stack */
	pushq	%gs:CPU_INT_STATE	/* save previous intr state */
	movq	%r15,%gs:CPU_INT_STATE	/* set intr state */

	TIME_INT_ENTRY			/* do timing */

	/* Check for active vtimers in the current task */
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	mov	TH_TASK(%rcx), %rbx
	TASK_VTIMER_CHECK(%rbx, %rcx)

	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	CCALL1(interrupt, %r15)		/* call generic interrupt routine */

	cli			/* just in case we returned with intrs enabled */

	.globl	EXT(return_to_iret)
LEXT(return_to_iret)		/* (label for kdb_kintr and hardclock) */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */

	popq	%gs:CPU_INT_STATE	/* reset/clear intr state pointer */
	popq	%rsp			/* switch back to old stack */

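	/*
	 * Lazy-FP bookkeeping on the way out: if the thread has an FP
	 * save area whose fp_valid flag is clear, its live FP state is
	 * still in the registers, so clear CR0.TS to permit FP use;
	 * otherwise leave TS set so the next FP use traps and restores.
	 */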
	movq	%gs:CPU_ACTIVE_THREAD,%rax
	movq	TH_PCB_FPS(%rax),%rax	/* get pcb's ifps */
	cmpq	$0,%rax			/* Is there a context */
	je	1f			/* Branch if not */
	movl	FP_VALID(%rax),%eax	/* Load fp_valid */
	cmpl	$0,%eax			/* Check if valid */
	jne	1f			/* Branch if valid */
	clts				/* Clear TS */
	jmp	2f
1:
	mov	%cr0,%rax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	mov	%rax,%cr0		/* set cr0 */
2:
	/* Load interrupted code segment into %eax */
	movl	R32_CS(%r15),%eax	/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%r15)	/* 64-bit? */
#if DEBUG_IDT64
	jne	4f
	movl	R64_CS(%r15),%eax	/* 64-bit user mode */
	jmp	3f
4:
	cmpl	$(SS_32),SS_FLAVOR(%r15)
	je	3f
	POSTCODE2(0x6431)
	CCALL1(panic_idt64, %r15)
	hlt
#else
	jne	3f
	movl	R64_CS(%r15),%eax	/* 64-bit user mode */
#endif
3:
	testb	$3,%al			/* user mode, */
	jnz	ast_from_interrupt_user	/* go handle potential ASTs */
	/*
	 * we only want to handle preemption requests if
	 * the interrupt fell in the kernel context
	 * and preemption isn't disabled
	 */
	movl	%gs:CPU_PENDING_AST,%eax
	testl	$(AST_URGENT),%eax	/* any urgent requests? */
	je	ret_to_kernel		/* no, nothing to do */

	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel		/* yes, skip it */

	/*
	 * Take an AST from kernel space. We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
	CCALL1(i386_astintr, $1)

	mov	%rsp, %r15		/* AST changes stack, saved state */
	jmp	ret_to_kernel


/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL
	incl	%gs:CPU_NESTED_ISTACK

	push	%gs:CPU_INT_STATE
	mov	%r15, %gs:CPU_INT_STATE

	CCALL1(interrupt, %r15)

	pop	%gs:CPU_INT_STATE

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL
	decl	%gs:CPU_NESTED_ISTACK

	jmp	ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* pending ASTs? */
	je	EXT(ret_to_user)	/* no, nothing to do */

	TIME_TRAP_UENTRY

	movl	$1, %ecx		/* check if we're in the PFZ */
	jmp	L_return_from_trap_with_ast	/* return */


/* Syscall dispatch routines! */

/*
 *
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *	r15	x86_saved_state32_t
 *	rsp	kernel stack
 *
 *	both rsp and r15 are 16-byte aligned
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(hndl_sysenter)
	/*
	 * We can be here either for a mach syscall or a unix syscall,
	 * as indicated by the sign of the code:
	 */
	movl	R32_EAX(%r15),%eax
	testl	%eax,%eax
	js	EXT(hndl_mach_scall)	/* < 0 => mach */
					/* > 0 => unix */

Entry(hndl_unix_scall)

	TIME_TRAP_UENTRY

	movq	%gs:CPU_ACTIVE_THREAD,%rcx	/* get current thread */
	movq	TH_TASK(%rcx),%rbx		/* point to current task */
	incl	TH_SYSCALLS_UNIX(%rcx)		/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx,%rcx)

	sti

	CCALL1(unix_syscall, %r15)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mach_scall)
	TIME_TRAP_UENTRY

	movq	%gs:CPU_ACTIVE_THREAD,%rcx	/* get current thread */
	movq	TH_TASK(%rcx),%rbx		/* point to current task */
	incl	TH_SYSCALLS_MACH(%rcx)		/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx,%rcx)

	sti

	CCALL1(mach_call_munger, %r15)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mdep_scall)
	TIME_TRAP_UENTRY

	/* Check for active vtimers in the current task */
	movq	%gs:CPU_ACTIVE_THREAD,%rcx	/* get current thread */
	movq	TH_TASK(%rcx),%rbx		/* point to current task */
	TASK_VTIMER_CHECK(%rbx,%rcx)

	sti

	CCALL1(machdep_syscall, %r15)
	/*
	 * always returns through thread_exception_return
	 */

/*
 * 64bit Tasks
 * System call entries via syscall only:
 *
 *	r15	x86_saved_state64_t
 *	rsp	kernel stack
 *
 *	both rsp and r15 are 16-byte aligned
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(hndl_syscall)
	TIME_TRAP_UENTRY

	movq	%gs:CPU_ACTIVE_THREAD,%rcx	/* get current thread */
	movq	TH_TASK(%rcx),%rbx		/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx,%rcx)

	/*
	 * We can be here either for a mach, unix, machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
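	/*
	 * The class is carried in the high bits of the syscall number
	 * (see mach/i386/syscall_sw.h for SYSCALL_CLASS_SHIFT/MASK and
	 * the SYSCALL_CLASS_* constants); masking with SYSCALL_CLASS_MASK
	 * and comparing against each class shifted into place selects
	 * the handler below.
	 */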
	movl	R64_RAX(%r15), %eax		/* syscall number/class */
	movl	%eax, %edx
	andl	$(SYSCALL_CLASS_MASK), %edx	/* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_mach_scall64)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_unix_scall64)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_mdep_scall64)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_diag_scall64)

	/* Syscall class unknown */
	sti
	CCALL3(i386_exception, $(EXC_SYSCALL), %rax, $1)
	/* no return */


Entry(hndl_unix_scall64)
	incl	TH_SYSCALLS_UNIX(%rcx)		/* increment call count */
	sti

	CCALL1(unix_syscall64, %r15)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mach_scall64)
	incl	TH_SYSCALLS_MACH(%rcx)		/* increment call count */
	sti

	CCALL1(mach_call_munger64, %r15)
	/*
	 * always returns through thread_exception_return
	 */



Entry(hndl_mdep_scall64)
	sti

	CCALL1(machdep_syscall64, %r15)
	/*
	 * always returns through thread_exception_return
	 */

Entry(hndl_diag_scall64)
	CCALL1(diagCall64, %r15)	// Call diagnostics
	cli				// Disable interruptions just in case
	test	%eax, %eax		// What kind of return is this?
	je	1f			// - branch if bad (zero)
	jmp	EXT(return_to_user)	// Normal return, do not check asts...
1:
	sti
	CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
	/* no return */

Entry(hndl_machine_check)
	CCALL1(panic_machine_check64, %r15)
	hlt

Entry(hndl_double_fault)
	CCALL1(panic_double_fault64, %r15)
	hlt