/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/asm.h>
#include <assym.s>
#include <i386/eflags.h>
#include <i386/rtclock_asm.h>
#include <i386/trap.h>
#define _ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <mach/exception_types.h>

#if DEBUG
#define DEBUG_IDT64 1
#endif

/*
 * This is the low-level trap and interrupt handling code associated with
 * the IDT. It also includes system call handlers for sysenter/syscall.
 * The IDT itself is defined in mp_desc.c.
 *
 * Code here is structured as follows:
 *
 * stubs	Code called directly from an IDT vector.
 *		All entry points have the "idt64_" prefix and they are built
 *		using macros expanded by the inclusion of idt_table.h.
 *		This code performs vector-dependent identification and jumps
 *		into the dispatch code.
 *
 * dispatch	The dispatch code is responsible for saving the thread state
 *		(which is either 64-bit or 32-bit) and then jumping to the
 *		class handler identified by the stub.
 *
 * returns	Code to restore state and return to the previous context.
 *
 * handlers	There are several classes of handlers:
 *		interrupt	- asynchronous events typically from external devices
 *		trap		- synchronous events due to thread execution
 *		syscall		- synchronous system call request
 *		fatal		- fatal traps
 */

/*
 * Handlers:
 */
#define	HNDL_ALLINTRS		EXT(hndl_allintrs)
#define	HNDL_ALLTRAPS		EXT(hndl_alltraps)
#define	HNDL_SYSENTER		EXT(hndl_sysenter)
#define	HNDL_SYSCALL		EXT(hndl_syscall)
#define	HNDL_UNIX_SCALL		EXT(hndl_unix_scall)
#define	HNDL_MACH_SCALL		EXT(hndl_mach_scall)
#define	HNDL_MDEP_SCALL		EXT(hndl_mdep_scall)
#define	HNDL_DOUBLE_FAULT	EXT(hndl_double_fault)
#define	HNDL_MACHINE_CHECK	EXT(hndl_machine_check)


#if 1
#define PUSH_FUNCTION(func)			 \
	sub	$8, %rsp			;\
	push	%rax				;\
	leaq	func(%rip), %rax		;\
	movq	%rax, 8(%rsp)			;\
	pop	%rax
#else
#define PUSH_FUNCTION(func) pushq func
#endif

/* The wrapper for all non-special traps/interrupts */
/* Everything up to PUSH_FUNCTION is just to output
 * the interrupt number to the postcode display
 */
#if DEBUG_IDT64
#define IDT_ENTRY_WRAPPER(n, f)			 \
	push	%rax				;\
	POSTCODE2(0x6400+n)			;\
	pop	%rax				;\
	PUSH_FUNCTION(f)			;\
	pushq	$(n)				;\
	jmp	L_dispatch
#else
#define IDT_ENTRY_WRAPPER(n, f)			 \
	PUSH_FUNCTION(f)			;\
	pushq	$(n)				;\
	jmp	L_dispatch
#endif
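
/*
 * Illustration only (not part of the build): a stub generated by
 * TRAP(0x00, idt64_zero_div) from idt_table.h expands to roughly:
 *
 *	Entry(idt64_zero_div)
 *		pushq	$0			// dummy error code
 *		PUSH_FUNCTION(HNDL_ALLTRAPS)	// trap handler ("trapfn")
 *		pushq	$(0x00)			// trap number ("trapno")
 *		jmp	L_dispatch
 *
 * leaving trapno, trapfn and the error code stacked above the
 * hardware-pushed interrupt frame, which is the layout L_dispatch expects.
 */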

/* A trap that comes with an error code already on the stack */
#define TRAP_ERR(n, f)				 \
	Entry(f)				;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)

/* A normal trap */
#define TRAP(n, f)				 \
	Entry(f)				;\
	pushq	$0				;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)

#define USER_TRAP TRAP

/* An interrupt */
#define INTERRUPT(n)				 \
	Entry(_intr_ ## n)			;\
	pushq	$0				;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLINTRS)

/* A trap with a special-case handler, hence we don't need to define anything */
#define TRAP_SPC(n, f)
#define TRAP_IST(n, f)
#define USER_TRAP_SPC(n, f)

/* Generate all the stubs */
#include "idt_table.h"

/*
 * Common dispatch point.
 * Determine what mode has been interrupted and save state accordingly.
 */
L_dispatch:
	cmpl	$(KERNEL64_CS), ISF64_CS(%rsp)
	je	L_64bit_dispatch

	swapgs

	/*
	 * Check for trap from EFI32, and restore cr3 and rsp if so.
	 * A trap from EFI32 is fatal.
	 */
	cmpl	$(KERNEL32_CS), ISF64_CS(%rsp)
	jne	L_dispatch_continue
	push	%rcx
	mov	EXT(pal_efi_saved_cr3)(%rip), %rcx
	mov	%rcx, %cr3
	leaq	(%rip), %rcx
	shr	$32, %rcx	/* splice the upper 32-bits of rip */
	shl	$32, %rsp	/* .. and the lower 32-bits of rsp */
	shrd	$32, %rcx, %rsp	/* to recover the full 64-bits of rsp */
	pop	%rcx
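	/*
	 * Illustration of the splice above, with assumed values: if
	 * %rip = 0xffffff8000200000 and only the low 32 bits of %rsp
	 * are valid, say 0x00007000, then
	 *	%rsp := (upper32(%rip) << 32) | lower32(%rsp)
	 *	      = 0xffffff8000007000
	 * i.e. the stack pointer is rebuilt in the kernel's high range.
	 */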

L_dispatch_continue:
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	je	L_32bit_dispatch	/* 32-bit user task */
	/* fall through to 64bit user dispatch */

/*
 * Here for 64-bit user task or kernel
 */
L_64bit_dispatch:
	subq	$(ISS64_OFFSET), %rsp
	movl	$(SS_64), SS_FLAVOR(%rsp)

	cld

	/*
	 * Save segment regs - for completeness since they're not used.
	 */
	movl	%fs, R64_FS(%rsp)
	movl	%gs, R64_GS(%rsp)

	/* Save general-purpose registers */
	mov	%rax, R64_RAX(%rsp)
	mov	%rcx, R64_RCX(%rsp)
	mov	%rbx, R64_RBX(%rsp)
	mov	%rbp, R64_RBP(%rsp)
	mov	%r11, R64_R11(%rsp)
	mov	%r12, R64_R12(%rsp)
	mov	%r13, R64_R13(%rsp)
	mov	%r14, R64_R14(%rsp)
	mov	%r15, R64_R15(%rsp)

	/* cr2 is significant only for page-faults */
	mov	%cr2, %rax
	mov	%rax, R64_CR2(%rsp)

	/* Other registers (which may contain syscall args) */
	mov	%rdi, R64_RDI(%rsp)	/* arg0 .. */
	mov	%rsi, R64_RSI(%rsp)
	mov	%rdx, R64_RDX(%rsp)
	mov	%r10, R64_R10(%rsp)
	mov	%r8, R64_R8(%rsp)
	mov	%r9, R64_R9(%rsp)	/* .. arg5 */

	mov	R64_TRAPNO(%rsp), %ebx	/* %ebx := trapno for later */
	mov	R64_TRAPFN(%rsp), %rdx	/* %rdx := trapfn for later */
	mov	R64_CS(%rsp), %esi	/* %esi := cs for later */

	jmp	L_common_dispatch

L_64bit_entry_reject:
	/*
	 * Here for a 64-bit user attempting an invalid kernel entry.
	 */
	pushq	%rax
	leaq	HNDL_ALLTRAPS(%rip), %rax
	movq	%rax, ISF64_TRAPFN+8(%rsp)
	popq	%rax
	movq	$(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
	jmp	L_64bit_dispatch

L_32bit_entry_check:
	/*
	 * Check we're not a confused 64-bit user.
	 */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
	/* fall through to 32-bit handler: */

L_32bit_dispatch: /* 32-bit user task */
	subq	$(ISC32_OFFSET), %rsp
	movl	$(SS_32), SS_FLAVOR(%rsp)

	cld
	/*
	 * Save segment regs
	 */
	movl	%ds, R32_DS(%rsp)
	movl	%es, R32_ES(%rsp)
	movl	%fs, R32_FS(%rsp)
	movl	%gs, R32_GS(%rsp)

	/*
	 * Save general 32-bit registers
	 */
	mov	%eax, R32_EAX(%rsp)
	mov	%ebx, R32_EBX(%rsp)
	mov	%ecx, R32_ECX(%rsp)
	mov	%edx, R32_EDX(%rsp)
	mov	%ebp, R32_EBP(%rsp)
	mov	%esi, R32_ESI(%rsp)
	mov	%edi, R32_EDI(%rsp)

	/* Unconditionally save cr2; only meaningful on page faults */
	mov	%cr2, %rax
	mov	%eax, R32_CR2(%rsp)

	/*
	 * Copy registers already saved in the machine state
	 * (in the interrupt stack frame) into the compat save area.
	 */
	mov	ISC32_RIP(%rsp), %eax
	mov	%eax, R32_EIP(%rsp)
	mov	ISC32_RFLAGS(%rsp), %eax
	mov	%eax, R32_EFLAGS(%rsp)
	mov	ISC32_RSP(%rsp), %eax
	mov	%eax, R32_UESP(%rsp)
	mov	ISC32_SS(%rsp), %eax
	mov	%eax, R32_SS(%rsp)
L_32bit_dispatch_after_fault:
	mov	ISC32_CS(%rsp), %esi		/* %esi := %cs for later */
	mov	%esi, R32_CS(%rsp)
	mov	ISC32_TRAPNO(%rsp), %ebx	/* %ebx := trapno for later */
	mov	%ebx, R32_TRAPNO(%rsp)
	mov	ISC32_ERR(%rsp), %eax
	mov	%eax, R32_ERR(%rsp)
	mov	ISC32_TRAPFN(%rsp), %rdx	/* %rdx := trapfn for later */

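/*
 * Summary of the dispatch contract (established above): both the 64-bit
 * and 32-bit save paths arrive at L_common_dispatch with the thread
 * state saved and %ebx = trapno, %rdx = trap handler ("trapfn"),
 * %esi = cs at the time of the trap.
 */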
L_common_dispatch:
	/*
	 * On entering the kernel, we don't need to switch cr3
	 * because the kernel shares the user's address space.
	 * But we mark the kernel's cr3 as "active".
	 * If, however, the invalid cr3 flag is set, we have to flush tlbs
	 * since the kernel's mapping was changed while we were in userspace.
	 *
	 * But: if global no_shared_cr3 is TRUE we do switch to the kernel's cr3
	 * so that illicit accesses to userspace can be trapped.
	 */
	mov	%gs:CPU_KERNEL_CR3, %rcx
	mov	%rcx, %gs:CPU_ACTIVE_CR3
	test	$3, %esi		/* user/kernel? */
	jz	1f			/* skip cr3 reload from kernel */
	xor	%rbp, %rbp
	cmpl	$0, EXT(no_shared_cr3)(%rip)
	je	1f
	mov	%rcx, %cr3		/* load kernel cr3 */
	jmp	2f			/* and skip tlb flush test */
1:
	mov	%gs:CPU_ACTIVE_CR3+4, %rcx
	shr	$32, %rcx
	testl	%ecx, %ecx
	jz	2f
	movl	$0, %gs:CPU_TLB_INVALID
	testl	$(1<<16), %ecx		/* Global? */
	jz	11f
	mov	%cr4, %rcx		/* RMWW CR4, for lack of an alternative */
	and	$(~CR4_PGE), %rcx
	mov	%rcx, %cr4
	or	$(CR4_PGE), %rcx
	mov	%rcx, %cr4
	jmp	2f
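	/*
	 * Note: a mov to %cr3 (the 11: path below) invalidates only
	 * non-global TLB entries; toggling CR4.PGE off and back on, as
	 * above, is the architected way to also flush global-page
	 * translations.
	 */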

11:	mov	%cr3, %rcx
	mov	%rcx, %cr3
2:
	mov	%gs:CPU_ACTIVE_THREAD, %rcx	/* Get the active thread */
	cmpq	$0, TH_PCB_IDS(%rcx)	/* Is there a debug register state? */
	je	3f
	xor	%ecx, %ecx		/* If so, reset DR7 (the control) */
	mov	%rcx, %dr7
3:
	incl	%gs:hwIntCnt(,%ebx,4)	// Bump the trap/intr count
	/* Dispatch the designated handler */
	jmp	*%rdx

/*
 * Control is passed here to return to user.
 */
Entry(return_to_user)
	TIME_TRAP_UEXIT

Entry(ret_to_user)
// XXX Would be nice to tidy up this debug register restore sequence...
	mov	%gs:CPU_ACTIVE_THREAD, %rdx
	movq	TH_PCB_IDS(%rdx),%rax	/* Obtain this thread's debug state */

	test	%rax, %rax		/* Is there a debug register context? */
	je	2f			/* branch if not */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP	/* Are we a 32-bit task? */
	jne	1f
	movl	DS_DR0(%rax), %ecx	/* If so, load the 32-bit DRs */
	movq	%rcx, %dr0
	movl	DS_DR1(%rax), %ecx
	movq	%rcx, %dr1
	movl	DS_DR2(%rax), %ecx
	movq	%rcx, %dr2
	movl	DS_DR3(%rax), %ecx
	movq	%rcx, %dr3
	movl	DS_DR7(%rax), %ecx
	movq	%rcx, %gs:CPU_DR7
	jmp	2f
1:
	mov	DS64_DR0(%rax), %rcx	/* Load the full width DRs */
	mov	%rcx, %dr0
	mov	DS64_DR1(%rax), %rcx
	mov	%rcx, %dr1
	mov	DS64_DR2(%rax), %rcx
	mov	%rcx, %dr2
	mov	DS64_DR3(%rax), %rcx
	mov	%rcx, %dr3
	mov	DS64_DR7(%rax), %rcx
	mov	%rcx, %gs:CPU_DR7
2:
	/*
	 * On exiting the kernel there's no need to switch cr3 since we're
	 * already running in the user's address space which includes the
	 * kernel. Nevertheless, we now mark the task's cr3 as active.
	 * But, if no_shared_cr3 is set, we do need to switch cr3 at this point.
	 */
	mov	%gs:CPU_TASK_CR3, %rcx
	mov	%rcx, %gs:CPU_ACTIVE_CR3
	movl	EXT(no_shared_cr3)(%rip), %eax
	test	%eax, %eax		/* -no_shared_cr3 */
	jz	3f
	mov	%rcx, %cr3
3:
	mov	%gs:CPU_DR7, %rax	/* Is there a debug control register? */
	cmp	$0, %rax
	je	4f
	mov	%rax, %dr7		/* Set DR7 */
	movq	$0, %gs:CPU_DR7
4:
	cmpl	$(SS_64), SS_FLAVOR(%rsp)	/* 64-bit state? */
	je	L_64bit_return

L_32bit_return:
#if DEBUG_IDT64
	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	je	1f
	cli
	POSTCODE2(0x6432)
	CCALL1(panic_idt64, %rsp)
1:
#endif /* DEBUG_IDT64 */

	/*
	 * Restore registers into the machine state for iret.
	 */
	movl	R32_EIP(%rsp), %eax
	movl	%eax, ISC32_RIP(%rsp)
	movl	R32_EFLAGS(%rsp), %eax
	movl	%eax, ISC32_RFLAGS(%rsp)
	movl	R32_CS(%rsp), %eax
	movl	%eax, ISC32_CS(%rsp)
	movl	R32_UESP(%rsp), %eax
	movl	%eax, ISC32_RSP(%rsp)
	movl	R32_SS(%rsp), %eax
	movl	%eax, ISC32_SS(%rsp)

	/*
	 * Restore general 32-bit registers
	 */
	movl	R32_EAX(%rsp), %eax
	movl	R32_EBX(%rsp), %ebx
	movl	R32_ECX(%rsp), %ecx
	movl	R32_EDX(%rsp), %edx
	movl	R32_EBP(%rsp), %ebp
	movl	R32_ESI(%rsp), %esi
	movl	R32_EDI(%rsp), %edi

	/*
	 * Restore segment registers. We may take an exception here but
	 * we've got enough space left in the save frame area to absorb
	 * a hardware frame plus the trapfn and trapno.
	 */
	swapgs
EXT(ret32_set_ds):
	movl	R32_DS(%rsp), %ds
EXT(ret32_set_es):
	movl	R32_ES(%rsp), %es
EXT(ret32_set_fs):
	movl	R32_FS(%rsp), %fs
EXT(ret32_set_gs):
	movl	R32_GS(%rsp), %gs

	/* pop compat frame + trapno, trapfn and error */
	add	$(ISC32_OFFSET)+8+8+8, %rsp
	cmpl	$(SYSENTER_CS),ISF64_CS-8-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_fast_exit
EXT(ret32_iret):
	iretq				/* return from interrupt */

L_fast_exit:
	pop	%rdx			/* user return eip */
	pop	%rcx			/* pop and toss cs */
	andl	$(~EFL_IF), (%rsp)	/* clear interrupts enable, sti below */
	popf				/* flags - carry denotes failure */
	pop	%rcx			/* user return esp */
	sti				/* interrupts enabled after sysexit */
	.byte	0x0f,0x35		/* 32-bit sysexit */

ret_to_kernel:
#if DEBUG_IDT64
	cmpl	$(SS_64), SS_FLAVOR(%rsp)	/* 64-bit state? */
	je	1f
	cli
	POSTCODE2(0x6464)
	CCALL1(panic_idt64, %rsp)
	hlt
1:
	cmpl	$(KERNEL64_CS), R64_CS(%rsp)
	je	2f
	CCALL1(panic_idt64, %rsp)
	hlt
2:
#endif

L_64bit_return:
	testb	$3, R64_CS(%rsp)	/* returning to user-space? */
	jz	1f
	swapgs
1:

	/*
	 * Restore general 64-bit registers
	 */
	mov	R64_R15(%rsp), %r15
	mov	R64_R14(%rsp), %r14
	mov	R64_R13(%rsp), %r13
	mov	R64_R12(%rsp), %r12
	mov	R64_R11(%rsp), %r11
	mov	R64_R10(%rsp), %r10
	mov	R64_R9(%rsp), %r9
	mov	R64_R8(%rsp), %r8
	mov	R64_RSI(%rsp), %rsi
	mov	R64_RDI(%rsp), %rdi
	mov	R64_RBP(%rsp), %rbp
	mov	R64_RDX(%rsp), %rdx
	mov	R64_RBX(%rsp), %rbx
	mov	R64_RCX(%rsp), %rcx
	mov	R64_RAX(%rsp), %rax

	add	$(ISS64_OFFSET)+24, %rsp	/* pop saved state frame +
						   trapno, trapfn and error */
	cmpl	$(SYSCALL_CS),ISF64_CS-24(%rsp)
					/* test for fast entry/exit */
	je	L_sysret
.globl _dump_iretq
EXT(ret64_iret):
	iretq				/* return from interrupt */

L_sysret:
	/*
	 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
	 *	rcx	user rip
	 *	r11	user rflags
	 *	rsp	user stack pointer
	 */
	mov	ISF64_RIP-24(%rsp), %rcx
	mov	ISF64_RFLAGS-24(%rsp), %r11
	mov	ISF64_RSP-24(%rsp), %rsp
	sysretq				/* return from system call */


/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */

#ifndef UNIX_INT
#error NO UNIX INT!!!
#endif
Entry(idt64_unix_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
	pushq	%rax			/* save system call number */
	PUSH_FUNCTION(HNDL_UNIX_SCALL)
	pushq	$(UNIX_INT)
	jmp	L_32bit_entry_check


Entry(idt64_mach_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
	pushq	%rax			/* save system call number */
	PUSH_FUNCTION(HNDL_MACH_SCALL)
	pushq	$(MACH_INT)
	jmp	L_32bit_entry_check


Entry(idt64_mdep_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
	pushq	%rax			/* save system call number */
	PUSH_FUNCTION(HNDL_MDEP_SCALL)
	pushq	$(MACHDEP_INT)
	jmp	L_32bit_entry_check

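/*
 * Note: the SYSCALL instruction saves the user %rip in %rcx and the user
 * %rflags in %r11, and pushes no frame of its own. The entry code below
 * therefore builds an interrupt-stack-frame by hand in the PCB from
 * %rcx, %r11 and the stashed user stack pointer.
 */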
Entry(hi64_syscall)
Entry(idt64_syscall)
L_syscall_continue:
	swapgs				/* Kapow! get per-cpu data area */
	mov	%rsp, %gs:CPU_UBER_TMP	/* save user stack */
	mov	%gs:CPU_UBER_ISF, %rsp	/* switch stack to pcb */

	/*
	 * Save values in the ISF frame in the PCB
	 * to cons up the saved machine state.
	 */
	movl	$(USER_DS), ISF64_SS(%rsp)
	movl	$(SYSCALL_CS), ISF64_CS(%rsp)	/* cs - a pseudo-segment */
	mov	%r11, ISF64_RFLAGS(%rsp)	/* rflags */
	mov	%rcx, ISF64_RIP(%rsp)		/* rip */
	mov	%gs:CPU_UBER_TMP, %rcx
	mov	%rcx, ISF64_RSP(%rsp)		/* user stack */
	mov	%rax, ISF64_ERR(%rsp)		/* err/rax - syscall code */
	movq	$(T_SYSCALL), ISF64_TRAPNO(%rsp)	/* trapno */
	leaq	HNDL_SYSCALL(%rip), %r11;
	movq	%r11, ISF64_TRAPFN(%rsp)
	mov	ISF64_RFLAGS(%rsp), %r11	/* Avoid info leak, restore R11 */
	jmp	L_64bit_dispatch		/* this can only be a 64-bit task */

/*
 * sysenter entry point
 * Requires user code to set up:
 *	edx: user instruction pointer (return address)
 *	ecx: user stack pointer
 *		on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *	 or requiring ecx to be preserved.
 */
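/*
 * Illustration only -- a hypothetical 32-bit user-mode stub honoring the
 * convention above (the label is invented for the example):
 *
 *	push	%ebx		// saved ebx, per the convention
 *	mov	%esp, %ecx	// ecx: user stack pointer
 *	mov	$1f, %edx	// edx: return address
 *	sysenter
 * 1:	pop	%ebx
 *
 * sysexit resumes at %edx with %esp := %ecx, which is why neither
 * register survives the call.
 */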
Entry(hi64_sysenter)
Entry(idt64_sysenter)
	movq	(%rsp), %rsp		/* switch from temporary stack to pcb */
	/*
	 * Push values on to the PCB stack
	 * to cons up the saved machine state.
	 */
	push	$(USER_DS)		/* ss */
	push	%rcx			/* uesp */
	pushf				/* flags */
	/*
	 * Clear, among others, the Nested Task (NT) flag bit;
	 * this is zeroed by INT, but not by SYSENTER.
	 */
	push	$0
	popf
	push	$(SYSENTER_CS)		/* cs */
L_sysenter_continue:
	swapgs				/* switch to kernel gs (cpu_data) */
	push	%rdx			/* eip */
	push	%rax			/* err/eax - syscall code */
	PUSH_FUNCTION(HNDL_SYSENTER)
	pushq	$(T_SYSENTER)
	orl	$(EFL_IF), ISF64_RFLAGS(%rsp)
	jmp	L_32bit_entry_check


Entry(idt64_page_fault)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	push	$(T_PAGE_FAULT)
	push	%rax			/* save %rax temporarily */
	leaq	EXT(idt64_unix_scall_copy_args)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)	/* fault during copy args? */
	je	1f			/* - yes, handle copy arg fault */
	testb	$3, 8+ISF64_CS(%rsp)	/* was trap from kernel? */
	jz	L_kernel_trap		/* - yes, handle with care */
	pop	%rax			/* restore %rax, swapgs, and continue */
	swapgs
	jmp	L_dispatch_continue
1:
	add	$(8+ISF64_SIZE), %rsp	/* remove entire intr stack frame */
	jmp	L_copy_args_continue	/* continue system call entry */


/*
 * Debug trap. Check for single-stepping across system call into
 * kernel. If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 */
Entry(idt64_debug)
	push	$0			/* error code */
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_DEBUG)

	testb	$3, ISF64_CS(%rsp)
	jnz	L_dispatch

	/*
	 * trap came from kernel mode
	 */

	push	%rax			/* save %rax temporarily */
	lea	EXT(idt64_sysenter)(%rip), %rax
	cmp	%rax, ISF64_RIP+8(%rsp)
	pop	%rax
	jne	L_dispatch
	/*
	 * Interrupt stack frame has been pushed on the temporary stack.
	 * We have to switch to pcb stack and patch up the saved state.
	 */
	mov	%rcx, ISF64_ERR(%rsp)	/* save %rcx in error slot */
	mov	ISF64_SS+8(%rsp), %rcx	/* top of temp stack -> pcb stack */
	xchg	%rcx,%rsp		/* switch to pcb stack */
	push	$(USER_DS)		/* ss */
	push	ISF64_ERR(%rcx)		/* saved %rcx into rsp slot */
	push	ISF64_RFLAGS(%rcx)	/* rflags */
	push	$(SYSENTER_TF_CS)	/* cs - not SYSENTER_CS for iret path */
	mov	ISF64_ERR(%rcx),%rcx	/* restore %rcx */
	jmp	L_sysenter_continue	/* continue sysenter entry */

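/*
 * Note (inferred from the check below): a double fault whose faulting
 * %rip is the idt64_syscall entry point itself indicates the syscall
 * entry faulted before it could switch away from an unusable user %rsp;
 * in that case the user stack pointer is recovered from the fault frame
 * and the syscall entry is simply redone. Any other double fault is
 * dispatched as fatal.
 */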
Entry(idt64_double_fault)
	PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
	pushq	$(T_DOUBLE_FAULT)

	push	%rax
	leaq	EXT(idt64_syscall)(%rip), %rax
	cmp	%rax, ISF64_RIP+8(%rsp)
	pop	%rax
	jne	L_64bit_dispatch

	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_syscall_continue


/*
 * General protection or segment-not-present fault.
 * Check for a GP/NP fault in the kernel_return
 * sequence; if there, report it as a GP/NP fault on the user's instruction.
 *
 * rsp->	 0 ISF64_TRAPNO:	trap code (NP or GP)
 *		 8 ISF64_TRAPFN:	trap function
 *		16 ISF64_ERR:		segment number in error (error code)
 *		24 ISF64_RIP:		rip
 *		32 ISF64_CS:		cs
 *		40 ISF64_RFLAGS:	rflags
 *		48 ISF64_RSP:		rsp
 *		56 ISF64_SS:		ss
 *		64			old registers (trap is from kernel)
 */
Entry(idt64_gen_prot)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_GENERAL_PROTECTION)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(idt64_stack_fault)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_STACK_FAULT)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(idt64_segnp)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_SEGMENT_NOT_PRESENT)
					/* indicate fault type */
trap_check_kernel_exit:
	testb	$3,ISF64_CS(%rsp)
	jnz	L_dispatch
	/*
	 * trap was from kernel mode,
	 * so check for the kernel exit sequence
	 */
	push	%rax

	leaq	EXT(ret32_iret)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_fault_iret
	leaq	EXT(ret64_iret)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_fault_iret
	leaq	EXT(ret32_set_ds)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_es)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_fs)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_gs)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg

	leaq	EXT(idt64_unix_scall_copy_args)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	cmove	8+ISF64_RSP(%rsp), %rsp
	je	L_copy_args_continue

	/* fall through */

L_kernel_trap:
	/*
	 * Here after taking an unexpected trap from kernel mode - perhaps
	 * while running in the trampolines hereabouts.
	 * Note: %rax has been pushed on stack.
	 * Make sure we're not on the PCB stack; if so, move to the kernel stack.
	 * This is likely a fatal condition.
	 * But first, try to ensure we have the kernel gs base active...
	 */
	movq	%gs:CPU_THIS, %rax	/* get gs_base into %rax */
	test	%rax, %rax		/* test sign bit (MSB) */
	js	1f			/* -ve kernel addr, no swap */
	swapgs				/* +ve user addr, swap */
1:
	movq	%gs:CPU_UBER_ISF, %rax	/* PCB stack addr */
	subq	%rsp, %rax
	cmpq	$(PAGE_SIZE), %rax	/* current stack in PCB? */
	jb	2f			/* - yes, deal with it */
	pop	%rax			/* - no, restore %rax */
	jmp	L_64bit_dispatch
2:
	/*
	 * Here if %rsp is in the PCB.
	 * Copy the interrupt stack frame from PCB stack to kernel stack,
	 * pushing the fields in reverse order so the frame layout is preserved.
	 */
	movq	%gs:CPU_KERNEL_STACK, %rax
	xchgq	%rax, %rsp
	pushq	8+ISF64_SS(%rax)
	pushq	8+ISF64_RSP(%rax)
	pushq	8+ISF64_RFLAGS(%rax)
	pushq	8+ISF64_CS(%rax)
	pushq	8+ISF64_RIP(%rax)
	pushq	8+ISF64_ERR(%rax)
	pushq	8+ISF64_TRAPFN(%rax)
	pushq	8+ISF64_TRAPNO(%rax)
	movq	(%rax), %rax		/* recover %rax saved on the old stack */
	jmp	L_64bit_dispatch

/*
 * GP/NP fault on IRET: CS or SS is in error.
 * Note that the user ss is originally 16-byte aligned, and we'd popped the
 * stack back to contain just the rip/cs/rflags/rsp/ss before issuing the iret.
 * On taking the GP/NP fault on the iret instruction, the stack is 16-byte
 * aligned before the interrupt frame is pushed. Hence, an 8-byte padding exists.
 *
 * on SP is
 * (-	rax saved above, which is immediately popped)
 *	  0 ISF64_TRAPNO:	trap code (NP or GP)
 *	  8 ISF64_TRAPFN:	trap function
 *	 16 ISF64_ERR:		segment number in error (error code)
 *	 24 ISF64_RIP:		rip
 *	 32 ISF64_CS:		cs
 *	 40 ISF64_RFLAGS:	rflags
 *	 48 ISF64_RSP:		rsp	<-- new trapno
 *	 56 ISF64_SS:		ss	<-- new trapfn
 *	 64 pad8			<-- new errcode
 *	 72 user rip
 *	 80 user cs
 *	 88 user rflags
 *	 96 user rsp
 *	104 user ss	(16-byte aligned)
 */
L_fault_iret:
	pop	%rax			/* recover saved %rax */
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don't need saved rip) */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	%rax, ISF64_RSP(%rsp)	/* put in user trap number */
	mov	ISF64_TRAPFN(%rsp), %rax
	mov	%rax, ISF64_SS(%rsp)	/* put in user trap function */
	mov	ISF64_ERR(%rsp), %rax	/* get error code */
	mov	%rax, 8+ISF64_SS(%rsp)	/* put in user errcode */
	mov	ISF64_RIP(%rsp), %rax	/* restore rax */
	add	$(ISF64_RSP),%rsp	/* reset to new trapfn */
	/* now treat as fault from user */
	jmp	L_dispatch

/*
 * Fault restoring a segment register. All of the saved state is still
 * on the stack untouched since we haven't yet moved the stack pointer.
 */
L_32bit_fault_set_seg:
	swapgs
	pop	%rax			/* toss saved %rax from stack */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	ISF64_TRAPFN(%rsp), %rcx
	mov	ISF64_ERR(%rsp), %rdx
	mov	ISF64_RSP(%rsp), %rsp	/* reset stack to saved state */
	mov	%rax,ISC32_TRAPNO(%rsp)
	mov	%rcx,ISC32_TRAPFN(%rsp)
	mov	%rdx,ISC32_ERR(%rsp)
					/* now treat as fault from user */
					/* except that all the state is */
					/* already saved - we just have to */
					/* move the trapno and error into */
					/* the compatibility frame */
	jmp	L_32bit_dispatch_after_fault


/*
 * Fatal exception handlers:
 */
Entry(idt64_db_task_dbl_fault)
	PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
	pushq	$(T_DOUBLE_FAULT)
	jmp	L_dispatch

Entry(idt64_db_task_stk_fault)
	PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
	pushq	$(T_STACK_FAULT)
	jmp	L_dispatch

Entry(idt64_mc)
	push	$(0)			/* Error */
	PUSH_FUNCTION(HNDL_MACHINE_CHECK)
	pushq	$(T_MACHINE_CHECK)
	jmp	L_dispatch


/* All 'exceptions' enter hndl_alltraps:
 *	rsp	-> x86_saved_state_t
 *	esi	   cs at trap
 *
 * The rest of the state is set up as:
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(hndl_alltraps)
	mov	%esi, %eax
	testb	$3, %al
	jz	trap_from_kernel

	TIME_TRAP_UENTRY

	/* Check for active vtimers in the current task */
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	mov	TH_TASK(%rcx), %rbx
	TASK_VTIMER_CHECK(%rbx, %rcx)

	movq	%rsp, %rdi		/* also pass it as arg0 */
	movq	%gs:CPU_KERNEL_STACK,%rsp	/* switch to kernel stack */

	CCALL(user_trap)		/* call user trap routine */
					/* user_trap() unmasks interrupts */
	cli				/* hold off intrs - critical section */
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */

#define CLI cli
#define STI sti

Entry(return_from_trap)
	movq	%gs:CPU_ACTIVE_THREAD,%rsp
	movq	TH_PCB_ISS(%rsp), %rsp	/* switch back to PCB stack */
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax
	je	EXT(return_to_user)	/* branch if no AST */

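/*
 * Note: the "PFZ" tested below is the commpage's preemption-free zone --
 * user-mode code sequences that must not observe an AST-driven preemption
 * midway. If the interrupted user %rip lies inside it, the AST is left
 * pending: a flag is stored for the PFZ code (in %ebx/%rbx) and we return
 * to user mode without servicing it.
 */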
L_return_from_trap_with_ast:
	movq	%rsp, %r13
	movq	%gs:CPU_KERNEL_STACK, %rsp

	testl	%ecx, %ecx	/* see if we need to check for an EIP in the PFZ */
	je	2f		/* no, go handle the AST */
	cmpl	$(SS_64), SS_FLAVOR(%r13)	/* are we a 64-bit task? */
	je	1f
	/* no... 32-bit user mode */
	movl	R32_EIP(%r13), %edi
	xorq	%rbp, %rbp		/* clear framepointer */
	CCALL(commpage_is_in_pfz32)
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R32_EBX(%r13)	/* let the PFZ know we've pended an AST */
	movq	%r13, %rsp		/* switch back to PCB stack */
	jmp	EXT(return_to_user)
1:
	movq	R64_RIP(%r13), %rdi
	xorq	%rbp, %rbp		/* clear framepointer */
	CCALL(commpage_is_in_pfz64)
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R64_RBX(%r13)	/* let the PFZ know we've pended an AST */
	movq	%r13, %rsp		/* switch back to PCB stack */
	jmp	EXT(return_to_user)
2:
	STI			/* interrupts always enabled on return to user mode */

	xor	%edi, %edi		/* zero %rdi */
	xorq	%rbp, %rbp		/* clear framepointer */
	CCALL(i386_astintr)		/* take the AST */

	CLI
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)	/* and check again (rare) */

/*
 * Trap from kernel mode. No need to switch stacks.
 * Interrupts must be off here - we will restore them to the state at the
 * time of the trap as soon as it's safe to do so without recursing into
 * preemption.
 */
hndl_kerntrap:
trap_from_kernel:

	movq	%rsp, %rdi		/* saved state addr */
	pushq	R64_RIP(%rsp)		/* Simulate a CALL from fault point */
	pushq	%rbp			/* Extend framepointer chain */
	movq	%rsp, %rbp
	CCALLWITHSP(kernel_trap)	/* to kernel trap routine */
	popq	%rbp
	addq	$8, %rsp
	cli

	movl	%gs:CPU_PENDING_AST,%eax	/* get pending asts */
	testl	$(AST_URGENT),%eax	/* any urgent preemption? */
	je	ret_to_kernel		/* no, nothing to do */
	cmpl	$(T_PREEMPT),R64_TRAPNO(%rsp)
	je	ret_to_kernel		/* T_PREEMPT handled in kernel_trap() */
	testl	$(EFL_IF),R64_RFLAGS(%rsp)	/* interrupts disabled? */
	je	ret_to_kernel
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel
	movq	%gs:CPU_KERNEL_STACK,%rax
	movq	%rsp,%rcx
	xorq	%rax,%rcx
	andq	EXT(kernel_stack_mask)(%rip),%rcx
	testq	%rcx,%rcx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */
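	/*
	 * The check above: xor-ing %rsp with %gs:CPU_KERNEL_STACK and
	 * masking with kernel_stack_mask leaves zero exactly when the two
	 * addresses differ only within the stack's size/alignment, i.e.
	 * when we are still on the thread's kernel stack. For example,
	 * with an assumed 16KB stack (mask ~0x3fff), 0x...c4000 and
	 * 0x...c7ff8 xor to 0x3ff8, which masks to zero.
	 */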

	CCALL1(i386_astintr, $1)	/* take the AST */
	jmp	ret_to_kernel


/*
 * All interrupts on all tasks enter here with:
 *	rsp	-> x86_saved_state_t
 *	esi	   cs at trap
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(hndl_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movq	%gs:CPU_INT_STACK_TOP,%rcx
	cmpq	%rsp,%rcx
	jb	1f
	leaq	-INTSTACK_SIZE(%rcx),%rdx
	cmpq	%rsp,%rdx
	jb	int_from_intstack
1:
	xchgq	%rcx,%rsp		/* switch to interrupt stack */

	mov	%cr0,%rax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	mov	%rax,%cr0		/* set cr0 */

	subq	$8, %rsp		/* for 16-byte stack alignment */
	pushq	%rcx			/* save pointer to old stack */
	movq	%rcx,%gs:CPU_INT_STATE	/* save intr state */

	TIME_INT_ENTRY			/* do timing */

	/* Check for active vtimers in the current task */
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	mov	TH_TASK(%rcx), %rbx
	TASK_VTIMER_CHECK(%rbx, %rcx)

	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movq	%gs:CPU_INT_STATE, %rdi

	CCALL(interrupt)		/* call generic interrupt routine */

	cli				/* just in case we returned with intrs enabled */
	xor	%rax,%rax
	movq	%rax,%gs:CPU_INT_STATE	/* clear intr state pointer */

	.globl	EXT(return_to_iret)
LEXT(return_to_iret)			/* (label for kdb_kintr and hardclock) */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */

	movq	%gs:CPU_ACTIVE_THREAD,%rax
	movq	TH_PCB_FPS(%rax),%rax	/* get pcb's ifps */
	cmpq	$0,%rax			/* Is there a context */
	je	1f			/* Branch if not */
	movl	FP_VALID(%rax),%eax	/* Load fp_valid */
	cmpl	$0,%eax			/* Check if valid */
	jne	1f			/* Branch if valid */
	clts				/* Clear TS */
	jmp	2f
1:
	mov	%cr0,%rax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	mov	%rax,%cr0		/* set cr0 */
2:
	popq	%rsp			/* switch back to old stack */

	/* Load interrupted code segment into %eax */
	movl	R32_CS(%rsp),%eax	/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%rsp)	/* 64-bit? */
#if DEBUG_IDT64
	jne	4f
	movl	R64_CS(%rsp),%eax	/* 64-bit user mode */
	jmp	3f
4:
	cmpl	$(SS_32),SS_FLAVOR(%rsp)
	je	3f
	POSTCODE2(0x6431)
	CCALL1(panic_idt64, %rsp)
	hlt
#else
	jne	3f
	movl	R64_CS(%rsp),%eax	/* 64-bit user mode */
#endif
3:
	testb	$3,%al			/* user mode, */
	jnz	ast_from_interrupt_user	/* go handle potential ASTs */
	/*
	 * we only want to handle preemption requests if
	 * the interrupt fell in the kernel context
	 * and preemption isn't disabled
	 */
	movl	%gs:CPU_PENDING_AST,%eax
	testl	$(AST_URGENT),%eax	/* any urgent requests? */
	je	ret_to_kernel		/* no, nothing to do */

	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel		/* yes, skip it */

	movq	%gs:CPU_KERNEL_STACK,%rax
	movq	%rsp,%rcx
	xorq	%rax,%rcx
	andq	EXT(kernel_stack_mask)(%rip),%rcx
	testq	%rcx,%rcx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	/*
	 * Take an AST from kernel space. We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
	CCALL1(i386_astintr, $1)

	jmp	ret_to_kernel


/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL
	incl	%gs:CPU_NESTED_ISTACK
	mov	%rsp, %rdi		/* x86_saved_state */
	CCALL(interrupt)

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL
	decl	%gs:CPU_NESTED_ISTACK
#if DEBUG_IDT64
	CCALL1(panic_idt64, %rsp)
	POSTCODE2(0x6411)
	hlt
#endif
	jmp	ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* pending ASTs? */
	je	EXT(ret_to_user)	/* no, nothing to do */

	TIME_TRAP_UENTRY

	movl	$1, %ecx		/* check if we're in the PFZ */
	jmp	L_return_from_trap_with_ast	/* return */


/* Syscall dispatch routines! */

/*
 *
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *	rsp	-> x86_saved_state32_t
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(hndl_sysenter)
	/*
	 * We can be here either for a mach syscall or a unix syscall,
	 * as indicated by the sign of the code:
	 */
	movl	R32_EAX(%rsp),%eax
	testl	%eax,%eax
	js	EXT(hndl_mach_scall)	/* < 0 => mach */
					/* > 0 => unix */

Entry(hndl_unix_scall)
/* If the caller (typically LibSystem) has recorded the cumulative size of
 * the arguments in EAX, copy them over from the user stack directly.
 * We recover from exceptions inline--if the copy loop doesn't complete
 * due to an exception, we fall back to copyin from compatibility mode.
 * We can potentially extend this mechanism to mach traps as well (DRK).
 */
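/* Note, inferred from the masks and shifts below: the cumulative argument
 * size is encoded in high bits of %eax alongside the syscall number.
 * I386_SYSCALL_ARG_BYTES_MASK tests for its presence, and the shift by
 * I386_SYSCALL_ARG_DWORDS_SHIFT plus the I386_SYSCALL_ARG_DWORDS_MASK
 * mask convert it into a dword count for the `rep movsl`. The word at
 * CPU_UBER_ARG_STORE_VALID is cleared before and set after the copy so
 * the page-fault recovery path can tell whether the store completed.
 */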
	testl	$(I386_SYSCALL_ARG_BYTES_MASK), %eax
	jz	L_copy_args_continue
	movl	%eax, %ecx
	mov	%gs:CPU_UBER_ARG_STORE_VALID, %rbx
	shrl	$(I386_SYSCALL_ARG_DWORDS_SHIFT), %ecx
	andl	$(I386_SYSCALL_ARG_DWORDS_MASK), %ecx
	mov	%gs:CPU_UBER_ARG_STORE, %rdi
	mov	ISC32_RSP(%rsp), %rsi
	add	$4, %rsi
	movl	$0, (%rbx)

EXT(idt64_unix_scall_copy_args):
	rep	movsl
	movl	$1, (%rbx)
L_copy_args_continue:

	TIME_TRAP_UENTRY

	movq	%gs:CPU_KERNEL_STACK,%rdi
	xchgq	%rdi,%rsp		/* switch to kernel stack */
	movq	%gs:CPU_ACTIVE_THREAD,%rcx	/* get current thread */
	movq	TH_TASK(%rcx),%rbx	/* point to current task */
	incl	TH_SYSCALLS_UNIX(%rcx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx,%rcx)

	sti

	CCALL(unix_syscall)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mach_scall)
	TIME_TRAP_UENTRY

	movq	%gs:CPU_KERNEL_STACK,%rdi
	xchgq	%rdi,%rsp		/* switch to kernel stack */
	movq	%gs:CPU_ACTIVE_THREAD,%rcx	/* get current thread */
	movq	TH_TASK(%rcx),%rbx	/* point to current task */
	incl	TH_SYSCALLS_MACH(%rcx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx,%rcx)

	sti

	CCALL(mach_call_munger)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mdep_scall)
	TIME_TRAP_UENTRY

	movq	%gs:CPU_KERNEL_STACK,%rdi
	xchgq	%rdi,%rsp		/* switch to kernel stack */

	/* Check for active vtimers in the current task */
	movq	%gs:CPU_ACTIVE_THREAD,%rcx	/* get current thread */
	movq	TH_TASK(%rcx),%rbx	/* point to current task */
	TASK_VTIMER_CHECK(%rbx,%rcx)

	sti

	CCALL(machdep_syscall)
	/*
	 * always returns through thread_exception_return
	 */

/*
 * 64bit Tasks
 * System call entries via syscall only:
 *
 *	rsp	-> x86_saved_state64_t
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(hndl_syscall)
	TIME_TRAP_UENTRY

	movq	%gs:CPU_KERNEL_STACK,%rdi
	xchgq	%rdi,%rsp		/* switch to kernel stack */
	movq	%gs:CPU_ACTIVE_THREAD,%rcx	/* get current thread */
	movq	TH_TASK(%rcx),%rbx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx,%rcx)

	/*
	 * We can be here either for a mach, unix, machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
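	/*
	 * (For reference: the class is carried in the high byte of the
	 * 64-bit syscall number -- see SYSCALL_CLASS_SHIFT/MASK in
	 * <mach/i386/syscall_sw.h> -- so a Mach trap number looks like
	 * (SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT) | trap_number.)
	 */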
	movl	R64_RAX(%rdi), %eax	/* syscall number/class */
	movl	%eax, %edx
	andl	$(SYSCALL_CLASS_MASK), %edx	/* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_mach_scall64)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_unix_scall64)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_mdep_scall64)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_diag_scall64)

	/* Syscall class unknown */
	sti
	CCALL3(i386_exception, $(EXC_SYSCALL), %rax, $1)
	/* no return */


Entry(hndl_unix_scall64)
	incl	TH_SYSCALLS_UNIX(%rcx)	/* increment call count */
	sti

	CCALL(unix_syscall64)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mach_scall64)
	incl	TH_SYSCALLS_MACH(%rcx)	/* increment call count */
	sti

	CCALL(mach_call_munger64)
	/*
	 * always returns through thread_exception_return
	 */



Entry(hndl_mdep_scall64)
	sti

	CCALL(machdep_syscall64)
	/*
	 * always returns through thread_exception_return
	 */

Entry(hndl_diag_scall64)
	pushq	%rdi			// Push the previous stack
	CCALL(diagCall64)		// Call diagnostics
	cli				// Disable interrupts just in case
	test	%eax, %eax		// What kind of return is this?
	je	1f			// - branch if bad (zero)
	popq	%rsp			// Get back the pcb stack
	jmp	EXT(return_to_user)	// Normal return, do not check asts...
1:
	sti
	CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
	/* no return */

Entry(hndl_machine_check)
	CCALL1(panic_machine_check64, %rsp)
	hlt

Entry(hndl_double_fault)
	CCALL1(panic_double_fault64, %rsp)
	hlt