/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/asm.h>
#include <assym.s>
#include <mach_kdb.h>
#include <i386/eflags.h>
#include <i386/rtclock_asm.h>
#include <i386/trap.h>
#define _ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <mach/exception_types.h>

#if DEBUG
#define DEBUG_IDT64 1
#endif

/*
 * This is the low-level trap and interrupt handling code associated with
 * the IDT. It also includes system call handlers for sysenter/syscall.
 * The IDT itself is defined in mp_desc.c.
 *
 * Code here is structured as follows:
 *
 * stubs	Code called directly from an IDT vector.
 *		All entry points have the "idt64_" prefix and they are built
 *		using macros expanded by the inclusion of idt_table.h.
 *		This code performs vector-dependent identification and jumps
 *		into the dispatch code.
 *
 * dispatch	The dispatch code is responsible for saving the thread state
 *		(which is either 64-bit or 32-bit) and then jumping to the
 *		class handler identified by the stub.
 *
 * returns	Code to restore state and return to the previous context.
 *
 * handlers	There are several classes of handlers:
 *		interrupt	- asynchronous events typically from external devices
 *		trap		- synchronous events due to thread execution
 *		syscall		- synchronous system call request
 *		fatal		- fatal traps
 */

/*
 * Handlers:
 */
#define	HNDL_ALLINTRS		EXT(hndl_allintrs)
#define	HNDL_ALLTRAPS		EXT(hndl_alltraps)
#define	HNDL_SYSENTER		EXT(hndl_sysenter)
#define	HNDL_SYSCALL		EXT(hndl_syscall)
#define	HNDL_UNIX_SCALL		EXT(hndl_unix_scall)
#define	HNDL_MACH_SCALL		EXT(hndl_mach_scall)
#define	HNDL_MDEP_SCALL		EXT(hndl_mdep_scall)
#define	HNDL_DIAG_SCALL		EXT(hndl_diag_scall)
#define	HNDL_DOUBLE_FAULT	EXT(hndl_double_fault)
#define	HNDL_MACHINE_CHECK	EXT(hndl_machine_check)


#if 1
#define PUSH_FUNCTION(func) \
	sub	$8, %rsp	;\
	push	%rax		;\
	leaq	func(%rip), %rax ;\
	movq	%rax, 8(%rsp)	;\
	pop	%rax
#else
#define PUSH_FUNCTION(func) pushq func
#endif
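
/*
 * Note: the roundabout form above exists because pushq cannot take a
 * 64-bit immediate or RIP-relative operand; the handler address is
 * materialized with leaq via a scratch register into a slot reserved
 * below it, leaving %rax unchanged once the macro completes.
 */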

/* The wrapper for all non-special traps/interrupts */
/* Everything up to PUSH_FUNCTION is just to output
 * the interrupt number to the postcode display
 */
#if DEBUG_IDT64
#define IDT_ENTRY_WRAPPER(n, f)	\
	push	%rax		;\
	POSTCODE2(0x6400+n)	;\
	pop	%rax		;\
	PUSH_FUNCTION(f)	;\
	pushq	$(n)		;\
	jmp	L_dispatch
#else
#define IDT_ENTRY_WRAPPER(n, f)	\
	PUSH_FUNCTION(f)	;\
	pushq	$(n)		;\
	jmp	L_dispatch
#endif
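
/*
 * Net effect: every stub reaches L_dispatch with the trap number at
 * (%rsp) (ISF64_TRAPNO) and the trap function at 8(%rsp) (ISF64_TRAPFN),
 * sitting just below the real or dummy error code.
 */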

/* A trap that comes with an error code already on the stack */
#define TRAP_ERR(n, f)	\
	Entry(f)	;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)

/* A normal trap */
#define TRAP(n, f)	\
	Entry(f)	;\
	pushq	$0	;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)

#define USER_TRAP TRAP

/* An interrupt */
#define INTERRUPT(n)	\
	Entry(_intr_ ## n)	;\
	pushq	$0	;\
	IDT_ENTRY_WRAPPER(n, HNDL_ALLINTRS)

/* A trap with a special-case handler, hence we don't need to define anything */
#define TRAP_SPC(n, f)
#define TRAP_IST(n, f)
#define USER_TRAP_SPC(n, f)

/* Generate all the stubs */
#include "idt_table.h"
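
/*
 * Illustrative expansion (assuming, for example, an idt_table.h entry
 * of the form TRAP(0x00, idt64_zero_div)):
 *
 *	Entry(idt64_zero_div)
 *	pushq	$0			// dummy error code
 *	PUSH_FUNCTION(HNDL_ALLTRAPS)	// trap handler function
 *	pushq	$(0x00)			// trap number
 *	jmp	L_dispatch
 */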

/*
 * Common dispatch point.
 * Determine what mode has been interrupted and save state accordingly.
 */
L_dispatch:
	cmpl	$(KERNEL64_CS), ISF64_CS(%rsp)
	je	L_64bit_dispatch

	swapgs

	/*
	 * Check for trap from EFI32, and restore cr3 and rsp if so.
	 * A trap from EFI32 is fatal.
	 */
	cmpl	$(KERNEL32_CS), ISF64_CS(%rsp)
	jne	L_dispatch_continue
	push	%rcx
	mov	EXT(pal_efi_saved_cr3)(%rip), %rcx
	mov	%rcx, %cr3
	leaq	0(%rip), %rcx
	shr	$32, %rcx		/* splice the upper 32-bits of rip */
	shl	$32, %rsp		/* .. and the lower 32-bits of rsp */
	shrd	$32, %rcx, %rsp		/* to recover the full 64-bits of rsp */
	pop	%rcx
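	/*
	 * The shrd splice above relies on this handler running in a
	 * 64-bit code segment: the upper 32 bits of %rip are taken to
	 * match those of the kernel stack, so combining them with the
	 * 32-bit %rsp left by EFI32 rebuilds a usable 64-bit stack
	 * pointer.
	 */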

L_dispatch_continue:
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	je	L_32bit_dispatch	/* 32-bit user task */
	/* fall through to 64-bit user dispatch */

/*
 * Here for 64-bit user task or kernel
 */
L_64bit_dispatch:
	subq	$(ISS64_OFFSET), %rsp
	movl	$(SS_64), SS_FLAVOR(%rsp)

	cld

	/*
	 * Save segment regs - for completeness since they're not used.
	 */
	mov	%fs, R64_FS(%rsp)
	mov	%gs, R64_GS(%rsp)

	/* Save general-purpose registers */
	mov	%rax, R64_RAX(%rsp)
	mov	%rcx, R64_RCX(%rsp)
	mov	%rbx, R64_RBX(%rsp)
	mov	%rbp, R64_RBP(%rsp)
	mov	%r11, R64_R11(%rsp)
	mov	%r12, R64_R12(%rsp)
	mov	%r13, R64_R13(%rsp)
	mov	%r14, R64_R14(%rsp)
	mov	%r15, R64_R15(%rsp)

	/* cr2 is significant only for page-faults */
	mov	%cr2, %rax
	mov	%rax, R64_CR2(%rsp)

	/* Other registers (which may contain syscall args) */
	mov	%rdi, R64_RDI(%rsp)	/* arg0 .. */
	mov	%rsi, R64_RSI(%rsp)
	mov	%rdx, R64_RDX(%rsp)
	mov	%r10, R64_R10(%rsp)
	mov	%r8, R64_R8(%rsp)
	mov	%r9, R64_R9(%rsp)	/* .. arg5 */

	mov	R64_TRAPNO(%rsp), %ebx	/* %ebx := trapno for later */
	mov	R64_TRAPFN(%rsp), %rdx	/* %rdx := trapfn for later */
	mov	R64_CS(%rsp), %esi	/* %esi := cs for later */

	jmp	L_common_dispatch

L_64bit_entry_reject:
	/*
	 * Here for a 64-bit user attempting an invalid kernel entry.
	 */
	pushq	%rax
	leaq	HNDL_ALLTRAPS(%rip), %rax
	movq	%rax, ISF64_TRAPFN+8(%rsp)
	popq	%rax
	movq	$(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
	jmp	L_64bit_dispatch

L_32bit_entry_check:
	/*
	 * Check we're not a confused 64-bit user.
	 */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
	/* fall through to 32-bit handler: */

L_32bit_dispatch: /* 32-bit user task */
	subq	$(ISC32_OFFSET), %rsp
	movl	$(SS_32), SS_FLAVOR(%rsp)

	cld
	/*
	 * Save segment regs
	 */
	mov	%ds, R32_DS(%rsp)
	mov	%es, R32_ES(%rsp)
	mov	%fs, R32_FS(%rsp)
	mov	%gs, R32_GS(%rsp)

	/*
	 * Save general 32-bit registers
	 */
	mov	%eax, R32_EAX(%rsp)
	mov	%ebx, R32_EBX(%rsp)
	mov	%ecx, R32_ECX(%rsp)
	mov	%edx, R32_EDX(%rsp)
	mov	%ebp, R32_EBP(%rsp)
	mov	%esi, R32_ESI(%rsp)
	mov	%edi, R32_EDI(%rsp)

	/* Unconditionally save cr2; only meaningful on page faults */
	mov	%cr2, %rax
	mov	%eax, R32_CR2(%rsp)

	/*
	 * Copy registers already saved in the machine state
	 * (in the interrupt stack frame) into the compat save area.
	 */
	mov	ISC32_RIP(%rsp), %eax
	mov	%eax, R32_EIP(%rsp)
	mov	ISC32_RFLAGS(%rsp), %eax
	mov	%eax, R32_EFLAGS(%rsp)
	mov	ISC32_CS(%rsp), %esi	/* %esi := %cs for later */

	mov	%esi, R32_CS(%rsp)
	mov	ISC32_RSP(%rsp), %eax
	mov	%eax, R32_UESP(%rsp)
	mov	ISC32_SS(%rsp), %eax
	mov	%eax, R32_SS(%rsp)
L_32bit_dispatch_after_fault:
	mov	ISC32_TRAPNO(%rsp), %ebx	/* %ebx := trapno for later */
	mov	%ebx, R32_TRAPNO(%rsp)
	mov	ISC32_ERR(%rsp), %eax
	mov	%eax, R32_ERR(%rsp)
	mov	ISC32_TRAPFN(%rsp), %rdx	/* %rdx := trapfn for later */

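/*
 * Both the 64-bit and 32-bit save paths converge below with:
 *	%ebx	trap number
 *	%rdx	trap function to dispatch to
 *	%esi	cs at the time of the trap (used for the user/kernel test)
 */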
L_common_dispatch:
	/*
	 * On entering the kernel, we don't need to switch cr3
	 * because the kernel shares the user's address space.
	 * But we mark the kernel's cr3 as "active".
	 * If, however, the invalid cr3 flag is set, we have to flush tlbs
	 * since the kernel's mapping was changed while we were in userspace.
	 *
	 * But: if global no_shared_cr3 is TRUE we do switch to the kernel's cr3
	 * so that illicit accesses to userspace can be trapped.
	 */
	mov	%gs:CPU_KERNEL_CR3, %rcx
	mov	%rcx, %gs:CPU_ACTIVE_CR3
	test	$3, %esi		/* user/kernel? */
	jz	1f			/* skip cr3 reload from kernel */
	xor	%rbp, %rbp
	cmpl	$0, EXT(no_shared_cr3)(%rip)
	je	1f
	mov	%rcx, %cr3		/* load kernel cr3 */
	jmp	2f			/* and skip tlb flush test */
1:
	mov	%gs:CPU_ACTIVE_CR3+4, %rcx
	shr	$32, %rcx
	testl	%ecx, %ecx
	jz	2f
	movl	$0, %gs:CPU_TLB_INVALID
	testl	$(1<<16), %ecx		/* Global? */
	jz	11f
	mov	%cr4, %rcx		/* RMWW CR4, for lack of an alternative */
	and	$(~CR4_PGE), %rcx
	mov	%rcx, %cr4
	or	$(CR4_PGE), %rcx
	mov	%rcx, %cr4
	jmp	2f
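	/*
	 * Note: toggling CR4.PGE above invalidates all TLB entries,
	 * including global pages, whereas the CR3 rewrite at 11 below
	 * flushes only non-global entries; that is why the global case
	 * cannot simply reload CR3.
	 */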

11:	mov	%cr3, %rcx
	mov	%rcx, %cr3
2:
	mov	%gs:CPU_ACTIVE_THREAD, %rcx	/* Get the active thread */
	cmpq	$0, TH_PCB_IDS(%rcx)	/* Is there a debug register state? */
	je	3f
	mov	$0, %rcx		/* If so, reset DR7 (the control) */
	mov	%rcx, %dr7
3:
	incl	%gs:hwIntCnt(,%ebx,4)	// Bump the trap/intr count
	/* Dispatch the designated handler */
	jmp	*%rdx

/*
 * Control is passed here to return to user.
 */
Entry(return_to_user)
	TIME_TRAP_UEXIT

Entry(ret_to_user)
// XXX Would be nice to tidy up this debug register restore sequence...
	mov	%gs:CPU_ACTIVE_THREAD, %rdx
	movq	TH_PCB_IDS(%rdx), %rax	/* Obtain this thread's debug state */

	cmpq	$0, %rax		/* Is there a debug register context? */
	je	2f			/* branch if not */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
	jne	1f
	movl	DS_DR0(%rax), %ecx	/* If so, load the 32-bit DRs */
	movq	%rcx, %dr0
	movl	DS_DR1(%rax), %ecx
	movq	%rcx, %dr1
	movl	DS_DR2(%rax), %ecx
	movq	%rcx, %dr2
	movl	DS_DR3(%rax), %ecx
	movq	%rcx, %dr3
	movl	DS_DR7(%rax), %ecx
	movq	%rcx, %gs:CPU_DR7
	jmp	2f
1:
	mov	DS64_DR0(%rax), %rcx	/* Load the full width DRs */
	mov	%rcx, %dr0
	mov	DS64_DR1(%rax), %rcx
	mov	%rcx, %dr1
	mov	DS64_DR2(%rax), %rcx
	mov	%rcx, %dr2
	mov	DS64_DR3(%rax), %rcx
	mov	%rcx, %dr3
	mov	DS64_DR7(%rax), %rcx
	mov	%rcx, %gs:CPU_DR7
2:
	/*
	 * On exiting the kernel there's no need to switch cr3 since we're
	 * already running in the user's address space which includes the
	 * kernel. Nevertheless, we now mark the task's cr3 as active.
	 * But, if no_shared_cr3 is set, we do need to switch cr3 at this point.
	 */
	mov	%gs:CPU_TASK_CR3, %rcx
	mov	%rcx, %gs:CPU_ACTIVE_CR3
	movl	EXT(no_shared_cr3)(%rip), %eax
	test	%eax, %eax		/* -no_shared_cr3 */
	jz	3f
	mov	%rcx, %cr3
3:
	mov	%gs:CPU_DR7, %rax	/* Is there a debug control register? */
	cmp	$0, %rax
	je	4f
	mov	%rax, %dr7		/* Set DR7 */
	movq	$0, %gs:CPU_DR7
4:
	cmpl	$(SS_64), SS_FLAVOR(%rsp)	/* 64-bit state? */
	je	L_64bit_return

L_32bit_return:
#if DEBUG_IDT64
	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	je	1f
	cli
	POSTCODE2(0x6432)
	CCALL1(panic_idt64, %rsp)
1:
#endif /* DEBUG_IDT64 */

	/*
	 * Restore registers into the machine state for iret.
	 */
	movl	R32_EIP(%rsp), %eax
	movl	%eax, ISC32_RIP(%rsp)
	movl	R32_EFLAGS(%rsp), %eax
	movl	%eax, ISC32_RFLAGS(%rsp)
	movl	R32_CS(%rsp), %eax
	movl	%eax, ISC32_CS(%rsp)
	movl	R32_UESP(%rsp), %eax
	movl	%eax, ISC32_RSP(%rsp)
	movl	R32_SS(%rsp), %eax
	movl	%eax, ISC32_SS(%rsp)

	/*
	 * Restore general 32-bit registers
	 */
	movl	R32_EAX(%rsp), %eax
	movl	R32_EBX(%rsp), %ebx
	movl	R32_ECX(%rsp), %ecx
	movl	R32_EDX(%rsp), %edx
	movl	R32_EBP(%rsp), %ebp
	movl	R32_ESI(%rsp), %esi
	movl	R32_EDI(%rsp), %edi

	/*
	 * Restore segment registers. We may take an exception here but
	 * we've got enough space left in the save frame area to absorb
	 * a hardware frame plus the trapfn and trapno.
	 */
	swapgs
EXT(ret32_set_ds):
	movw	R32_DS(%rsp), %ds
EXT(ret32_set_es):
	movw	R32_ES(%rsp), %es
EXT(ret32_set_fs):
	movw	R32_FS(%rsp), %fs
EXT(ret32_set_gs):
	movw	R32_GS(%rsp), %gs

	/* pop compat frame + trapno, trapfn and error */
	add	$(ISC32_OFFSET)+8+8+8, %rsp
	cmp	$(SYSENTER_CS), ISF64_CS-8-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_fast_exit
EXT(ret32_iret):
	iretq				/* return from interrupt */

L_fast_exit:
	pop	%rdx			/* user return eip */
	pop	%rcx			/* pop and toss cs */
	andl	$(~EFL_IF), (%rsp)	/* clear interrupt enable; sti below */
	popf				/* flags - carry denotes failure */
	pop	%rcx			/* user return esp */
	sti				/* interrupts enabled after sysexit */
	sysexit				/* 32-bit sysexit */
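
/*
 * Note: sysexit resumes 32-bit user code at the address in %edx with the
 * user stack taken from %ecx, which is why the saved eip and esp are
 * popped into those registers above; the popf restores the remaining
 * flags with IF masked, and the trailing sti re-enables interrupts as
 * control returns to user mode.
 */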

ret_to_kernel:
#if DEBUG_IDT64
	cmpl	$(SS_64), SS_FLAVOR(%rsp)	/* 64-bit state? */
	je	1f
	cli
	POSTCODE2(0x6464)
	CCALL1(panic_idt64, %rsp)
	hlt
1:
	cmpl	$(KERNEL64_CS), R64_CS(%rsp)
	je	2f
	CCALL1(panic_idt64, %rsp)
	hlt
2:
#endif

L_64bit_return:
	testb	$3, R64_CS(%rsp)	/* returning to user-space? */
	jz	1f
	swapgs
1:

	/*
	 * Restore general 64-bit registers
	 */
	mov	R64_R15(%rsp), %r15
	mov	R64_R14(%rsp), %r14
	mov	R64_R13(%rsp), %r13
	mov	R64_R12(%rsp), %r12
	mov	R64_R11(%rsp), %r11
	mov	R64_R10(%rsp), %r10
	mov	R64_R9(%rsp), %r9
	mov	R64_R8(%rsp), %r8
	mov	R64_RSI(%rsp), %rsi
	mov	R64_RDI(%rsp), %rdi
	mov	R64_RBP(%rsp), %rbp
	mov	R64_RDX(%rsp), %rdx
	mov	R64_RBX(%rsp), %rbx
	mov	R64_RCX(%rsp), %rcx
	mov	R64_RAX(%rsp), %rax

	add	$(ISS64_OFFSET)+24, %rsp	/* pop saved state frame +
						   trapno + trapfn and error */
	cmpl	$(SYSCALL_CS), ISF64_CS-24(%rsp)
						/* test for fast entry/exit */
	je	L_sysret
.globl _dump_iretq
EXT(ret64_iret):
	iretq				/* return from interrupt */

L_sysret:
	/*
	 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
	 *	rcx	user rip
	 *	r11	user rflags
	 *	rsp	user stack pointer
	 */
	mov	ISF64_RIP-24(%rsp), %rcx
	mov	ISF64_RFLAGS-24(%rsp), %r11
	mov	ISF64_RSP-24(%rsp), %rsp
	sysretq				/* return from system call */
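
/*
 * Note: sysret architecturally reloads %rip from %rcx and %rflags from
 * %r11, mirroring what the syscall instruction saved on entry; only the
 * stack pointer must be restored by hand, since syscall/sysret do not
 * switch stacks.
 */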


/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */

#ifndef UNIX_INT
#error NO UNIX INT!!!
#endif
Entry(idt64_unix_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
	pushq	%rax			/* save system call number */
	PUSH_FUNCTION(HNDL_UNIX_SCALL)
	pushq	$(UNIX_INT)
	jmp	L_32bit_entry_check


Entry(idt64_mach_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
	pushq	%rax			/* save system call number */
	PUSH_FUNCTION(HNDL_MACH_SCALL)
	pushq	$(MACH_INT)
	jmp	L_32bit_entry_check


Entry(idt64_mdep_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
	pushq	%rax			/* save system call number */
	PUSH_FUNCTION(HNDL_MDEP_SCALL)
	pushq	$(MACHDEP_INT)
	jmp	L_32bit_entry_check


Entry(idt64_diag_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
	push	%rax			/* save system call number */
	PUSH_FUNCTION(HNDL_DIAG_SCALL)
	pushq	$(DIAG_INT)
	jmp	L_32bit_entry_check

Entry(hi64_syscall)
Entry(idt64_syscall)
L_syscall_continue:
	swapgs				/* Kapow! get per-cpu data area */
	mov	%rsp, %gs:CPU_UBER_TMP	/* save user stack */
	mov	%gs:CPU_UBER_ISF, %rsp	/* switch stack to pcb */

	/*
	 * Save values in the ISF frame in the PCB
	 * to cons up the saved machine state.
	 */
	movl	$(USER_DS), ISF64_SS(%rsp)
	movl	$(SYSCALL_CS), ISF64_CS(%rsp)	/* cs - a pseudo-segment */
	mov	%r11, ISF64_RFLAGS(%rsp)	/* rflags */
	mov	%rcx, ISF64_RIP(%rsp)		/* rip */
	mov	%gs:CPU_UBER_TMP, %rcx
	mov	%rcx, ISF64_RSP(%rsp)		/* user stack */
	mov	%rax, ISF64_ERR(%rsp)		/* err/rax - syscall code */
	movq	$(T_SYSCALL), ISF64_TRAPNO(%rsp)	/* trapno */
	leaq	HNDL_SYSCALL(%rip), %r11
	movq	%r11, ISF64_TRAPFN(%rsp)
	jmp	L_64bit_dispatch	/* this can only be a 64-bit task */
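
/*
 * Note on the sequence above: syscall does not switch stacks and leaves
 * the user %rip in %rcx and %rflags in %r11, so the code parks the user
 * %rsp in CPU_UBER_TMP, adopts the PCB stack from CPU_UBER_ISF, and
 * hand-builds the interrupt stack frame that a trap gate would otherwise
 * have pushed.
 */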

/*
 * sysenter entry point
 * Requires user code to set up:
 *	edx: user instruction pointer (return address)
 *	ecx: user stack pointer
 *		on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *       or requiring ecx to be preserved.
 */
Entry(hi64_sysenter)
Entry(idt64_sysenter)
	movq	(%rsp), %rsp
	/*
	 * Push values on to the PCB stack
	 * to cons up the saved machine state.
	 */
	push	$(USER_DS)		/* ss */
	push	%rcx			/* uesp */
	pushf				/* flags */
	/*
	 * Clear, among others, the Nested Task (NT) flag bit;
	 * this is zeroed by INT, but not by SYSENTER.
	 */
	push	$0
	popf
	push	$(SYSENTER_CS)		/* cs */
L_sysenter_continue:
	swapgs				/* switch to kernel gs (cpu_data) */
	push	%rdx			/* eip */
	push	%rax			/* err/eax - syscall code */
	PUSH_FUNCTION(HNDL_SYSENTER)
	pushq	$(T_SYSENTER)
	orl	$(EFL_IF), ISF64_RFLAGS(%rsp)
	jmp	L_32bit_entry_check
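
/*
 * Note on the sequence above: sysenter loads MSR-specified cs/rip/rsp
 * and saves no user state, so the frame is constructed by hand. The
 * pushf above captured rflags with IF already cleared by sysenter;
 * EFL_IF is therefore forced back on in the saved frame so that the
 * user thread resumes with interrupts enabled.
 */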


Entry(idt64_page_fault)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	push	$(T_PAGE_FAULT)
	push	%rax			/* save %rax temporarily */
	leaq	EXT(idt64_unix_scall_copy_args)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)	/* fault during copy args? */
	je	1f			/* - yes, handle copy arg fault */
	testb	$3, 8+ISF64_CS(%rsp)	/* was trap from kernel? */
	jz	L_kernel_trap		/* - yes, handle with care */
	pop	%rax			/* restore %rax, swapgs, and continue */
	swapgs
	jmp	L_dispatch_continue
1:
	add	$(8+ISF64_SIZE), %rsp	/* remove entire intr stack frame */
	jmp	L_copy_args_continue	/* continue system call entry */


/*
 * Debug trap.  Check for single-stepping across system call into
 * kernel.  If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 */
Entry(idt64_debug)
	push	$0			/* error code */
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_DEBUG)

	testb	$3, ISF64_CS(%rsp)
	jnz	L_dispatch

	/*
	 * trap came from kernel mode
	 */

	push	%rax			/* save %rax temporarily */
	lea	EXT(idt64_sysenter)(%rip), %rax
	cmp	%rax, ISF64_RIP+8(%rsp)
	pop	%rax
	jne	L_dispatch
	/*
	 * Interrupt stack frame has been pushed on the temporary stack.
	 * We have to switch to pcb stack and patch up the saved state.
	 */
	mov	%rcx, ISF64_ERR(%rsp)	/* save %rcx in error slot */
	mov	ISF64_SS+8(%rsp), %rcx	/* top of temp stack -> pcb stack */
	xchg	%rcx, %rsp		/* switch to pcb stack */
	push	$(USER_DS)		/* ss */
	push	ISF64_ERR(%rcx)		/* saved %rcx into rsp slot */
	push	ISF64_RFLAGS(%rcx)	/* rflags */
	push	$(SYSENTER_TF_CS)	/* cs - not SYSENTER_CS for iret path */
	mov	ISF64_ERR(%rcx), %rcx	/* restore %rcx */
	jmp	L_sysenter_continue	/* continue sysenter entry */


Entry(idt64_double_fault)
	PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
	pushq	$(T_DOUBLE_FAULT)

	push	%rax
	leaq	EXT(idt64_syscall)(%rip), %rax
	cmp	%rax, ISF64_RIP+8(%rsp)
	pop	%rax
	jne	L_64bit_dispatch

	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_syscall_continue


/*
 * General protection or segment-not-present fault.
 * Check for a GP/NP fault in the kernel_return
 * sequence; if there, report it as a GP/NP fault on the user's instruction.
 *
 * rsp->     0 ISF64_TRAPNO:	trap code (NP or GP)
 *	     8 ISF64_TRAPFN:	trap function
 *	    16 ISF64_ERR:	segment number in error (error code)
 *	    24 ISF64_RIP:	rip
 *	    32 ISF64_CS:	cs
 *	    40 ISF64_RFLAGS:	rflags
 *	    48 ISF64_RSP:	rsp
 *	    56 ISF64_SS:	ss
 *	    64:			old registers (trap is from kernel)
 */
Entry(idt64_gen_prot)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_GENERAL_PROTECTION)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(idt64_stack_fault)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_STACK_FAULT)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(idt64_segnp)
	PUSH_FUNCTION(HNDL_ALLTRAPS)
	pushq	$(T_SEGMENT_NOT_PRESENT)
					/* indicate fault type */
trap_check_kernel_exit:
	testb	$3, ISF64_CS(%rsp)
	jnz	L_dispatch
	/*
	 * trap was from kernel mode,
	 * so check for the kernel exit sequence
	 */
	push	%rax

	leaq	EXT(ret32_iret)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_fault_iret
	leaq	EXT(ret64_iret)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_fault_iret
	leaq	EXT(ret32_set_ds)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_es)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_fs)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	leaq	EXT(ret32_set_gs)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg

	leaq	EXT(idt64_unix_scall_copy_args)(%rip), %rax
	cmp	%rax, 8+ISF64_RIP(%rsp)
	cmove	8+ISF64_RSP(%rsp), %rsp
	je	L_copy_args_continue

	/* fall through */

L_kernel_trap:
	/*
	 * Here after taking an unexpected trap from kernel mode - perhaps
	 * while running in the trampolines hereabouts.
	 * Note: %rax has been pushed on stack.
	 * Make sure we're not on the PCB stack; if so, move to the kernel stack.
	 * This is likely a fatal condition.
	 * But first, try to ensure we have the kernel gs base active...
	 */
	movq	%gs:CPU_THIS, %rax	/* get gs_base into %rax */
	test	%rax, %rax		/* test sign bit (MSB) */
	js	1f			/* -ve kernel addr, no swap */
	swapgs				/* +ve user addr, swap */
1:
	movq	%gs:CPU_UBER_ISF, %rax	/* PCB stack addr */
	subq	%rsp, %rax
	cmpq	$(PAGE_SIZE), %rax	/* current stack in PCB? */
	jb	2f			/* - yes, deal with it */
	pop	%rax			/* - no, restore %rax */
	jmp	L_64bit_dispatch
2:
	/*
	 * Here if %rsp is in the PCB
	 * Copy the interrupt stack frame from PCB stack to kernel stack
	 */
	movq	%gs:CPU_KERNEL_STACK, %rax
	xchgq	%rax, %rsp
	pushq	8+ISF64_SS(%rax)
	pushq	8+ISF64_RSP(%rax)
	pushq	8+ISF64_RFLAGS(%rax)
	pushq	8+ISF64_CS(%rax)
	pushq	8+ISF64_RIP(%rax)
	pushq	8+ISF64_ERR(%rax)
	pushq	8+ISF64_TRAPFN(%rax)
	pushq	8+ISF64_TRAPNO(%rax)
	movq	(%rax), %rax
	jmp	L_64bit_dispatch

/*
 * GP/NP fault on IRET: CS or SS is in error.
 * Note that the user ss was originally 16-byte aligned; we popped the
 * stack back to contain just the rip/cs/rflags/rsp/ss before issuing the iret.
 * On taking the GP/NP fault on the iret instruction, the stack is 16-byte
 * aligned before the interrupt frame is pushed. Hence, an 8-byte padding exists.
 *
 * on SP is
 *  (-  rax saved above, which is immediately popped)
 *    0 ISF64_TRAPNO:	trap code (NP or GP)
 *    8 ISF64_TRAPFN:	trap function
 *   16 ISF64_ERR:	segment number in error (error code)
 *   24 ISF64_RIP:	rip
 *   32 ISF64_CS:	cs
 *   40 ISF64_RFLAGS:	rflags
 *   48 ISF64_RSP:	rsp	--> new trapno
 *   56 ISF64_SS:	ss	--> new trapfn
 *   64	pad		--> new errcode
 *   72	user rip
 *   80	user cs
 *   88	user rflags
 *   96	user rsp
 *  104	user ss	(16-byte aligned)
 */
L_fault_iret:
	pop	%rax			/* recover saved %rax */
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don't need saved rip) */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	%rax, ISF64_RSP(%rsp)	/* put in user trap number */
	mov	ISF64_TRAPFN(%rsp), %rax
	mov	%rax, ISF64_SS(%rsp)	/* put in user trap function */
	mov	ISF64_ERR(%rsp), %rax	/* get error code */
	mov	%rax, 8+ISF64_SS(%rsp)	/* put in user errcode */
	mov	ISF64_RIP(%rsp), %rax	/* restore rax */
	add	$(ISF64_RSP), %rsp	/* reset to new trapno */
	/* now treat as fault from user */
	jmp	L_dispatch

/*
 * Fault restoring a segment register.  All of the saved state is still
 * on the stack untouched since we haven't yet moved the stack pointer.
 */
L_32bit_fault_set_seg:
	swapgs
	pop	%rax			/* toss saved %rax from stack */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	ISF64_TRAPFN(%rsp), %rcx
	mov	ISF64_ERR(%rsp), %rdx
	mov	ISF64_RSP(%rsp), %rsp	/* reset stack to saved state */
	mov	%rax, ISC32_TRAPNO(%rsp)
	mov	%rcx, ISC32_TRAPFN(%rsp)
	mov	%rdx, ISC32_ERR(%rsp)
					/* now treat as fault from user */
					/* except that all the state is */
					/* already saved - we just have to */
					/* move the trapno and error into */
					/* the compatibility frame */
	jmp	L_32bit_dispatch_after_fault


/*
 * Fatal exception handlers:
 */
Entry(idt64_db_task_dbl_fault)
	PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
	pushq	$(T_DOUBLE_FAULT)
	jmp	L_dispatch

Entry(idt64_db_task_stk_fault)
	PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
	pushq	$(T_STACK_FAULT)
	jmp	L_dispatch

Entry(idt64_mc)
	push	$(0)			/* Error */
	PUSH_FUNCTION(HNDL_MACHINE_CHECK)
	pushq	$(T_MACHINE_CHECK)
	jmp	L_dispatch


/* All 'exceptions' enter hndl_alltraps:
 *	rsp	-> x86_saved_state_t
 *	esi	cs at trap
 *
 * The rest of the state is set up as:
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(hndl_alltraps)
	mov	%esi, %eax
	testb	$3, %al
	jz	trap_from_kernel

	TIME_TRAP_UENTRY

	/* Check for active vtimers in the current task */
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	mov	TH_TASK(%rcx), %rbx
	TASK_VTIMER_CHECK(%rbx, %rcx)

	movq	%rsp, %rdi		/* also pass it as arg0 */
	movq	%gs:CPU_KERNEL_STACK, %rsp	/* switch to kernel stack */

	CCALL(user_trap)		/* call user trap routine */
					/* user_trap() unmasks interrupts */
	cli				/* hold off intrs - critical section */
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */

#define CLI cli
#define STI sti

Entry(return_from_trap)
	movq	%gs:CPU_ACTIVE_THREAD, %rsp
	movq	TH_PCB_ISS(%rsp), %rsp	/* switch back to PCB stack */
	movl	%gs:CPU_PENDING_AST, %eax
	testl	%eax, %eax
	je	EXT(return_to_user)	/* branch if no AST */

L_return_from_trap_with_ast:
	movq	%rsp, %r13
	movq	%gs:CPU_KERNEL_STACK, %rsp

	testl	%ecx, %ecx		/* see if we need to check for an EIP in the PFZ */
	je	2f			/* no, go handle the AST */
	cmpl	$(SS_64), SS_FLAVOR(%r13)	/* are we a 64-bit task? */
	je	1f
					/* no... 32-bit user mode */
	movl	R32_EIP(%r13), %edi
	xorq	%rbp, %rbp		/* clear framepointer */
	CCALL(commpage_is_in_pfz32)
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R32_EBX(%r13)	/* let the PFZ know we've pended an AST */
	movq	%r13, %rsp		/* switch back to PCB stack */
	jmp	EXT(return_to_user)
1:
	movq	R64_RIP(%r13), %rdi
	xorq	%rbp, %rbp		/* clear framepointer */
	CCALL(commpage_is_in_pfz64)
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R64_RBX(%r13)	/* let the PFZ know we've pended an AST */
	movq	%r13, %rsp		/* switch back to PCB stack */
	jmp	EXT(return_to_user)
2:
	STI				/* interrupts always enabled on return to user mode */

	xor	%edi, %edi		/* zero %rdi */
	xorq	%rbp, %rbp		/* clear framepointer */
	CCALL(i386_astintr)		/* take the AST */

	CLI
	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)	/* and check again (rare) */

/*
 * Trap from kernel mode.  No need to switch stacks.
 * Interrupts must be off here - we will restore them to their state at
 * the time of the trap as soon as it's safe to do so without recursing
 * on preemption.
 */
hndl_kerntrap:
trap_from_kernel:

	movq	%rsp, %rdi		/* saved state addr */
	pushq	R64_RIP(%rsp)		/* Simulate a CALL from fault point */
	pushq	%rbp			/* Extend framepointer chain */
	movq	%rsp, %rbp
	CCALLWITHSP(kernel_trap)	/* to kernel trap routine */
	popq	%rbp
	addq	$8, %rsp
	cli

	movl	%gs:CPU_PENDING_AST, %eax	/* get pending asts */
	testl	$(AST_URGENT), %eax	/* any urgent preemption? */
	je	ret_to_kernel		/* no, nothing to do */
	cmpl	$(T_PREEMPT), R64_TRAPNO(%rsp)
	je	ret_to_kernel		/* T_PREEMPT handled in kernel_trap() */
	testl	$(EFL_IF), R64_RFLAGS(%rsp)	/* interrupts disabled? */
	je	ret_to_kernel
	cmpl	$0, %gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel
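	/*
	 * The xor/and test below determines whether %rsp lies within the
	 * current kernel stack: masked by kernel_stack_mask, the
	 * exclusive-or with the stack base is zero exactly when both
	 * addresses fall within the same stack-sized, stack-aligned
	 * region.
	 */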
	movq	%gs:CPU_KERNEL_STACK, %rax
	movq	%rsp, %rcx
	xorq	%rax, %rcx
	andq	EXT(kernel_stack_mask)(%rip), %rcx
	testq	%rcx, %rcx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	CCALL1(i386_astintr, $1)	/* take the AST */
	jmp	ret_to_kernel


/*
 * All interrupts on all tasks enter here with:
 *	rsp	-> x86_saved_state_t
 *	esi	cs at trap
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(hndl_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movq	%gs:CPU_INT_STACK_TOP, %rcx
	cmpq	%rsp, %rcx
	jb	1f
	leaq	-INTSTACK_SIZE(%rcx), %rdx
	cmpq	%rsp, %rdx
	jb	int_from_intstack
1:
	xchgq	%rcx, %rsp		/* switch to interrupt stack */

	mov	%cr0, %rax		/* get cr0 */
	orl	$(CR0_TS), %eax		/* or in TS bit */
	mov	%rax, %cr0		/* set cr0 */
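	/*
	 * With CR0.TS set, any floating-point use in interrupt context
	 * raises a no-math fault instead of silently clobbering the
	 * interrupted thread's FP state; on exit below, TS is cleared
	 * (clts) only if the thread has live in-register FP state
	 * (FP_VALID zero), and is set again otherwise.
	 */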

	subq	$8, %rsp		/* for 16-byte stack alignment */
	pushq	%rcx			/* save pointer to old stack */
	movq	%rcx, %gs:CPU_INT_STATE	/* save intr state */

	TIME_INT_ENTRY			/* do timing */

	/* Check for active vtimers in the current task */
	mov	%gs:CPU_ACTIVE_THREAD, %rcx
	mov	TH_TASK(%rcx), %rbx
	TASK_VTIMER_CHECK(%rbx, %rcx)

	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movq	%gs:CPU_INT_STATE, %rdi

	CCALL(interrupt)		/* call generic interrupt routine */

	cli				/* just in case we returned with intrs enabled */
	xor	%rax, %rax
	movq	%rax, %gs:CPU_INT_STATE	/* clear intr state pointer */

	.globl	EXT(return_to_iret)
LEXT(return_to_iret)			/* (label for kdb_kintr and hardclock) */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */

	movq	%gs:CPU_ACTIVE_THREAD, %rax
	movq	TH_PCB_FPS(%rax), %rax	/* get pcb's ifps */
	cmpq	$0, %rax		/* Is there a context */
	je	1f			/* Branch if not */
	movl	FP_VALID(%rax), %eax	/* Load fp_valid */
	cmpl	$0, %eax		/* Check if valid */
	jne	1f			/* Branch if valid */
	clts				/* Clear TS */
	jmp	2f
1:
	mov	%cr0, %rax		/* get cr0 */
	orl	$(CR0_TS), %eax		/* or in TS bit */
	mov	%rax, %cr0		/* set cr0 */
2:
	popq	%rsp			/* switch back to old stack */

	/* Load interrupted code segment into %eax */
	movl	R32_CS(%rsp), %eax	/* assume 32-bit state */
	cmpl	$(SS_64), SS_FLAVOR(%rsp)	/* 64-bit? */
#if DEBUG_IDT64
	jne	4f
	movl	R64_CS(%rsp), %eax	/* 64-bit user mode */
	jmp	3f
4:
	cmpl	$(SS_32), SS_FLAVOR(%rsp)
	je	3f
	POSTCODE2(0x6431)
	CCALL1(panic_idt64, %rsp)
	hlt
#else
	jne	3f
	movl	R64_CS(%rsp), %eax	/* 64-bit user mode */
#endif
3:
	testb	$3, %al			/* user mode, */
	jnz	ast_from_interrupt_user	/* go handle potential ASTs */
	/*
	 * we only want to handle preemption requests if
	 * the interrupt fell in the kernel context
	 * and preemption isn't disabled
	 */
	movl	%gs:CPU_PENDING_AST, %eax
	testl	$(AST_URGENT), %eax	/* any urgent requests? */
	je	ret_to_kernel		/* no, nothing to do */

	cmpl	$0, %gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel		/* yes, skip it */

	movq	%gs:CPU_KERNEL_STACK, %rax
	movq	%rsp, %rcx
	xorq	%rax, %rcx
	andq	EXT(kernel_stack_mask)(%rip), %rcx
	testq	%rcx, %rcx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	/*
	 * Take an AST from kernel space.  We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
	CCALL1(i386_astintr, $1)

	jmp	ret_to_kernel


/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL
	incl	%gs:CPU_NESTED_ISTACK
	mov	%rsp, %rdi		/* x86_saved_state */
	CCALL(interrupt)

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL
	decl	%gs:CPU_NESTED_ISTACK
#if DEBUG_IDT64
	CCALL1(panic_idt64, %rsp)
	POSTCODE2(0x6411)
	hlt
#endif
	jmp	ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST, %eax
	testl	%eax, %eax		/* pending ASTs? */
	je	EXT(ret_to_user)	/* no, nothing to do */

	TIME_TRAP_UENTRY

	movl	$1, %ecx		/* check if we're in the PFZ */
	jmp	L_return_from_trap_with_ast	/* return */

/* Syscall dispatch routines! */

/*
 *
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *	rsp	-> x86_saved_state32_t
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(hndl_sysenter)
	/*
	 * We can be here either for a mach syscall or a unix syscall,
	 * as indicated by the sign of the code:
	 */
	movl	R32_EAX(%rsp), %eax
	testl	%eax, %eax
	js	EXT(hndl_mach_scall)	/* < 0 => mach */
					/* > 0 => unix */

Entry(hndl_unix_scall)
/* If the caller (typically LibSystem) has recorded the cumulative size of
 * the arguments in EAX, copy them over from the user stack directly.
 * We recover from exceptions inline--if the copy loop doesn't complete
 * due to an exception, we fall back to copyin from compatibility mode.
 * We can potentially extend this mechanism to mach traps as well (DRK).
 */
	testl	$(I386_SYSCALL_ARG_BYTES_MASK), %eax
	jz	L_copy_args_continue
	movl	%eax, %ecx
	mov	%gs:CPU_UBER_ARG_STORE_VALID, %rbx
	shrl	$(I386_SYSCALL_ARG_DWORDS_SHIFT), %ecx
	andl	$(I386_SYSCALL_ARG_DWORDS_MASK), %ecx
	mov	%gs:CPU_UBER_ARG_STORE, %rdi
	mov	ISC32_RSP(%rsp), %rsi
	add	$4, %rsi
	movl	$0, (%rbx)

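/*
 * The flag at CPU_UBER_ARG_STORE_VALID brackets the copy: cleared above,
 * set only once the rep movsl completes. If the copy faults,
 * idt64_page_fault recognizes the faulting rip as
 * idt64_unix_scall_copy_args and resumes at L_copy_args_continue with the
 * flag still clear, so the argument fetch falls back to a normal copyin.
 */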
EXT(idt64_unix_scall_copy_args):
	rep movsl
	movl	$1, (%rbx)
L_copy_args_continue:

	TIME_TRAP_UENTRY

	movq	%gs:CPU_KERNEL_STACK, %rdi
	xchgq	%rdi, %rsp		/* switch to kernel stack */
	movq	%gs:CPU_ACTIVE_THREAD, %rcx	/* get current thread */
	movq	TH_TASK(%rcx), %rbx	/* point to current task */
	incl	TH_SYSCALLS_UNIX(%rcx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx, %rcx)

	sti

	CCALL(unix_syscall)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mach_scall)
	TIME_TRAP_UENTRY

	movq	%gs:CPU_KERNEL_STACK, %rdi
	xchgq	%rdi, %rsp		/* switch to kernel stack */
	movq	%gs:CPU_ACTIVE_THREAD, %rcx	/* get current thread */
	movq	TH_TASK(%rcx), %rbx	/* point to current task */
	incl	TH_SYSCALLS_MACH(%rcx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx, %rcx)

	sti

	CCALL(mach_call_munger)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mdep_scall)
	TIME_TRAP_UENTRY

	movq	%gs:CPU_KERNEL_STACK, %rdi
	xchgq	%rdi, %rsp		/* switch to kernel stack */

	/* Check for active vtimers in the current task */
	movq	%gs:CPU_ACTIVE_THREAD, %rcx	/* get current thread */
	movq	TH_TASK(%rcx), %rbx	/* point to current task */
	TASK_VTIMER_CHECK(%rbx, %rcx)

	sti

	CCALL(machdep_syscall)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_diag_scall)
	TIME_TRAP_UENTRY

	movq	%gs:CPU_KERNEL_STACK, %rdi
	xchgq	%rdi, %rsp		/* switch to kernel stack */

	/* Check for active vtimers in the current task */
	movq	%gs:CPU_ACTIVE_THREAD, %rcx	/* get current thread */
	movq	TH_TASK(%rcx), %rbx	/* point to current task */
	TASK_VTIMER_CHECK(%rbx, %rcx)

	pushq	%rdi			/* push pcb stack */

	CCALL(diagCall)			// Call diagnostics

	cli				// Disable interruptions just in case
	cmpl	$0, %eax		// What kind of return is this?
	je	1f			// - branch if bad (zero)
	popq	%rsp			// Get back the pcb stack
	jmp	EXT(return_to_user)	// Normal return, do not check asts...
1:
	CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
					// pass what would be the diag syscall
					// error return - cause an exception
	/* no return */


/*
 * 64bit Tasks
 * System call entries via syscall only:
 *
 *	rsp	-> x86_saved_state64_t
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(hndl_syscall)
	TIME_TRAP_UENTRY

	movq	%gs:CPU_KERNEL_STACK, %rdi
	xchgq	%rdi, %rsp		/* switch to kernel stack */
	movq	%gs:CPU_ACTIVE_THREAD, %rcx	/* get current thread */
	movq	TH_TASK(%rcx), %rbx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%rbx, %rcx)

	/*
	 * We can be here either for a mach, unix, machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
	movl	R64_RAX(%rdi), %eax	/* syscall number/class */
	movl	%eax, %edx
	andl	$(SYSCALL_CLASS_MASK), %edx	/* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_mach_scall64)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_unix_scall64)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_mdep_scall64)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(hndl_diag_scall64)

	/* Syscall class unknown */
	CCALL3(i386_exception, $(EXC_SYSCALL), %rax, $1)
	/* no return */


Entry(hndl_unix_scall64)
	incl	TH_SYSCALLS_UNIX(%rcx)	/* increment call count */
	sti

	CCALL(unix_syscall64)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_mach_scall64)
	incl	TH_SYSCALLS_MACH(%rcx)	/* increment call count */
	sti

	CCALL(mach_call_munger64)
	/*
	 * always returns through thread_exception_return
	 */



Entry(hndl_mdep_scall64)
	sti

	CCALL(machdep_syscall64)
	/*
	 * always returns through thread_exception_return
	 */


Entry(hndl_diag_scall64)
	pushq	%rdi			// Push the previous stack
	CCALL(diagCall64)		// Call diagnostics
	cli				// Disable interruptions just in case
	cmpl	$0, %eax		// What kind of return is this?
	je	1f			// - branch if bad (zero)
	popq	%rsp			// Get back the pcb stack
	jmp	EXT(return_to_user)	// Normal return, do not check asts...
1:
	CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
	/* no return */

Entry(hndl_machine_check)
	CCALL1(panic_machine_check64, %rsp)
	hlt

Entry(hndl_double_fault)
	CCALL1(panic_double_fault64, %rsp)
	hlt