[apple/xnu.git] / osfmk / x86_64 / idt64.s (xnu-1699.32.7)
1 /*
2 * Copyright (c) 2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <i386/asm.h>
29 #include <assym.s>
30 #include <mach_kdb.h>
31 #include <i386/eflags.h>
32 #include <i386/rtclock_asm.h>
33 #include <i386/trap.h>
34 #define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
35 #include <mach/i386/syscall_sw.h>
36 #include <i386/postcode.h>
37 #include <i386/proc_reg.h>
38 #include <mach/exception_types.h>
39
40 #if DEBUG
41 #define DEBUG_IDT64 1
42 #endif
43
44 /*
45 * This is the low-level trap and interrupt handling code associated with
46 * the IDT. It also includes system call handlers for sysenter/syscall.
47 * The IDT itself is defined in mp_desc.c.
48 *
49 * Code here is structured as follows:
50 *
51 * stubs Code called directly from an IDT vector.
52 * All entry points have the "idt64_" prefix and they are built
53 * using macros expanded by the inclusion of idt_table.h.
54 * This code performs vector-dependent identification and jumps
55 * into the dispatch code.
56 *
57 * dispatch The dispatch code is responsible for saving the thread state
58 * (which is either 64-bit or 32-bit) and then jumping to the
59 * class handler identified by the stub.
60 *
61 * returns Code to restore state and return to the previous context.
62 *
63 * handlers There are several classes of handlers:
64 * interrupt - asynchronous events typically from external devices
65 * trap - synchronous events due to thread execution
66 * syscall - synchronous system call request
67 * fatal - fatal traps
68 */
69
70 /*
71 * Handlers:
72 */
73 #define HNDL_ALLINTRS EXT(hndl_allintrs)
74 #define HNDL_ALLTRAPS EXT(hndl_alltraps)
75 #define HNDL_SYSENTER EXT(hndl_sysenter)
76 #define HNDL_SYSCALL EXT(hndl_syscall)
77 #define HNDL_UNIX_SCALL EXT(hndl_unix_scall)
78 #define HNDL_MACH_SCALL EXT(hndl_mach_scall)
79 #define HNDL_MDEP_SCALL EXT(hndl_mdep_scall)
80 #define HNDL_DIAG_SCALL EXT(hndl_diag_scall)
81 #define HNDL_DOUBLE_FAULT EXT(hndl_double_fault)
82 #define HNDL_MACHINE_CHECK EXT(hndl_machine_check)
83
84
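/*
 * PUSH_FUNCTION(func) pushes the address of 'func' without clobbering any
 * register: reserve a stack slot, spill %rax, form the RIP-relative address
 * of func in %rax, store it into the reserved slot, then restore %rax.
 */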
85 #if 1
86 #define PUSH_FUNCTION(func) \
87 sub $8, %rsp ;\
88 push %rax ;\
89 leaq func(%rip), %rax ;\
90 movq %rax, 8(%rsp) ;\
91 pop %rax
92 #else
93 #define PUSH_FUNCTION(func) pushq func
94 #endif
95
96 /* The wrapper for all non-special traps/interrupts */
97 /* Everything up to PUSH_FUNCTION is just to output
98 * the interrupt number to the postcode display
99 */
100 #if DEBUG_IDT64
101 #define IDT_ENTRY_WRAPPER(n, f) \
102 push %rax ;\
103 POSTCODE2(0x6400+n) ;\
104 pop %rax ;\
105 PUSH_FUNCTION(f) ;\
106 pushq $(n) ;\
107 jmp L_dispatch
108 #else
109 #define IDT_ENTRY_WRAPPER(n, f) \
110 PUSH_FUNCTION(f) ;\
111 pushq $(n) ;\
112 jmp L_dispatch
113 #endif
114
115 /* A trap that comes with an error code already on the stack */
116 #define TRAP_ERR(n, f) \
117 Entry(f) ;\
118 IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)
119
120 /* A normal trap */
121 #define TRAP(n, f) \
122 Entry(f) ;\
123 pushq $0 ;\
124 IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)
125
126 #define USER_TRAP TRAP
127
128 /* An interrupt */
129 #define INTERRUPT(n) \
130 Entry(_intr_ ## n) ;\
131 pushq $0 ;\
132 IDT_ENTRY_WRAPPER(n, HNDL_ALLINTRS)
133
134 /* A trap with a special-case handler, hence we don't need to define anything */
135 #define TRAP_SPC(n, f)
136 #define TRAP_IST(n, f)
137 #define USER_TRAP_SPC(n, f)
138
139 /* Generate all the stubs */
140 #include "idt_table.h"
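/*
 * For illustration, a hypothetical idt_table.h entry such as
 *	TRAP(0x00, idt64_zero_div)
 * would expand (in the non-DEBUG case) to a stub of the form:
 *	Entry(idt64_zero_div)
 *	pushq	$0			(dummy error code)
 *	PUSH_FUNCTION(HNDL_ALLTRAPS)	(class handler used by the dispatch code)
 *	pushq	$(0x00)			(vector number)
 *	jmp	L_dispatch
 */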
141
142 /*
143 * Common dispatch point.
144 * Determine what mode has been interrupted and save state accordingly.
145 */
146 L_dispatch:
147 cmpl $(KERNEL64_CS), ISF64_CS(%rsp)
148 je L_64bit_dispatch
149
150 swapgs
151
152 /*
153 * Check for trap from EFI32, and restore cr3 and rsp if so.
154 * A trap from EFI32 is fatal.
155 */
156 cmpl $(KERNEL32_CS), ISF64_CS(%rsp)
157 jne L_dispatch_continue
158 push %rcx
159 mov EXT(pal_efi_saved_cr3)(%rip), %rcx
160 mov %rcx, %cr3
161 leaq 0(%rip), %rcx
162 shr $32, %rcx /* splice the upper 32-bits of rip */
163 shl $32, %rsp /* .. and the lower 32-bits of rsp */
164 shrd $32, %rcx, %rsp /* to recover the full 64-bits of rsp */
165 pop %rcx
166
167 L_dispatch_continue:
168 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
169 je L_32bit_dispatch /* 32-bit user task */
170 /* fall through to 64bit user dispatch */
171
172 /*
173 * Here for 64-bit user task or kernel
174 */
175 L_64bit_dispatch:
176 subq $(ISS64_OFFSET), %rsp
177 movl $(SS_64), SS_FLAVOR(%rsp)
178
179 cld
180
181 /*
182 * Save segment regs - for completeness since they're not used.
183 */
184 mov %fs, R64_FS(%rsp)
185 mov %gs, R64_GS(%rsp)
186
187 /* Save general-purpose registers */
188 mov %rax, R64_RAX(%rsp)
189 mov %rcx, R64_RCX(%rsp)
190 mov %rbx, R64_RBX(%rsp)
191 mov %rbp, R64_RBP(%rsp)
192 mov %r11, R64_R11(%rsp)
193 mov %r12, R64_R12(%rsp)
194 mov %r13, R64_R13(%rsp)
195 mov %r14, R64_R14(%rsp)
196 mov %r15, R64_R15(%rsp)
197
198 /* cr2 is significant only for page-faults */
199 mov %cr2, %rax
200 mov %rax, R64_CR2(%rsp)
201
202 /* Other registers (which may contain syscall args) */
203 mov %rdi, R64_RDI(%rsp) /* arg0 .. */
204 mov %rsi, R64_RSI(%rsp)
205 mov %rdx, R64_RDX(%rsp)
206 mov %r10, R64_R10(%rsp)
207 mov %r8, R64_R8(%rsp)
208 mov %r9, R64_R9(%rsp) /* .. arg5 */
209
210 mov R64_TRAPNO(%rsp), %ebx /* %ebx := trapno for later */
211 mov R64_TRAPFN(%rsp), %rdx /* %rdx := trapfn for later */
212 mov R64_CS(%rsp), %esi /* %esi := cs for later */
213
214 jmp L_common_dispatch
215
216 L_64bit_entry_reject:
217 /*
218 * Here for a 64-bit user attempting an invalid kernel entry.
219 */
220 pushq %rax
221 leaq HNDL_ALLTRAPS(%rip), %rax
222 movq %rax, ISF64_TRAPFN+8(%rsp)
223 popq %rax
224 movq $(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
225 jmp L_64bit_dispatch
226
227 L_32bit_entry_check:
228 /*
229 * Check we're not a confused 64-bit user.
230 */
231 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
232 jne L_64bit_entry_reject
233 /* fall through to 32-bit handler: */
234
235 L_32bit_dispatch: /* 32-bit user task */
236 subq $(ISC32_OFFSET), %rsp
237 movl $(SS_32), SS_FLAVOR(%rsp)
238
239 cld
240 /*
241 * Save segment regs
242 */
243 mov %ds, R32_DS(%rsp)
244 mov %es, R32_ES(%rsp)
245 mov %fs, R32_FS(%rsp)
246 mov %gs, R32_GS(%rsp)
247
248 /*
249 * Save general 32-bit registers
250 */
251 mov %eax, R32_EAX(%rsp)
252 mov %ebx, R32_EBX(%rsp)
253 mov %ecx, R32_ECX(%rsp)
254 mov %edx, R32_EDX(%rsp)
255 mov %ebp, R32_EBP(%rsp)
256 mov %esi, R32_ESI(%rsp)
257 mov %edi, R32_EDI(%rsp)
258
259 /* Unconditionally save cr2; only meaningful on page faults */
260 mov %cr2, %rax
261 mov %eax, R32_CR2(%rsp)
262
263 /*
264 * Copy registers already saved in the machine state
265 * (in the interrupt stack frame) into the compat save area.
266 */
267 mov ISC32_RIP(%rsp), %eax
268 mov %eax, R32_EIP(%rsp)
269 mov ISC32_RFLAGS(%rsp), %eax
270 mov %eax, R32_EFLAGS(%rsp)
271 mov ISC32_RSP(%rsp), %eax
272 mov %eax, R32_UESP(%rsp)
273 mov ISC32_SS(%rsp), %eax
274 mov %eax, R32_SS(%rsp)
275 L_32bit_dispatch_after_fault:
276 mov ISC32_CS(%rsp), %esi /* %esi := %cs for later */
277 mov %esi, R32_CS(%rsp)
278 mov ISC32_TRAPNO(%rsp), %ebx /* %ebx := trapno for later */
279 mov %ebx, R32_TRAPNO(%rsp)
280 mov ISC32_ERR(%rsp), %eax
281 mov %eax, R32_ERR(%rsp)
282 mov ISC32_TRAPFN(%rsp), %rdx /* %rdx := trapfn for later */
283
284 L_common_dispatch:
285 /*
286 * On entering the kernel, we don't need to switch cr3
287 * because the kernel shares the user's address space.
288 * But we mark the kernel's cr3 as "active".
289 * If, however, the invalid cr3 flag is set, we have to flush tlbs
290 * since the kernel's mapping was changed while we were in userspace.
291 *
292 * But: if global no_shared_cr3 is TRUE we do switch to the kernel's cr3
293 * so that illicit accesses to userspace can be trapped.
294 */
295 mov %gs:CPU_KERNEL_CR3, %rcx
296 mov %rcx, %gs:CPU_ACTIVE_CR3
297 test $3, %esi /* user/kernel? */
298 jz 1f /* skip cr3 reload from kernel */
299 xor %rbp, %rbp
300 cmpl $0, EXT(no_shared_cr3)(%rip)
301 je 1f
302 mov %rcx, %cr3 /* load kernel cr3 */
303 jmp 2f /* and skip tlb flush test */
304 1:
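/*
 * The 64-bit load below (offset +4, then a 32-bit shift) picks up the word
 * immediately following cpu_active_cr3 - evidently the cpu_tlb_invalid flags
 * cleared just after. If any flag is set a flush is due: bit 16 ("Global")
 * asks for global mappings too, done by toggling CR4.PGE; otherwise a CR3
 * reload flushes the non-global entries.
 */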
305 mov %gs:CPU_ACTIVE_CR3+4, %rcx
306 shr $32, %rcx
307 testl %ecx, %ecx
308 jz 2f
309 movl $0, %gs:CPU_TLB_INVALID
310 testl $(1<<16), %ecx /* Global? */
311 jz 11f
312 mov %cr4, %rcx /* RMWW CR4, for lack of an alternative*/
313 and $(~CR4_PGE), %rcx
314 mov %rcx, %cr4
315 or $(CR4_PGE), %rcx
316 mov %rcx, %cr4
317 jmp 2f
318
319 11: mov %cr3, %rcx
320 mov %rcx, %cr3
321 2:
322 mov %gs:CPU_ACTIVE_THREAD, %rcx /* Get the active thread */
323 cmpq $0, TH_PCB_IDS(%rcx) /* Is there a debug register state? */
324 je 3f
325 mov $0, %rcx /* If so, reset DR7 (the control) */
326 mov %rcx, %dr7
327 3:
328 incl %gs:hwIntCnt(,%ebx,4) // Bump the trap/intr count
329 /* Dispatch the designated handler */
330 jmp *%rdx
331
332 /*
333 * Control is passed here to return to user.
334 */
335 Entry(return_to_user)
336 TIME_TRAP_UEXIT
337
338 Entry(ret_to_user)
339 // XXX It would be nice to tidy up this debug register restore sequence...
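/*
 * Note: DR0-DR3 are loaded directly here, but the thread's DR7 value is only
 * staged in the per-cpu CPU_DR7 slot; it is written to %dr7 at label 3 below,
 * after the cr3 handling, presumably so the breakpoints cannot fire while the
 * return path is still in flight.
 */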
340 mov %gs:CPU_ACTIVE_THREAD, %rdx
341 movq TH_PCB_IDS(%rdx),%rax /* Obtain this thread's debug state */
342
343 cmpq $0,%rax /* Is there a debug register context? */
344 je 2f /* branch if not */
345 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
346 jne 1f
347 movl DS_DR0(%rax), %ecx /* If so, load the 32 bit DRs */
348 movq %rcx, %dr0
349 movl DS_DR1(%rax), %ecx
350 movq %rcx, %dr1
351 movl DS_DR2(%rax), %ecx
352 movq %rcx, %dr2
353 movl DS_DR3(%rax), %ecx
354 movq %rcx, %dr3
355 movl DS_DR7(%rax), %ecx
356 movq %rcx, %gs:CPU_DR7
357 jmp 2f
358 1:
359 mov DS64_DR0(%rax), %rcx /* Load the full width DRs*/
360 mov %rcx, %dr0
361 mov DS64_DR1(%rax), %rcx
362 mov %rcx, %dr1
363 mov DS64_DR2(%rax), %rcx
364 mov %rcx, %dr2
365 mov DS64_DR3(%rax), %rcx
366 mov %rcx, %dr3
367 mov DS64_DR7(%rax), %rcx
368 mov %rcx, %gs:CPU_DR7
369 2:
370 /*
371 * On exiting the kernel there's no need to switch cr3 since we're
372 * already running in the user's address space which includes the
373 * kernel. Nevertheless, we now mark the task's cr3 as active.
374 * But, if no_shared_cr3 is set, we do need to switch cr3 at this point.
375 */
376 mov %gs:CPU_TASK_CR3, %rcx
377 mov %rcx, %gs:CPU_ACTIVE_CR3
378 movl EXT(no_shared_cr3)(%rip), %eax
379 test %eax, %eax /* -no_shared_cr3 */
380 jz 3f
381 mov %rcx, %cr3
382 3:
383 mov %gs:CPU_DR7, %rax /* Is there a debug control register?*/
384 cmp $0, %rax
385 je 4f
386 mov %rax, %dr7 /* Set DR7 */
387 movq $0, %gs:CPU_DR7
388 4:
389 cmpl $(SS_64), SS_FLAVOR(%rsp) /* 64-bit state? */
390 je L_64bit_return
391
392 L_32bit_return:
393 #if DEBUG_IDT64
394 cmpl $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
395 je 1f
396 cli
397 POSTCODE2(0x6432)
398 CCALL1(panic_idt64, %rsp)
399 1:
400 #endif /* DEBUG_IDT64 */
401
402 /*
403 * Restore registers into the machine state for iret.
404 */
405 movl R32_EIP(%rsp), %eax
406 movl %eax, ISC32_RIP(%rsp)
407 movl R32_EFLAGS(%rsp), %eax
408 movl %eax, ISC32_RFLAGS(%rsp)
409 movl R32_CS(%rsp), %eax
410 movl %eax, ISC32_CS(%rsp)
411 movl R32_UESP(%rsp), %eax
412 movl %eax, ISC32_RSP(%rsp)
413 movl R32_SS(%rsp), %eax
414 movl %eax, ISC32_SS(%rsp)
415
416 /*
417 * Restore general 32-bit registers
418 */
419 movl R32_EAX(%rsp), %eax
420 movl R32_EBX(%rsp), %ebx
421 movl R32_ECX(%rsp), %ecx
422 movl R32_EDX(%rsp), %edx
423 movl R32_EBP(%rsp), %ebp
424 movl R32_ESI(%rsp), %esi
425 movl R32_EDI(%rsp), %edi
426
427 /*
428 * Restore segment registers. We may take an exception here, but
429 * we've got enough space left in the save frame area to absorb
430 * a hardware frame plus the trapfn and trapno.
431 */
432 swapgs
433 EXT(ret32_set_ds):
434 movw R32_DS(%rsp), %ds
435 EXT(ret32_set_es):
436 movw R32_ES(%rsp), %es
437 EXT(ret32_set_fs):
438 movw R32_FS(%rsp), %fs
439 EXT(ret32_set_gs):
440 movw R32_GS(%rsp), %gs
441
442 /* pop compat frame + trapno, trapfn and error */
443 add $(ISC32_OFFSET)+8+8+8, %rsp
444 cmp $(SYSENTER_CS),ISF64_CS-8-8-8(%rsp)
445 /* test for fast entry/exit */
446 je L_fast_exit
447 EXT(ret32_iret):
448 iretq /* return from interrupt */
449
450 L_fast_exit:
451 pop %rdx /* user return eip */
452 pop %rcx /* pop and toss cs */
453 andl $(~EFL_IF), (%rsp) /* clear interrupts enable, sti below */
454 popf /* flags - carry denotes failure */
455 pop %rcx /* user return esp */
456 sti /* interrupts enabled after sysexit */
457 sysexit /* 32-bit sysexit */
458
459 ret_to_kernel:
460 #if DEBUG_IDT64
461 cmpl $(SS_64), SS_FLAVOR(%rsp) /* 64-bit state? */
462 je 1f
463 cli
464 POSTCODE2(0x6464)
465 CCALL1(panic_idt64, %rsp)
466 hlt
467 1:
468 cmpl $(KERNEL64_CS), R64_CS(%rsp)
469 je 2f
470 CCALL1(panic_idt64, %rsp)
471 hlt
472 2:
473 #endif
474
475 L_64bit_return:
476 testb $3, R64_CS(%rsp) /* returning to user-space? */
477 jz 1f
478 swapgs
479 1:
480
481 /*
482 * Restore general 64-bit registers
483 */
484 mov R64_R15(%rsp), %r15
485 mov R64_R14(%rsp), %r14
486 mov R64_R13(%rsp), %r13
487 mov R64_R12(%rsp), %r12
488 mov R64_R11(%rsp), %r11
489 mov R64_R10(%rsp), %r10
490 mov R64_R9(%rsp), %r9
491 mov R64_R8(%rsp), %r8
492 mov R64_RSI(%rsp), %rsi
493 mov R64_RDI(%rsp), %rdi
494 mov R64_RBP(%rsp), %rbp
495 mov R64_RDX(%rsp), %rdx
496 mov R64_RBX(%rsp), %rbx
497 mov R64_RCX(%rsp), %rcx
498 mov R64_RAX(%rsp), %rax
499
500 add $(ISS64_OFFSET)+24, %rsp /* pop saved state frame +
501 trapno + trapfn and error */
502 cmpl $(SYSCALL_CS),ISF64_CS-24(%rsp)
503 /* test for fast entry/exit */
504 je L_sysret
505 .globl _dump_iretq
506 EXT(ret64_iret):
507 iretq /* return from interrupt */
508
509 L_sysret:
510 /*
511 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
512 * rcx user rip
513 * r11 user rflags
514 * rsp user stack pointer
515 */
516 mov ISF64_RIP-24(%rsp), %rcx
517 mov ISF64_RFLAGS-24(%rsp), %r11
518 mov ISF64_RSP-24(%rsp), %rsp
519 sysretq /* return from system call */
520
521
522
523 /*
524 * System call handlers.
525 * These are entered via a syscall interrupt. The system call number in %rax
526 * is saved to the error code slot in the stack frame. We then branch to the
527 * common state saving code.
528 */
529
530 #ifndef UNIX_INT
531 #error NO UNIX INT!!!
532 #endif
533 Entry(idt64_unix_scall)
534 swapgs /* switch to kernel gs (cpu_data) */
535 pushq %rax /* save system call number */
536 PUSH_FUNCTION(HNDL_UNIX_SCALL)
537 pushq $(UNIX_INT)
538 jmp L_32bit_entry_check
539
540
541 Entry(idt64_mach_scall)
542 swapgs /* switch to kernel gs (cpu_data) */
543 pushq %rax /* save system call number */
544 PUSH_FUNCTION(HNDL_MACH_SCALL)
545 pushq $(MACH_INT)
546 jmp L_32bit_entry_check
547
548
549 Entry(idt64_mdep_scall)
550 swapgs /* switch to kernel gs (cpu_data) */
551 pushq %rax /* save system call number */
552 PUSH_FUNCTION(HNDL_MDEP_SCALL)
553 pushq $(MACHDEP_INT)
554 jmp L_32bit_entry_check
555
556
557 Entry(idt64_diag_scall)
558 swapgs /* switch to kernel gs (cpu_data) */
559 push %rax /* save system call number */
560 PUSH_FUNCTION(HNDL_DIAG_SCALL)
561 pushq $(DIAG_INT)
562 jmp L_32bit_entry_check
563
564 Entry(hi64_syscall)
565 Entry(idt64_syscall)
566 L_syscall_continue:
567 swapgs /* Kapow! get per-cpu data area */
568 mov %rsp, %gs:CPU_UBER_TMP /* save user stack */
569 mov %gs:CPU_UBER_ISF, %rsp /* switch stack to pcb */
570
571 /*
572 * Save values in the ISF frame in the PCB
573 * to cons up the saved machine state.
574 */
575 movl $(USER_DS), ISF64_SS(%rsp)
576 movl $(SYSCALL_CS), ISF64_CS(%rsp) /* cs - a pseudo-segment */
577 mov %r11, ISF64_RFLAGS(%rsp) /* rflags */
578 mov %rcx, ISF64_RIP(%rsp) /* rip */
579 mov %gs:CPU_UBER_TMP, %rcx
580 mov %rcx, ISF64_RSP(%rsp) /* user stack */
581 mov %rax, ISF64_ERR(%rsp) /* err/rax - syscall code */
582 movq $(T_SYSCALL), ISF64_TRAPNO(%rsp) /* trapno */
583 leaq HNDL_SYSCALL(%rip), %r11;
584 movq %r11, ISF64_TRAPFN(%rsp)
585 jmp L_64bit_dispatch /* this can only be a 64-bit task */
586
587 /*
588 * sysenter entry point
589 * Requires user code to set up:
590 * edx: user instruction pointer (return address)
591 * ecx: user stack pointer
592 * on which is pushed stub ret addr and saved ebx
593 * Return to user-space is made using sysexit.
594 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
595 * or requiring ecx to be preserved.
596 */
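/*
 * An illustrative user-side shape (not the actual Libsyscall stub; names are
 * hypothetical) that satisfies the requirements above:
 *	movl	$SYS_something, %eax	(system call number)
 *	call	_sysenter_stub
 *	...				(sysexit resumes here)
 * _sysenter_stub:
 *	popl	%edx			(edx := caller's return eip)
 *	movl	%esp, %ecx		(ecx := user stack pointer)
 *	sysenter
 */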
597 Entry(hi64_sysenter)
598 Entry(idt64_sysenter)
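/*
 * On sysenter the CPU loads rsp from the SYSENTER_ESP MSR; the slot it points
 * at evidently holds the PCB stack pointer, which the instruction below
 * dereferences to switch stacks before building the fake hardware frame.
 */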
599 movq (%rsp), %rsp
600 /*
601 * Push values on to the PCB stack
602 * to cons up the saved machine state.
603 */
604 push $(USER_DS) /* ss */
605 push %rcx /* uesp */
606 pushf /* flags */
607 /*
608 * Clear, among others, the Nested Task (NT) flags bit;
609 * this is zeroed by INT, but not by SYSENTER.
610 */
611 push $0
612 popf
613 push $(SYSENTER_CS) /* cs */
614 L_sysenter_continue:
615 swapgs /* switch to kernel gs (cpu_data) */
616 push %rdx /* eip */
617 push %rax /* err/eax - syscall code */
618 PUSH_FUNCTION(HNDL_SYSENTER)
619 pushq $(T_SYSENTER)
620 orl $(EFL_IF), ISF64_RFLAGS(%rsp)
621 jmp L_32bit_entry_check
622
623
624 Entry(idt64_page_fault)
625 PUSH_FUNCTION(HNDL_ALLTRAPS)
626 push $(T_PAGE_FAULT)
627 push %rax /* save %rax temporarily */
628 leaq EXT(idt64_unix_scall_copy_args)(%rip), %rax
629 cmp %rax, 8+ISF64_RIP(%rsp) /* fault during copy args? */
630 je 1f /* - yes, handle copy arg fault */
631 testb $3, 8+ISF64_CS(%rsp) /* was trap from kernel? */
632 jz L_kernel_trap /* - yes, handle with care */
633 pop %rax /* restore %rax, swapgs, and continue */
634 swapgs
635 jmp L_dispatch_continue
636 1:
637 add $(8+ISF64_SIZE), %rsp /* remove entire intr stack frame */
638 jmp L_copy_args_continue /* continue system call entry */
639
640
641 /*
642 * Debug trap. Check for single-stepping across a system call into
643 * the kernel. If this is the case, taking the debug trap has turned
644 * off single-stepping - save the flags register with the trace
645 * bit set.
646 */
647 Entry(idt64_debug)
648 push $0 /* error code */
649 PUSH_FUNCTION(HNDL_ALLTRAPS)
650 pushq $(T_DEBUG)
651
652 testb $3, ISF64_CS(%rsp)
653 jnz L_dispatch
654
655 /*
656 * trap came from kernel mode
657 */
658
659 push %rax /* save %rax temporarily */
660 lea EXT(idt64_sysenter)(%rip), %rax
661 cmp %rax, ISF64_RIP+8(%rsp)
662 pop %rax
663 jne L_dispatch
664 /*
665 * Interrupt stack frame has been pushed on the temporary stack.
666 * We have to switch to pcb stack and patch up the saved state.
667 */
668 mov %rcx, ISF64_ERR(%rsp) /* save %rcx in error slot */
669 mov ISF64_SS+8(%rsp), %rcx /* top of temp stack -> pcb stack */
670 xchg %rcx,%rsp /* switch to pcb stack */
671 push $(USER_DS) /* ss */
672 push ISF64_ERR(%rcx) /* saved %rcx into rsp slot */
673 push ISF64_RFLAGS(%rcx) /* rflags */
674 push $(SYSENTER_TF_CS) /* cs - not SYSENTER_CS for iret path */
675 mov ISF64_ERR(%rcx),%rcx /* restore %rcx */
676 jmp L_sysenter_continue /* continue sysenter entry */
677
678
679 Entry(idt64_double_fault)
680 PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
681 pushq $(T_DOUBLE_FAULT)
682
683 push %rax
684 leaq EXT(idt64_syscall)(%rip), %rax
685 cmp %rax, ISF64_RIP+8(%rsp)
686 pop %rax
687 jne L_64bit_dispatch
688
689 mov ISF64_RSP(%rsp), %rsp
690 jmp L_syscall_continue
691
692
693 /*
694 * General protection or segment-not-present fault.
695 * Check for a GP/NP fault in the kernel_return
696 * sequence; if there, report it as a GP/NP fault on the user's instruction.
697 *
698 * rsp-> 0 ISF64_TRAPNO: trap code (NP or GP)
699 * 8 ISF64_TRAPFN: trap function
700 * 16 ISF64_ERR: segment number in error (error code)
701 * 24 ISF64_RIP: rip
702 * 32 ISF64_CS: cs
703 * 40 ISF64_RFLAGS: rflags
704 * 48 ISF64_RSP: rsp
705 * 56 ISF64_SS: ss
706 * 64: old registers (trap is from kernel)
707 */
708 Entry(idt64_gen_prot)
709 PUSH_FUNCTION(HNDL_ALLTRAPS)
710 pushq $(T_GENERAL_PROTECTION)
711 jmp trap_check_kernel_exit /* check for kernel exit sequence */
712
713 Entry(idt64_stack_fault)
714 PUSH_FUNCTION(HNDL_ALLTRAPS)
715 pushq $(T_STACK_FAULT)
716 jmp trap_check_kernel_exit /* check for kernel exit sequence */
717
718 Entry(idt64_segnp)
719 PUSH_FUNCTION(HNDL_ALLTRAPS)
720 pushq $(T_SEGMENT_NOT_PRESENT)
721 /* indicate fault type */
722 trap_check_kernel_exit:
723 testb $3,ISF64_CS(%rsp)
724 jnz L_dispatch
725 /*
726 * trap was from kernel mode,
727 * so check for the kernel exit sequence
728 */
729 push %rax
730
731 leaq EXT(ret32_iret)(%rip), %rax
732 cmp %rax, 8+ISF64_RIP(%rsp)
733 je L_fault_iret
734 leaq EXT(ret64_iret)(%rip), %rax
735 cmp %rax, 8+ISF64_RIP(%rsp)
736 je L_fault_iret
737 leaq EXT(ret32_set_ds)(%rip), %rax
738 cmp %rax, 8+ISF64_RIP(%rsp)
739 je L_32bit_fault_set_seg
740 leaq EXT(ret32_set_es)(%rip), %rax
741 cmp %rax, 8+ISF64_RIP(%rsp)
742 je L_32bit_fault_set_seg
743 leaq EXT(ret32_set_fs)(%rip), %rax
744 cmp %rax, 8+ISF64_RIP(%rsp)
745 je L_32bit_fault_set_seg
746 leaq EXT(ret32_set_gs)(%rip), %rax
747 cmp %rax, 8+ISF64_RIP(%rsp)
748 je L_32bit_fault_set_seg
749
750 leaq EXT(idt64_unix_scall_copy_args)(%rip), %rax
751 cmp %rax, 8+ISF64_RIP(%rsp)
752 cmove 8+ISF64_RSP(%rsp), %rsp
753 je L_copy_args_continue
754
755 /* fall through */
756
757 L_kernel_trap:
758 /*
759 * Here after taking an unexpected trap from kernel mode - perhaps
760 * while running in the trampolines hereabouts.
761 * Note: %rax has been pushed on stack.
762 * Make sure we're not on the PCB stack; if so, move to the kernel stack.
763 * This is likely a fatal condition.
764 * But first, try to ensure we have the kernel gs base active...
765 */
766 movq %gs:CPU_THIS, %rax /* get gs_base into %rax */
767 test %rax, %rax /* test sign bit (MSB) */
768 js 1f /* -ve kernel addr, no swap */
769 swapgs /* +ve user addr, swap */
770 1:
771 movq %gs:CPU_UBER_ISF, %rax /* PCB stack addr */
772 subq %rsp, %rax
773 cmpq $(PAGE_SIZE), %rax /* current stack in PCB? */
774 jb 2f /* - yes, deal with it */
775 pop %rax /* - no, restore %rax */
776 jmp L_64bit_dispatch
777 2:
778 /*
779 * Here if %rsp is in the PCB
780 * Copy the interrupt stack frame from PCB stack to kernel stack
781 */
782 movq %gs:CPU_KERNEL_STACK, %rax
783 xchgq %rax, %rsp
784 pushq 8+ISF64_SS(%rax)
785 pushq 8+ISF64_RSP(%rax)
786 pushq 8+ISF64_RFLAGS(%rax)
787 pushq 8+ISF64_CS(%rax)
788 pushq 8+ISF64_RIP(%rax)
789 pushq 8+ISF64_ERR(%rax)
790 pushq 8+ISF64_TRAPFN(%rax)
791 pushq 8+ISF64_TRAPNO(%rax)
792 movq (%rax), %rax
793 jmp L_64bit_dispatch
794
795 /*
796 * GP/NP fault on IRET: CS or SS is in error.
797 * Note that the user stack is originally 16-byte aligned; we'd popped the
798 * stack back to contain just the rip/cs/rflags/rsp/ss before issuing the iret.
799 * On taking the GP/NP fault on the iret instruction, the stack is 16-byte
800 * aligned again before the interrupt frame is pushed. Hence, an 8-byte padding exists.
801 *
802 * on SP is
803 * (- rax saved above, which is immediately popped)
804 * 0 ISF64_TRAPNO: trap code (NP or GP)
805 * 8 ISF64_TRAPFN: trap function
806 * 16 ISF64_ERR: segment number in error (error code)
807 * 24 ISF64_RIP: rip
808 * 32 ISF64_CS: cs
809 * 40 ISF64_RFLAGS: rflags
810 * 48 ISF64_RSP: rsp --> new trapno
811 * 56 ISF64_SS: ss --> new trapfn
812 * 64 pad --> new errcode
813 * 72 user rip
814 * 80 user cs
815 * 88 user rflags
816 * 96 user rsp
817 * 104 user ss (16-byte aligned)
818 */
819 L_fault_iret:
820 pop %rax /* recover saved %rax */
821 mov %rax, ISF64_RIP(%rsp) /* save rax (we don't need saved rip) */
822 mov ISF64_TRAPNO(%rsp), %rax
823 mov %rax, ISF64_RSP(%rsp) /* put in user trap number */
824 mov ISF64_TRAPFN(%rsp), %rax
825 mov %rax, ISF64_SS(%rsp) /* put in user trap function */
826 mov ISF64_ERR(%rsp), %rax /* get error code */
827 mov %rax, 8+ISF64_SS(%rsp) /* put in user errcode */
828 mov ISF64_RIP(%rsp), %rax /* restore rax */
829 add $(ISF64_RSP),%rsp /* reset to new trapno */
830 /* now treat as fault from user */
831 jmp L_dispatch
832
833 /*
834 * Fault restoring a segment register. All of the saved state is still
835 * on the stack untouched since we haven't yet moved the stack pointer.
836 */
837 L_32bit_fault_set_seg:
838 swapgs
839 pop %rax /* toss saved %rax from stack */
840 mov ISF64_TRAPNO(%rsp), %rax
841 mov ISF64_TRAPFN(%rsp), %rcx
842 mov ISF64_ERR(%rsp), %rdx
843 mov ISF64_RSP(%rsp), %rsp /* reset stack to saved state */
844 mov %rax,ISC32_TRAPNO(%rsp)
845 mov %rcx,ISC32_TRAPFN(%rsp)
846 mov %rdx,ISC32_ERR(%rsp)
847 /* now treat as fault from user */
848 /* except that all the state is */
849 /* already saved - we just have to */
850 /* move the trapno and error into */
851 /* the compatibility frame */
852 jmp L_32bit_dispatch_after_fault
853
854
855 /*
856 * Fatal exception handlers:
857 */
858 Entry(idt64_db_task_dbl_fault)
859 PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
860 pushq $(T_DOUBLE_FAULT)
861 jmp L_dispatch
862
863 Entry(idt64_db_task_stk_fault)
864 PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
865 pushq $(T_STACK_FAULT)
866 jmp L_dispatch
867
868 Entry(idt64_mc)
869 push $(0) /* Error */
870 PUSH_FUNCTION(HNDL_MACHINE_CHECK)
871 pushq $(T_MACHINE_CHECK)
872 jmp L_dispatch
873
874
875 /* All 'exceptions' enter hndl_alltraps:
876 * rsp -> x86_saved_state_t
877 * esi cs at trap
878 *
879 * The rest of the state is set up as:
880 * interrupts disabled
881 * direction flag cleared
882 */
883 Entry(hndl_alltraps)
884 mov %esi, %eax
885 testb $3, %al
886 jz trap_from_kernel
887
888 TIME_TRAP_UENTRY
889
890 /* Check for active vtimers in the current task */
891 mov %gs:CPU_ACTIVE_THREAD, %rcx
892 mov TH_TASK(%rcx), %rbx
893 TASK_VTIMER_CHECK(%rbx, %rcx)
894
895 movq %rsp, %rdi /* also pass it as arg0 */
896 movq %gs:CPU_KERNEL_STACK,%rsp /* switch to kernel stack */
897
898 CCALL(user_trap) /* call user trap routine */
899 /* user_trap() unmasks interrupts */
900 cli /* hold off intrs - critical section */
901 xorl %ecx, %ecx /* don't check if we're in the PFZ */
902
903 #define CLI cli
904 #define STI sti
905
906 Entry(return_from_trap)
907 movq %gs:CPU_ACTIVE_THREAD,%rsp
908 movq TH_PCB_ISS(%rsp), %rsp /* switch back to PCB stack */
909 movl %gs:CPU_PENDING_AST,%eax
910 testl %eax,%eax
911 je EXT(return_to_user) /* branch if no AST */
912
913 L_return_from_trap_with_ast:
914 movq %rsp, %r13
915 movq %gs:CPU_KERNEL_STACK, %rsp
916
917 testl %ecx, %ecx /* see if we need to check for an EIP in the PFZ */
918 je 2f /* no, go handle the AST */
919 cmpl $(SS_64), SS_FLAVOR(%r13) /* are we a 64-bit task? */
920 je 1f
921 /* no... 32-bit user mode */
922 movl R32_EIP(%r13), %edi
923 xorq %rbp, %rbp /* clear framepointer */
924 CCALL(commpage_is_in_pfz32)
925 testl %eax, %eax
926 je 2f /* not in the PFZ... go service AST */
927 movl %eax, R32_EBX(%r13) /* let the PFZ know we've pended an AST */
928 movq %r13, %rsp /* switch back to PCB stack */
929 jmp EXT(return_to_user)
930 1:
931 movq R64_RIP(%r13), %rdi
932 xorq %rbp, %rbp /* clear framepointer */
933 CCALL(commpage_is_in_pfz64)
934 testl %eax, %eax
935 je 2f /* not in the PFZ... go service AST */
936 movl %eax, R64_RBX(%r13) /* let the PFZ know we've pended an AST */
937 movq %r13, %rsp /* switch back to PCB stack */
938 jmp EXT(return_to_user)
939 2:
940 STI /* interrupts always enabled on return to user mode */
941
942 xor %edi, %edi /* zero %rdi */
943 xorq %rbp, %rbp /* clear framepointer */
944 CCALL(i386_astintr) /* take the AST */
945
946 CLI
947 xorl %ecx, %ecx /* don't check if we're in the PFZ */
948 jmp EXT(return_from_trap) /* and check again (rare) */
949
950 /*
951 * Trap from kernel mode. No need to switch stacks.
952 * Interrupts must be off here - we will restore them to their state at the
953 * time of the trap as soon as it's safe to do so without recursing into preemption.
954 */
955 hndl_kerntrap:
956 trap_from_kernel:
957
958 movq %rsp, %rdi /* saved state addr */
959 pushq R64_RIP(%rsp) /* Simulate a CALL from fault point */
960 pushq %rbp /* Extend framepointer chain */
961 movq %rsp, %rbp
962 CCALLWITHSP(kernel_trap) /* to kernel trap routine */
963 popq %rbp
964 addq $8, %rsp
965 cli
966
967 movl %gs:CPU_PENDING_AST,%eax /* get pending asts */
968 testl $(AST_URGENT),%eax /* any urgent preemption? */
969 je ret_to_kernel /* no, nothing to do */
970 cmpl $(T_PREEMPT),R64_TRAPNO(%rsp)
971 je ret_to_kernel /* T_PREEMPT handled in kernel_trap() */
972 testl $(EFL_IF),R64_RFLAGS(%rsp) /* interrupts disabled? */
973 je ret_to_kernel
974 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
975 jne ret_to_kernel
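/*
 * %rsp is on the current kernel stack iff it agrees with CPU_KERNEL_STACK
 * in all bits selected by kernel_stack_mask, i.e.
 * (rsp ^ kernel_stack_top) & kernel_stack_mask == 0.
 */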
976 movq %gs:CPU_KERNEL_STACK,%rax
977 movq %rsp,%rcx
978 xorq %rax,%rcx
979 andq EXT(kernel_stack_mask)(%rip),%rcx
980 testq %rcx,%rcx /* are we on the kernel stack? */
981 jne ret_to_kernel /* no, skip it */
982
983 CCALL1(i386_astintr, $1) /* take the AST */
984 jmp ret_to_kernel
985
986
987 /*
988 * All interrupts on all tasks enter here with:
989 * rsp-> x86_saved_state_t
990 * esi cs at trap
991 *
992 * interrupts disabled
993 * direction flag cleared
994 */
995 Entry(hndl_allintrs)
996 /*
997 * test whether already on interrupt stack
998 */
999 movq %gs:CPU_INT_STACK_TOP,%rcx
1000 cmpq %rsp,%rcx
1001 jb 1f
1002 leaq -INTSTACK_SIZE(%rcx),%rdx
1003 cmpq %rsp,%rdx
1004 jb int_from_intstack
1005 1:
1006 xchgq %rcx,%rsp /* switch to interrupt stack */
1007
1008 mov %cr0,%rax /* get cr0 */
1009 orl $(CR0_TS),%eax /* or in TS bit */
1010 mov %rax,%cr0 /* set cr0 */
1011
1012 subq $8, %rsp /* for 16-byte stack alignment */
1013 pushq %rcx /* save pointer to old stack */
1014 movq %rcx,%gs:CPU_INT_STATE /* save intr state */
1015
1016 TIME_INT_ENTRY /* do timing */
1017
1018 /* Check for active vtimers in the current task */
1019 mov %gs:CPU_ACTIVE_THREAD, %rcx
1020 mov TH_TASK(%rcx), %rbx
1021 TASK_VTIMER_CHECK(%rbx, %rcx)
1022
1023 incl %gs:CPU_PREEMPTION_LEVEL
1024 incl %gs:CPU_INTERRUPT_LEVEL
1025
1026 movq %gs:CPU_INT_STATE, %rdi
1027
1028 CCALL(interrupt) /* call generic interrupt routine */
1029
1030 cli /* just in case we returned with intrs enabled */
1031 xor %rax,%rax
1032 movq %rax,%gs:CPU_INT_STATE /* clear intr state pointer */
1033
1034 .globl EXT(return_to_iret)
1035 LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */
1036
1037 decl %gs:CPU_INTERRUPT_LEVEL
1038 decl %gs:CPU_PREEMPTION_LEVEL
1039
1040 TIME_INT_EXIT /* do timing */
1041
1042 movq %gs:CPU_ACTIVE_THREAD,%rax
1043 movq TH_PCB_FPS(%rax),%rax /* get pcb's ifps */
1044 cmpq $0,%rax /* Is there a context */
1045 je 1f /* Branch if not */
1046 movl FP_VALID(%rax),%eax /* Load fp_valid */
1047 cmpl $0,%eax /* Check if valid */
1048 jne 1f /* Branch if valid */
1049 clts /* Clear TS */
1050 jmp 2f
1051 1:
1052 mov %cr0,%rax /* get cr0 */
1053 orl $(CR0_TS),%eax /* or in TS bit */
1054 mov %rax,%cr0 /* set cr0 */
1055 2:
1056 popq %rsp /* switch back to old stack */
1057
1058 /* Load interrupted code segment into %eax */
1059 movl R32_CS(%rsp),%eax /* assume 32-bit state */
1060 cmpl $(SS_64),SS_FLAVOR(%rsp)/* 64-bit? */
1061 #if DEBUG_IDT64
1062 jne 4f
1063 movl R64_CS(%rsp),%eax /* 64-bit user mode */
1064 jmp 3f
1065 4:
1066 cmpl $(SS_32),SS_FLAVOR(%rsp)
1067 je 3f
1068 POSTCODE2(0x6431)
1069 CCALL1(panic_idt64, %rsp)
1070 hlt
1071 #else
1072 jne 3f
1073 movl R64_CS(%rsp),%eax /* 64-bit user mode */
1074 #endif
1075 3:
1076 testb $3,%al /* user mode, */
1077 jnz ast_from_interrupt_user /* go handle potential ASTs */
1078 /*
1079 * we only want to handle preemption requests if
1080 * the interrupt fell in the kernel context
1081 * and preemption isn't disabled
1082 */
1083 movl %gs:CPU_PENDING_AST,%eax
1084 testl $(AST_URGENT),%eax /* any urgent requests? */
1085 je ret_to_kernel /* no, nothing to do */
1086
1087 cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
1088 jne ret_to_kernel /* yes, skip it */
1089
1090 movq %gs:CPU_KERNEL_STACK,%rax
1091 movq %rsp,%rcx
1092 xorq %rax,%rcx
1093 andq EXT(kernel_stack_mask)(%rip),%rcx
1094 testq %rcx,%rcx /* are we on the kernel stack? */
1095 jne ret_to_kernel /* no, skip it */
1096
1097 /*
1098 * Take an AST from kernel space. We don't need (and don't want)
1099 * to do as much as the case where the interrupt came from user
1100 * space.
1101 */
1102 CCALL1(i386_astintr, $1)
1103
1104 jmp ret_to_kernel
1105
1106
1107 /*
1108 * nested int - simple path, can't preempt etc on way out
1109 */
1110 int_from_intstack:
1111 incl %gs:CPU_PREEMPTION_LEVEL
1112 incl %gs:CPU_INTERRUPT_LEVEL
1113 incl %gs:CPU_NESTED_ISTACK
1114 mov %rsp, %rdi /* x86_saved_state */
1115 CCALL(interrupt)
1116
1117 decl %gs:CPU_INTERRUPT_LEVEL
1118 decl %gs:CPU_PREEMPTION_LEVEL
1119 decl %gs:CPU_NESTED_ISTACK
1120 #if DEBUG_IDT64
1121 CCALL1(panic_idt64, %rsp)
1122 POSTCODE2(0x6411)
1123 hlt
1124 #endif
1125 jmp ret_to_kernel
1126
1127 /*
1128 * Take an AST from an interrupted user
1129 */
1130 ast_from_interrupt_user:
1131 movl %gs:CPU_PENDING_AST,%eax
1132 testl %eax,%eax /* pending ASTs? */
1133 je EXT(ret_to_user) /* no, nothing to do */
1134
1135 TIME_TRAP_UENTRY
1136
1137 movl $1, %ecx /* check if we're in the PFZ */
1138 jmp L_return_from_trap_with_ast /* return */
1139
1140
1141 /* Syscall dispatch routines! */
1142
1143 /*
1144 *
1145 * 32bit Tasks
1146 * System call entries via INTR_GATE or sysenter:
1147 *
1148 * rsp -> x86_saved_state32_t
1149 * interrupts disabled
1150 * direction flag cleared
1151 */
1152
1153 Entry(hndl_sysenter)
1154 /*
1155 * We can be here either for a mach syscall or a unix syscall,
1156 * as indicated by the sign of the code:
1157 */
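/*
 * Illustrative values: eax = -31 selects the Mach trap table (mach_msg_trap
 * is Mach trap #31, passed negated), while eax = 4 selects BSD syscall #4
 * (write).
 */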
1158 movl R32_EAX(%rsp),%eax
1159 testl %eax,%eax
1160 js EXT(hndl_mach_scall) /* < 0 => mach */
1161 /* > 0 => unix */
1162
1163 Entry(hndl_unix_scall)
1164 /* If the caller (typically LibSystem) has recorded the cumulative size of
1165 * the arguments in EAX, copy them over from the user stack directly.
1166 * We recover from exceptions inline--if the copy loop doesn't complete
1167 * due to an exception, we fall back to copyin from compatibility mode.
1168 * We can potentially extend this mechanism to mach traps as well (DRK).
1169 */
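/*
 * Flow of the inline copy below: %ecx := number of 32-bit argument words
 * decoded from %eax, %rsi := user stack just above the return address,
 * %rdi := the per-cpu argument store, %rbx := its valid flag. The flag is
 * cleared before the rep movsl and set afterwards, so if the copy faults it
 * stays clear; the page-fault handler recognizes a fault at
 * idt64_unix_scall_copy_args and resumes at L_copy_args_continue, leaving
 * the arguments to be fetched by the normal copyin path.
 */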
1170 testl $(I386_SYSCALL_ARG_BYTES_MASK), %eax
1171 jz L_copy_args_continue
1172 movl %eax, %ecx
1173 mov %gs:CPU_UBER_ARG_STORE_VALID, %rbx
1174 shrl $(I386_SYSCALL_ARG_DWORDS_SHIFT), %ecx
1175 andl $(I386_SYSCALL_ARG_DWORDS_MASK), %ecx
1176 mov %gs:CPU_UBER_ARG_STORE, %rdi
1177 mov ISC32_RSP(%rsp), %rsi
1178 add $4, %rsi
1179 movl $0, (%rbx)
1180
1181 EXT(idt64_unix_scall_copy_args):
1182 rep movsl
1183 movl $1, (%rbx)
1184 L_copy_args_continue:
1185
1186 TIME_TRAP_UENTRY
1187
1188 movq %gs:CPU_KERNEL_STACK,%rdi
1189 xchgq %rdi,%rsp /* switch to kernel stack */
1190 movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
1191 movq TH_TASK(%rcx),%rbx /* point to current task */
1192 incl TH_SYSCALLS_UNIX(%rcx) /* increment call count */
1193
1194 /* Check for active vtimers in the current task */
1195 TASK_VTIMER_CHECK(%rbx,%rcx)
1196
1197 sti
1198
1199 CCALL(unix_syscall)
1200 /*
1201 * always returns through thread_exception_return
1202 */
1203
1204
1205 Entry(hndl_mach_scall)
1206 TIME_TRAP_UENTRY
1207
1208 movq %gs:CPU_KERNEL_STACK,%rdi
1209 xchgq %rdi,%rsp /* switch to kernel stack */
1210 movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
1211 movq TH_TASK(%rcx),%rbx /* point to current task */
1212 incl TH_SYSCALLS_MACH(%rcx) /* increment call count */
1213
1214 /* Check for active vtimers in the current task */
1215 TASK_VTIMER_CHECK(%rbx,%rcx)
1216
1217 sti
1218
1219 CCALL(mach_call_munger)
1220 /*
1221 * always returns through thread_exception_return
1222 */
1223
1224
1225 Entry(hndl_mdep_scall)
1226 TIME_TRAP_UENTRY
1227
1228 movq %gs:CPU_KERNEL_STACK,%rdi
1229 xchgq %rdi,%rsp /* switch to kernel stack */
1230
1231 /* Check for active vtimers in the current task */
1232 movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
1233 movq TH_TASK(%rcx),%rbx /* point to current task */
1234 TASK_VTIMER_CHECK(%rbx,%rcx)
1235
1236 sti
1237
1238 CCALL(machdep_syscall)
1239 /*
1240 * always returns through thread_exception_return
1241 */
1242
1243
1244 Entry(hndl_diag_scall)
1245 TIME_TRAP_UENTRY
1246
1247 movq %gs:CPU_KERNEL_STACK,%rdi
1248 xchgq %rdi,%rsp /* switch to kernel stack */
1249
1250 /* Check for active vtimers in the current task */
1251 movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
1252 movq TH_TASK(%rcx),%rbx /* point to current task */
1253 TASK_VTIMER_CHECK(%rbx,%rcx)
1254
1255 pushq %rdi /* push pcb stack */
1256
1257 CCALL(diagCall) // Call diagnostics
1258
1259 cli // Disable interruptions just in case
1260 cmpl $0,%eax // What kind of return is this?
1261 je 1f // - branch if bad (zero)
1262 popq %rsp // Get back the pcb stack
1263 jmp EXT(return_to_user) // Normal return, do not check asts...
1264 1:
1265 CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
1266 // pass what would be the diag syscall
1267 // error return - cause an exception
1268 /* no return */
1269
1270
1271 /*
1272 * 64bit Tasks
1273 * System call entries via syscall only:
1274 *
1275 * rsp -> x86_saved_state64_t
1276 * interrupts disabled
1277 * direction flag cleared
1278 */
1279
1280 Entry(hndl_syscall)
1281 TIME_TRAP_UENTRY
1282
1283 movq %gs:CPU_KERNEL_STACK,%rdi
1284 xchgq %rdi,%rsp /* switch to kernel stack */
1285 movq %gs:CPU_ACTIVE_THREAD,%rcx /* get current thread */
1286 movq TH_TASK(%rcx),%rbx /* point to current task */
1287
1288 /* Check for active vtimers in the current task */
1289 TASK_VTIMER_CHECK(%rbx,%rcx)
1290
1291 /*
1292 * We can be here either for a mach, unix, machdep or diag syscall,
1293 * as indicated by the syscall class:
1294 */
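/*
 * The class occupies the high byte of the 64-bit call number. Illustrative
 * example (assuming SYSCALL_CLASS_UNIX == 2 and SYSCALL_CLASS_SHIFT == 24):
 * a 64-bit write() call arrives with rax = (2 << 24) | 4 = 0x2000004.
 */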
1295 movl R64_RAX(%rdi), %eax /* syscall number/class */
1296 movl %eax, %edx
1297 andl $(SYSCALL_CLASS_MASK), %edx /* syscall class */
1298 cmpl $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
1299 je EXT(hndl_mach_scall64)
1300 cmpl $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
1301 je EXT(hndl_unix_scall64)
1302 cmpl $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
1303 je EXT(hndl_mdep_scall64)
1304 cmpl $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
1305 je EXT(hndl_diag_scall64)
1306
1307 /* Syscall class unknown */
1308 CCALL3(i386_exception, $(EXC_SYSCALL), %rax, $1)
1309 /* no return */
1310
1311
1312 Entry(hndl_unix_scall64)
1313 incl TH_SYSCALLS_UNIX(%rcx) /* increment call count */
1314 sti
1315
1316 CCALL(unix_syscall64)
1317 /*
1318 * always returns through thread_exception_return
1319 */
1320
1321
1322 Entry(hndl_mach_scall64)
1323 incl TH_SYSCALLS_MACH(%rcx) /* increment call count */
1324 sti
1325
1326 CCALL(mach_call_munger64)
1327 /*
1328 * always returns through thread_exception_return
1329 */
1330
1331
1332
1333 Entry(hndl_mdep_scall64)
1334 sti
1335
1336 CCALL(machdep_syscall64)
1337 /*
1338 * always returns through thread_exception_return
1339 */
1340
1341
1342 Entry(hndl_diag_scall64)
1343 pushq %rdi // Push the previous stack
1344 CCALL(diagCall64) // Call diagnostics
1345 cli // Disable interruptions just in case
1346 cmpl $0,%eax // What kind of return is this?
1347 je 1f // - branch if bad (zero)
1348 popq %rsp // Get back the pcb stack
1349 jmp EXT(return_to_user) // Normal return, do not check asts...
1350 1:
1351 CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
1352 /* no return */
1353
1354 Entry(hndl_machine_check)
1355 CCALL1(panic_machine_check64, %rsp)
1356 hlt
1357
1358 Entry(hndl_double_fault)
1359 CCALL1(panic_double_fault64, %rsp)
1360 hlt