/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/asm.h>
#include <assym.s>
#include <debug.h>
#include <i386/eflags.h>
#include <i386/rtclock_asm.h>
#include <i386/trap.h>
#define _ARCH_I386_ASM_HELP_H_  /* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <mach/exception_types.h>

#if DEBUG
#define DEBUG_IDT64 1
#endif

/*
 * This is the low-level trap and interrupt handling code associated with
 * the IDT. It also includes system call handlers for sysenter/syscall.
 * The IDT itself is defined in mp_desc.c.
 *
 * Code here is structured as follows:
 *
 * stubs     Code called directly from an IDT vector.
 *           All entry points have the "idt64_" prefix and they are built
 *           using macros expanded by the inclusion of idt_table.h.
 *           This code performs vector-dependent identification and jumps
 *           into the dispatch code.
 *
 * dispatch  The dispatch code is responsible for saving the thread state
 *           (which is either 64-bit or 32-bit) and then jumping to the
 *           class handler identified by the stub.
 *
 * returns   Code to restore state and return to the previous context.
 *
 * handlers  There are several classes of handlers:
 *           interrupt - asynchronous events typically from external devices
 *           trap      - synchronous events due to thread execution
 *           syscall   - synchronous system call request
 *           fatal     - fatal traps
 */

/*
 * Handlers:
 */
#define HNDL_ALLINTRS       EXT(hndl_allintrs)
#define HNDL_ALLTRAPS       EXT(hndl_alltraps)
#define HNDL_SYSENTER       EXT(hndl_sysenter)
#define HNDL_SYSCALL        EXT(hndl_syscall)
#define HNDL_UNIX_SCALL     EXT(hndl_unix_scall)
#define HNDL_MACH_SCALL     EXT(hndl_mach_scall)
#define HNDL_MDEP_SCALL     EXT(hndl_mdep_scall)
#define HNDL_DOUBLE_FAULT   EXT(hndl_double_fault)
#define HNDL_MACHINE_CHECK  EXT(hndl_machine_check)

#if 1
#define PUSH_FUNCTION(func)          \
        sub     $8, %rsp            ;\
        push    %rax                ;\
        leaq    func(%rip), %rax    ;\
        movq    %rax, 8(%rsp)       ;\
        pop     %rax
#else
#define PUSH_FUNCTION(func) pushq func
#endif
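/*
 * (PUSH_FUNCTION open-codes "push the 64-bit address of func": x86-64 has
 * no push with a 64-bit immediate, and with the kernel slid by KASLR the
 * handler's address may not fit the sign-extended 32-bit immediate that
 * "pushq $func" would encode. So a slot is reserved on the stack, %rax is
 * borrowed to materialize the address RIP-relatively, and the slot is
 * filled before %rax is restored.)
 */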

/* The wrapper for all non-special traps/interrupts */
/* Everything up to PUSH_FUNCTION is just to output
 * the interrupt number to the postcode display
 */
#if DEBUG_IDT64
#define IDT_ENTRY_WRAPPER(n, f)      \
        push    %rax                ;\
        POSTCODE2(0x6400+n)         ;\
        pop     %rax                ;\
        PUSH_FUNCTION(f)            ;\
        pushq   $(n)                ;\
        jmp     L_dispatch
#else
#define IDT_ENTRY_WRAPPER(n, f)      \
        PUSH_FUNCTION(f)            ;\
        pushq   $(n)                ;\
        jmp     L_dispatch
#endif

/* A trap that comes with an error code already on the stack */
#define TRAP_ERR(n, f)               \
        Entry(f)                    ;\
        IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)

/* A normal trap */
#define TRAP(n, f)                   \
        Entry(f)                    ;\
        pushq   $0                  ;\
        IDT_ENTRY_WRAPPER(n, HNDL_ALLTRAPS)

#define USER_TRAP TRAP

/* An interrupt */
#define INTERRUPT(n)                 \
        Entry(_intr_ ## n)          ;\
        pushq   $0                  ;\
        IDT_ENTRY_WRAPPER(n, HNDL_ALLINTRS)

/* A trap with a special-case handler, hence we don't need to define anything */
#define TRAP_SPC(n, f)
#define TRAP_IST1(n, f)
#define TRAP_IST2(n, f)
#define USER_TRAP_SPC(n, f)

/* Generate all the stubs */
#include "idt_table.h"
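/*
 * (For example, an idt_table.h entry such as TRAP(0x00, idt64_zero_div)
 * expands, via the macros above, to a stub that pushes a zero error code,
 * pushes the HNDL_ALLTRAPS handler address and the vector number, and then
 * jumps to the common L_dispatch code below.)
 */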

/*
 * Common dispatch point.
 * Determine what mode has been interrupted and save state accordingly.
 * Here with:
 *      rsp     from user-space:   interrupt state in PCB, or
 *              from kernel-space: interrupt state in kernel or interrupt stack
 *      GSBASE  from user-space:   pthread area, or
 *              from kernel-space: cpu_data
 */
L_dispatch:
        cmpl    $(KERNEL64_CS), ISF64_CS(%rsp)
        je      L_dispatch_kernel

        swapgs

L_dispatch_user:
        cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
        je      L_dispatch_U32          /* 32-bit user task */

L_dispatch_U64:
        subq    $(ISS64_OFFSET), %rsp
        mov     %r15, R64_R15(%rsp)
        mov     %rsp, %r15
        mov     %gs:CPU_KERNEL_STACK, %rsp
        jmp     L_dispatch_64bit

L_dispatch_kernel:
        subq    $(ISS64_OFFSET), %rsp
        mov     %r15, R64_R15(%rsp)
        mov     %rsp, %r15

/*
 * Here for 64-bit user task or kernel
 */
L_dispatch_64bit:
        movl    $(SS_64), SS_FLAVOR(%r15)

        /*
         * Save segment regs - for completeness since they're not used.
         */
        movl    %fs, R64_FS(%r15)
        movl    %gs, R64_GS(%r15)

        /* Save general-purpose registers */
        mov     %rax, R64_RAX(%r15)
        mov     %rbx, R64_RBX(%r15)
        mov     %rcx, R64_RCX(%r15)
        mov     %rdx, R64_RDX(%r15)
        mov     %rbp, R64_RBP(%r15)
        mov     %rdi, R64_RDI(%r15)
        mov     %rsi, R64_RSI(%r15)
        mov     %r8, R64_R8(%r15)
        mov     %r9, R64_R9(%r15)
        mov     %r10, R64_R10(%r15)
        mov     %r11, R64_R11(%r15)
        mov     %r12, R64_R12(%r15)
        mov     %r13, R64_R13(%r15)
        mov     %r14, R64_R14(%r15)

        /* cr2 is significant only for page-faults */
        mov     %cr2, %rax
        mov     %rax, R64_CR2(%r15)

        mov     R64_TRAPNO(%r15), %ebx  /* %ebx := trapno for later */
        mov     R64_TRAPFN(%r15), %rdx  /* %rdx := trapfn for later */
        mov     R64_CS(%r15), %esi      /* %esi := cs for later */

        jmp     L_common_dispatch

L_64bit_entry_reject:
        /*
         * Here for a 64-bit user attempting an invalid kernel entry.
         */
        pushq   %rax
        leaq    HNDL_ALLTRAPS(%rip), %rax
        movq    %rax, ISF64_TRAPFN+8(%rsp)
        popq    %rax
        movq    $(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
        jmp     L_dispatch_U64

L_32bit_entry_check:
        /*
         * Check we're not a confused 64-bit user.
         */
        cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
        jne     L_64bit_entry_reject
        /* fall through to 32-bit handler: */

L_dispatch_U32: /* 32-bit user task */
        subq    $(ISS64_OFFSET), %rsp
        mov     %rsp, %r15
        mov     %gs:CPU_KERNEL_STACK, %rsp
        movl    $(SS_32), SS_FLAVOR(%r15)

        /*
         * Save segment regs
         */
        movl    %ds, R32_DS(%r15)
        movl    %es, R32_ES(%r15)
        movl    %fs, R32_FS(%r15)
        movl    %gs, R32_GS(%r15)

        /*
         * Save general 32-bit registers
         */
        mov     %eax, R32_EAX(%r15)
        mov     %ebx, R32_EBX(%r15)
        mov     %ecx, R32_ECX(%r15)
        mov     %edx, R32_EDX(%r15)
        mov     %ebp, R32_EBP(%r15)
        mov     %esi, R32_ESI(%r15)
        mov     %edi, R32_EDI(%r15)

        /* Unconditionally save cr2; only meaningful on page faults */
        mov     %cr2, %rax
        mov     %eax, R32_CR2(%r15)

        /*
         * Copy registers already saved in the machine state
         * (in the interrupt stack frame) into the compat save area.
         */
        mov     R64_RIP(%r15), %eax
        mov     %eax, R32_EIP(%r15)
        mov     R64_RFLAGS(%r15), %eax
        mov     %eax, R32_EFLAGS(%r15)
        mov     R64_RSP(%r15), %eax
        mov     %eax, R32_UESP(%r15)
        mov     R64_SS(%r15), %eax
        mov     %eax, R32_SS(%r15)
L_dispatch_U32_after_fault:
        mov     R64_CS(%r15), %esi      /* %esi := %cs for later */
        mov     %esi, R32_CS(%r15)
        mov     R64_TRAPNO(%r15), %ebx  /* %ebx := trapno for later */
        mov     %ebx, R32_TRAPNO(%r15)
        mov     R64_ERR(%r15), %eax
        mov     %eax, R32_ERR(%r15)
        mov     R64_TRAPFN(%r15), %rdx  /* %rdx := trapfn for later */

L_common_dispatch:
        cld             /* Ensure the direction flag is clear in the kernel */
        cmpl    $0, EXT(pmap_smap_enabled)(%rip)
        je      1f
        clac            /* Clear EFLAGS.AC if SMAP is present/enabled */
1:
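        /*
         * (With SMAP enabled, clearing EFLAGS.AC means any inadvertent
         * kernel access to user-mapped pages now faults; deliberate user
         * accesses must bracket themselves with stac/clac.)
         */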
        /*
         * On entering the kernel, we typically don't switch CR3
         * because the kernel shares the user's address space.
         * But we mark the kernel's cr3 as "active" for TLB coherency evaluation.
         * If, however, the CPU's invalid-TLB flag is set, we have to invalidate
         * the TLB since the kernel pagetables were changed while we were in
         * userspace.
         *
         * For threads with a mapped pagezero (some WINE games) on non-SMAP
         * platforms, we switch to the kernel's address space on entry. Also,
         * if the global no_shared_cr3 is TRUE we do switch to the kernel's cr3
         * so that illicit accesses to userspace can be trapped.
         */
        mov     %gs:CPU_KERNEL_CR3, %rcx
        mov     %rcx, %gs:CPU_ACTIVE_CR3
        test    $3, %esi                /* user/kernel? */
        jz      2f                      /* skip cr3 reload from kernel */
        xor     %rbp, %rbp
        cmpl    $0, %gs:CPU_PAGEZERO_MAPPED
        jnz     11f
        cmpl    $0, EXT(no_shared_cr3)(%rip)
        je      2f
11:
        xor     %eax, %eax
        movw    %gs:CPU_KERNEL_PCID, %ax
        or      %rax, %rcx
        mov     %rcx, %cr3              /* load kernel cr3 */
        jmp     4f                      /* and skip tlb flush test */
2:
        mov     %gs:CPU_ACTIVE_CR3+4, %rcx
        shr     $32, %rcx
        testl   %ecx, %ecx
        jz      4f
        testl   $(1<<16), %ecx          /* Global? */
        jz      3f
        movl    $0, %gs:CPU_TLB_INVALID
        mov     %cr4, %rcx              /* RMW CR4, for lack of an alternative */
        and     $(~CR4_PGE), %rcx
        mov     %rcx, %cr4
        or      $(CR4_PGE), %rcx
        mov     %rcx, %cr4
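        /*
         * (Toggling CR4.PGE off and back on flushes the entire TLB,
         * including global entries that a mov to CR3 would leave intact;
         * hence the read-modify-write of CR4 above.)
         */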
        jmp     4f
3:
        movb    $0, %gs:CPU_TLB_INVALID_LOCAL
        mov     %cr3, %rcx
        mov     %rcx, %cr3
4:
        mov     %gs:CPU_ACTIVE_THREAD, %rcx     /* Get the active thread */
        movl    $-1, TH_IOTIER_OVERRIDE(%rcx)   /* Reset IO tier override to -1 before handling trap */
        cmpq    $0, TH_PCB_IDS(%rcx)    /* Is there a debug register state? */
        je      5f
        xor     %ecx, %ecx              /* If so, reset DR7 (the control) */
        mov     %rcx, %dr7
5:
        incl    %gs:hwIntCnt(,%ebx,4)   // Bump the trap/intr count
        /* Dispatch the designated handler */
        jmp     *%rdx

/*
 * Control is passed here to return to user.
 */
Entry(return_to_user)
        TIME_TRAP_UEXIT

Entry(ret_to_user)
// XXX It'd be nice to tidy up this debug register restore sequence...
        mov     %gs:CPU_ACTIVE_THREAD, %rdx
        movq    TH_PCB_IDS(%rdx),%rax   /* Obtain this thread's debug state */

        test    %rax, %rax              /* Is there a debug register context? */
        je      2f                      /* branch if not */
        cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
        jne     1f
        movl    DS_DR0(%rax), %ecx      /* If so, load the 32-bit DRs */
        movq    %rcx, %dr0
        movl    DS_DR1(%rax), %ecx
        movq    %rcx, %dr1
        movl    DS_DR2(%rax), %ecx
        movq    %rcx, %dr2
        movl    DS_DR3(%rax), %ecx
        movq    %rcx, %dr3
        movl    DS_DR7(%rax), %ecx
        movq    %rcx, %gs:CPU_DR7
        jmp     2f
1:
        mov     DS64_DR0(%rax), %rcx    /* Load the full-width DRs */
        mov     %rcx, %dr0
        mov     DS64_DR1(%rax), %rcx
        mov     %rcx, %dr1
        mov     DS64_DR2(%rax), %rcx
        mov     %rcx, %dr2
        mov     DS64_DR3(%rax), %rcx
        mov     %rcx, %dr3
        mov     DS64_DR7(%rax), %rcx
        mov     %rcx, %gs:CPU_DR7
2:
        /*
         * On exiting the kernel there's typically no need to switch cr3 since
         * we're already running in the user's address space, which includes
         * the kernel. We now mark the task's cr3 as active, for TLB coherency.
         * If the target address space has a pagezero mapping present, or
         * if no_shared_cr3 is set, we do need to switch cr3 at this point.
         */
        mov     %gs:CPU_TASK_CR3, %rcx
        mov     %rcx, %gs:CPU_ACTIVE_CR3
        cmpl    $0, %gs:CPU_PAGEZERO_MAPPED
        jnz     L_cr3_switch_island
        movl    EXT(no_shared_cr3)(%rip), %eax
        test    %eax, %eax              /* -no_shared_cr3 */
        jnz     L_cr3_switch_island

L_cr3_switch_return:
        mov     %gs:CPU_DR7, %rax       /* Is there a debug control register? */
        cmp     $0, %rax
        je      4f
        mov     %rax, %dr7              /* Set DR7 */
        movq    $0, %gs:CPU_DR7
4:
        cmpl    $(SS_64), SS_FLAVOR(%r15)       /* 64-bit state? */
        je      L_64bit_return

L_32bit_return:
#if DEBUG_IDT64
        cmpl    $(SS_32), SS_FLAVOR(%r15)       /* 32-bit state? */
        je      1f
        cli
        POSTCODE2(0x6432)
        CCALL1(panic_idt64, %r15)
1:
#endif /* DEBUG_IDT64 */

        /*
         * Restore registers into the machine state for iret.
         * Here on fault stack with the PCB address in R15.
         */
        movl    R32_EIP(%r15), %eax
        movl    %eax, R64_RIP(%r15)
        movl    R32_EFLAGS(%r15), %eax
        movl    %eax, R64_RFLAGS(%r15)
        movl    R32_CS(%r15), %eax
        movl    %eax, R64_CS(%r15)
        movl    R32_UESP(%r15), %eax
        movl    %eax, R64_RSP(%r15)
        movl    R32_SS(%r15), %eax
        movl    %eax, R64_SS(%r15)

        /*
         * Restore general 32-bit registers
         */
        movl    R32_EAX(%r15), %eax
        movl    R32_EBX(%r15), %ebx
        movl    R32_ECX(%r15), %ecx
        movl    R32_EDX(%r15), %edx
        movl    R32_EBP(%r15), %ebp
        movl    R32_ESI(%r15), %esi
        movl    R32_EDI(%r15), %edi

        /*
         * Restore segment registers. A segment exception taken here will
         * push state on the IST1 stack and will not affect the "PCB stack".
         */
        mov     %r15, %rsp              /* Set the PCB as the stack */
        swapgs
EXT(ret32_set_ds):
        movl    R32_DS(%rsp), %ds
EXT(ret32_set_es):
        movl    R32_ES(%rsp), %es
EXT(ret32_set_fs):
        movl    R32_FS(%rsp), %fs
EXT(ret32_set_gs):
        movl    R32_GS(%rsp), %gs

        /* pop compat frame + trapno, trapfn and error */
        add     $(ISS64_OFFSET)+8+8+8, %rsp
        cmpl    $(SYSENTER_CS), ISF64_CS-8-8-8(%rsp)
                                        /* test for fast entry/exit */
        je      L_fast_exit
EXT(ret32_iret):
        iretq                           /* return from interrupt */


L_fast_exit:
        pop     %rdx                    /* user return eip */
        pop     %rcx                    /* pop and toss cs */
        andl    $(~EFL_IF), (%rsp)      /* clear interrupts enable, sti below */
        popf                            /* flags - carry denotes failure */
        pop     %rcx                    /* user return esp */
        sti                             /* interrupts enabled after sysexit */
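        /*
         * (STI takes effect only after the next instruction completes, so
         * its one-instruction interrupt shadow covers the sysexit below:
         * no interrupt can be taken while still on the kernel side.)
         */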
        sysexitl                        /* 32-bit sysexit */

L_cr3_switch_island:
        xor     %eax, %eax
        movw    %gs:CPU_ACTIVE_PCID, %ax
        or      %rax, %rcx
        mov     %rcx, %cr3
        jmp     L_cr3_switch_return

ret_to_kernel:
#if DEBUG_IDT64
        cmpl    $(SS_64), SS_FLAVOR(%r15)       /* 64-bit state? */
        je      1f
        cli
        POSTCODE2(0x6464)
        CCALL1(panic_idt64, %r15)
        hlt
1:
        cmpl    $(KERNEL64_CS), R64_CS(%r15)
        je      2f
        CCALL1(panic_idt64, %r15)
        hlt
2:
#endif

L_64bit_return:
        /*
         * Restore general 64-bit registers.
         * Here on fault stack and PCB address in R15.
         */
        mov     R64_R14(%r15), %r14
        mov     R64_R13(%r15), %r13
        mov     R64_R12(%r15), %r12
        mov     R64_R11(%r15), %r11
        mov     R64_R10(%r15), %r10
        mov     R64_R9(%r15), %r9
        mov     R64_R8(%r15), %r8
        mov     R64_RSI(%r15), %rsi
        mov     R64_RDI(%r15), %rdi
        mov     R64_RBP(%r15), %rbp
        mov     R64_RDX(%r15), %rdx
        mov     R64_RCX(%r15), %rcx
        mov     R64_RBX(%r15), %rbx
        mov     R64_RAX(%r15), %rax

        /*
         * We must swap GS base if we're returning to user-space,
         * or we're returning from an NMI that occurred in a trampoline
         * before the user GS had been swapped. In the latter case, the NMI
         * handler will have flagged the high-order 32-bits of the CS.
         */
        cmpq    $(KERNEL64_CS), R64_CS(%r15)
        jz      1f
        swapgs
1:
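        /*
         * (Restore R15 and recover the save-area pointer in one exchange:
         * the mov below loads the saved user R15 value into RSP, and the
         * xchg then leaves that value in %r15 while %rsp ends up pointing
         * at the save area for the final frame pops.)
         */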
        mov     R64_R15(%r15), %rsp
        xchg    %r15, %rsp
        add     $(ISS64_OFFSET)+24, %rsp        /* pop saved state */
                                                /* + trapno/trapfn/error */
        cmpl    $(SYSCALL_CS), ISF64_CS-24(%rsp)
                                        /* test for fast entry/exit */
        je      L_sysret
.globl _dump_iretq
EXT(ret64_iret):
        iretq                           /* return from interrupt */

L_sysret:
        /*
         * Here to load rcx/r11/rsp and perform the sysret back to user-space.
         *      rcx     user rip
         *      r11     user rflags
         *      rsp     user stack pointer
         */
        mov     ISF64_RIP-24(%rsp), %rcx
        mov     ISF64_RFLAGS-24(%rsp), %r11
        mov     ISF64_RSP-24(%rsp), %rsp
        sysretq                         /* return from system call */
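        /*
         * (SYSRET reloads RIP from %rcx and RFLAGS from %r11, mirroring
         * what SYSCALL saved on entry, which is why only those two
         * registers plus the user RSP are staged above.)
         */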


/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */

#ifndef UNIX_INT
#error NO UNIX INT!!!
#endif
Entry(idt64_unix_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
        pushq   %rax                    /* save system call number */
        PUSH_FUNCTION(HNDL_UNIX_SCALL)
        pushq   $(UNIX_INT)
        jmp     L_32bit_entry_check


Entry(idt64_mach_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
        pushq   %rax                    /* save system call number */
        PUSH_FUNCTION(HNDL_MACH_SCALL)
        pushq   $(MACH_INT)
        jmp     L_32bit_entry_check


Entry(idt64_mdep_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
        pushq   %rax                    /* save system call number */
        PUSH_FUNCTION(HNDL_MDEP_SCALL)
        pushq   $(MACHDEP_INT)
        jmp     L_32bit_entry_check

/* Programmed into MSR_IA32_LSTAR by mp_desc.c */
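/*
 * (64-bit SYSCALL semantics: on entry the hardware has loaded %rcx with
 * the user RIP and %r11 with the user RFLAGS, and has NOT switched stacks
 * - hence the CPU_UBER_TMP shuffle below to recover a kernel stack.)
 */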
Entry(hi64_syscall)
Entry(idt64_syscall)
L_syscall_continue:
        swapgs                          /* Kapow! get per-cpu data area */
        mov     %rsp, %gs:CPU_UBER_TMP  /* save user stack */
        mov     %gs:CPU_UBER_ISF, %rsp  /* switch stack to pcb */

        /*
         * Save values in the ISF frame in the PCB
         * to cons up the saved machine state.
         */
        movl    $(USER_DS), ISF64_SS(%rsp)
        movl    $(SYSCALL_CS), ISF64_CS(%rsp)   /* cs - a pseudo-segment */
        mov     %r11, ISF64_RFLAGS(%rsp)        /* rflags */
        mov     %rcx, ISF64_RIP(%rsp)           /* rip */
        mov     %gs:CPU_UBER_TMP, %rcx
        mov     %rcx, ISF64_RSP(%rsp)           /* user stack */
        mov     %rax, ISF64_ERR(%rsp)           /* err/rax - syscall code */
        movq    $(T_SYSCALL), ISF64_TRAPNO(%rsp)        /* trapno */
        leaq    HNDL_SYSCALL(%rip), %r11
        movq    %r11, ISF64_TRAPFN(%rsp)
        mov     ISF64_RFLAGS(%rsp), %r11        /* Avoid leak, restore R11 */
        jmp     L_dispatch_U64                  /* this can only be 64-bit */

/*
 * sysenter entry point
 * Requires user code to set up:
 *      edx: user instruction pointer (return address)
 *      ecx: user stack pointer
 *           on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *       or requiring ecx to be preserved.
 */
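/*
 * (SYSENTER saves no user state at all: RIP, RSP and RFLAGS must be
 * reconstructed by software, which is why the user passes its return EIP
 * in %edx and its ESP in %ecx. The SYSENTER ESP MSR is set up, presumably
 * in mp_desc.c alongside LSTAR, to point at a slot holding the PCB stack
 * pointer, so the first instruction below dereferences it.)
 */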
Entry(hi64_sysenter)
Entry(idt64_sysenter)
        movq    (%rsp), %rsp
        /*
         * Push values on to the PCB stack
         * to cons up the saved machine state.
         */
        push    $(USER_DS)              /* ss */
        push    %rcx                    /* uesp */
        pushf                           /* flags */
        /*
         * Clear, among others, the Nested Task (NT) flags bit;
         * this is zeroed by INT, but not by SYSENTER.
         */
        push    $0
        popf
        push    $(SYSENTER_CS)          /* cs */
L_sysenter_continue:
        swapgs                          /* switch to kernel gs (cpu_data) */
        push    %rdx                    /* eip */
        push    %rax                    /* err/eax - syscall code */
        PUSH_FUNCTION(HNDL_SYSENTER)
        pushq   $(T_SYSENTER)
        orl     $(EFL_IF), ISF64_RFLAGS(%rsp)
        jmp     L_32bit_entry_check


Entry(idt64_page_fault)
        PUSH_FUNCTION(HNDL_ALLTRAPS)
        push    $(T_PAGE_FAULT)
        push    %rax                    /* save %rax temporarily */
        testb   $3, 8+ISF64_CS(%rsp)    /* was trap from kernel? */
        jz      L_kernel_trap           /* - yes, handle with care */
        pop     %rax                    /* restore %rax, swapgs, and continue */
        swapgs
        jmp     L_dispatch_user


/*
 * Debug trap. Check for single-stepping across a system call into
 * the kernel. If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 */
Entry(idt64_debug)
        push    $0                      /* error code */
        PUSH_FUNCTION(HNDL_ALLTRAPS)
        pushq   $(T_DEBUG)

        testb   $3, ISF64_CS(%rsp)
        jnz     L_dispatch

        /*
         * trap came from kernel mode
         */

        push    %rax                    /* save %rax temporarily */
        lea     EXT(idt64_sysenter)(%rip), %rax
        cmp     %rax, ISF64_RIP+8(%rsp)
        pop     %rax
        jne     L_dispatch
        /*
         * Interrupt stack frame has been pushed on the temporary stack.
         * We have to switch to the pcb stack and patch up the saved state.
         */
        mov     %rcx, ISF64_ERR(%rsp)   /* save %rcx in error slot */
        mov     ISF64_SS+8(%rsp), %rcx  /* top of temp stack -> pcb stack */
        xchg    %rcx,%rsp               /* switch to pcb stack */
        push    $(USER_DS)              /* ss */
        push    ISF64_ERR(%rcx)         /* saved %rcx into rsp slot */
        push    ISF64_RFLAGS(%rcx)      /* rflags */
        push    $(SYSENTER_TF_CS)       /* cs - not SYSENTER_CS for iret path */
        mov     ISF64_ERR(%rcx),%rcx    /* restore %rcx */
        jmp     L_sysenter_continue     /* continue sysenter entry */


Entry(idt64_double_fault)
        PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
        pushq   $(T_DOUBLE_FAULT)
        jmp     L_dispatch_kernel


/*
 * For GP/NP/SS faults, we use the IST1 stack.
 * For faults from user-space, we have to copy the machine state to the
 * PCB stack and then dispatch as normal.
 * For faults in kernel-space, we need to scrub for kernel exit faults and
 * treat these as user-space faults. But for all other kernel-space faults
 * we continue to run on the IST1 stack and we dispatch to handle the fault
 * as fatal.
 */
Entry(idt64_gen_prot)
        PUSH_FUNCTION(HNDL_ALLTRAPS)
        pushq   $(T_GENERAL_PROTECTION)
        jmp     trap_check_kernel_exit  /* check for kernel exit sequence */

Entry(idt64_stack_fault)
        PUSH_FUNCTION(HNDL_ALLTRAPS)
        pushq   $(T_STACK_FAULT)
        jmp     trap_check_kernel_exit  /* check for kernel exit sequence */

Entry(idt64_segnp)
        PUSH_FUNCTION(HNDL_ALLTRAPS)
        pushq   $(T_SEGMENT_NOT_PRESENT)
                                        /* indicate fault type */
trap_check_kernel_exit:
        testb   $3, ISF64_CS(%rsp)
        jz      L_kernel_gpf

        /* Here for fault from user-space. Copy interrupt state to PCB. */
        swapgs
        push    %rax
        mov     %rcx, %gs:CPU_UBER_TMP  /* save user RCX */
        mov     %gs:CPU_UBER_ISF, %rcx  /* PCB stack addr */
        mov     ISF64_SS+8(%rsp), %rax
        mov     %rax, ISF64_SS(%rcx)
        mov     ISF64_RSP+8(%rsp), %rax
        mov     %rax, ISF64_RSP(%rcx)
        mov     ISF64_RFLAGS+8(%rsp), %rax
        mov     %rax, ISF64_RFLAGS(%rcx)
        mov     ISF64_CS+8(%rsp), %rax
        mov     %rax, ISF64_CS(%rcx)
        mov     ISF64_RIP+8(%rsp), %rax
        mov     %rax, ISF64_RIP(%rcx)
        mov     ISF64_ERR+8(%rsp), %rax
        mov     %rax, ISF64_ERR(%rcx)
        mov     ISF64_TRAPFN+8(%rsp), %rax
        mov     %rax, ISF64_TRAPFN(%rcx)
        mov     ISF64_TRAPNO+8(%rsp), %rax
        mov     %rax, ISF64_TRAPNO(%rcx)
        pop     %rax
        mov     %gs:CPU_UBER_TMP, %rsp  /* user RCX into RSP */
        xchg    %rcx, %rsp              /* to PCB stack with user RCX */
        jmp     L_dispatch_user

L_kernel_gpf:
        /* Here for GPF from kernel_space. Check for recoverable cases. */
        push    %rax
        leaq    EXT(ret32_iret)(%rip), %rax
        cmp     %rax, 8+ISF64_RIP(%rsp)
        je      L_fault_iret
        leaq    EXT(ret64_iret)(%rip), %rax
        cmp     %rax, 8+ISF64_RIP(%rsp)
        je      L_fault_iret
        leaq    EXT(ret32_set_ds)(%rip), %rax
        cmp     %rax, 8+ISF64_RIP(%rsp)
        je      L_32bit_fault_set_seg
        leaq    EXT(ret32_set_es)(%rip), %rax
        cmp     %rax, 8+ISF64_RIP(%rsp)
        je      L_32bit_fault_set_seg
        leaq    EXT(ret32_set_fs)(%rip), %rax
        cmp     %rax, 8+ISF64_RIP(%rsp)
        je      L_32bit_fault_set_seg
        leaq    EXT(ret32_set_gs)(%rip), %rax
        cmp     %rax, 8+ISF64_RIP(%rsp)
        je      L_32bit_fault_set_seg

        /* Fall through */

L_kernel_trap:
        /*
         * Here after taking an unexpected trap from kernel mode - perhaps
         * while running in the trampolines hereabouts.
         * Note: %rax has been pushed on stack.
         * Make sure we're not on the PCB stack; if so, move to the kernel stack.
         * This is likely a fatal condition.
         * But first, ensure we have the kernel gs base active...
         */
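        /*
         * (The kernel GS base is a kernel virtual address in the upper
         * canonical half, so after RDMSR the MSB of %edx - bit 63 of the
         * base - tells us whether the kernel or the user base is active.)
         */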
        push    %rcx
        push    %rdx
        mov     $(MSR_IA32_GS_BASE), %ecx
        rdmsr                           /* read kernel gsbase */
        test    $0x80000000, %edx       /* test MSB of address */
        jne     1f
        swapgs                          /* so swap */
1:
        pop     %rdx
        pop     %rcx

        movq    %gs:CPU_UBER_ISF, %rax  /* PCB stack addr */
        subq    %rsp, %rax
        cmpq    $(PAGE_SIZE), %rax      /* current stack in PCB? */
        jb      2f                      /* - yes, deal with it */
        pop     %rax                    /* - no, restore %rax */
        jmp     L_dispatch_kernel
2:
        /*
         * Here if %rsp is in the PCB.
         * Copy the interrupt stack frame from PCB stack to kernel stack.
         */
        movq    %gs:CPU_KERNEL_STACK, %rax
        xchgq   %rax, %rsp
        pushq   8+ISF64_SS(%rax)
        pushq   8+ISF64_RSP(%rax)
        pushq   8+ISF64_RFLAGS(%rax)
        pushq   8+ISF64_CS(%rax)
        pushq   8+ISF64_RIP(%rax)
        pushq   8+ISF64_ERR(%rax)
        pushq   8+ISF64_TRAPFN(%rax)
        pushq   8+ISF64_TRAPNO(%rax)
        movq    (%rax), %rax
        jmp     L_dispatch_kernel


/*
 * GP/NP fault on IRET: CS or SS is in error.
 * User GSBASE is active.
 * On IST1 stack containing:
 *  (rax saved above, which is immediately popped)
 *  0  ISF64_TRAPNO:  trap code (NP or GP)
 *  8  ISF64_TRAPFN:  trap function
 *  16 ISF64_ERR:     segment number in error (error code)
 *  24 ISF64_RIP:     kernel RIP
 *  32 ISF64_CS:      kernel CS
 *  40 ISF64_RFLAGS:  kernel RFLAGS
 *  48 ISF64_RSP:     kernel RSP
 *  56 ISF64_SS:      kernel SS
 * On the PCB stack, pointed to by the kernel's RSP is:
 *  0  user RIP
 *  8  user CS
 *  16 user RFLAGS
 *  24 user RSP
 *  32 user SS
 *
 * We need to move the kernel's TRAPNO, TRAPFN and ERR to the PCB and handle
 * as a user fault with:
 *  0  ISF64_TRAPNO:  trap code (NP or GP)
 *  8  ISF64_TRAPFN:  trap function
 *  16 ISF64_ERR:     segment number in error (error code)
 *  24 user RIP
 *  32 user CS
 *  40 user RFLAGS
 *  48 user RSP
 *  56 user SS
 */
L_fault_iret:
        pop     %rax                    /* recover saved %rax */
        mov     %rax, ISF64_RIP(%rsp)   /* save rax (we don't need saved rip) */
        mov     ISF64_RSP(%rsp), %rax
        xchg    %rax, %rsp              /* switch to PCB stack */
        push    ISF64_ERR(%rax)
        push    ISF64_TRAPFN(%rax)
        push    ISF64_TRAPNO(%rax)
        mov     ISF64_RIP(%rax), %rax   /* restore rax */
        /* now treat as fault from user */
        jmp     L_dispatch

/*
 * Fault restoring a segment register. All of the saved state is still
 * on the stack untouched since we haven't yet moved the stack pointer.
 * On IST1 stack containing:
 *  (rax saved above, which is immediately popped)
 *  0  ISF64_TRAPNO:  trap code (NP or GP)
 *  8  ISF64_TRAPFN:  trap function
 *  16 ISF64_ERR:     segment number in error (error code)
 *  24 ISF64_RIP:     kernel RIP
 *  32 ISF64_CS:      kernel CS
 *  40 ISF64_RFLAGS:  kernel RFLAGS
 *  48 ISF64_RSP:     kernel RSP
 *  56 ISF64_SS:      kernel SS
 * On the PCB stack, pointed to by the kernel's RSP is:
 *  0  user trap code
 *  8  user trap function
 *  16 user err
 *  24 user RIP
 *  32 user CS
 *  40 user RFLAGS
 *  48 user RSP
 *  56 user SS
 */
L_32bit_fault_set_seg:
        swapgs
        pop     %rax                    /* toss saved %rax from stack */
        mov     ISF64_TRAPNO(%rsp), %rax
        mov     ISF64_TRAPFN(%rsp), %rcx
        mov     ISF64_ERR(%rsp), %rdx
        mov     ISF64_RSP(%rsp), %rsp   /* reset stack to saved state */
        mov     %rax, R64_TRAPNO(%rsp)
        mov     %rcx, R64_TRAPFN(%rsp)
        mov     %rdx, R64_ERR(%rsp)
        /*
         * Now treat as a fault from user, except that all the state is
         * already saved - we just have to move the trapno and error into
         * the compatibility frame.
         */
        jmp     L_dispatch_U32_after_fault

/*
 * Fatal exception handlers:
 */
Entry(idt64_db_task_dbl_fault)
        PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
        pushq   $(T_DOUBLE_FAULT)
        jmp     L_dispatch

Entry(idt64_db_task_stk_fault)
        PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
        pushq   $(T_STACK_FAULT)
        jmp     L_dispatch

Entry(idt64_mc)
        push    $(0)                    /* Error */
        PUSH_FUNCTION(HNDL_MACHINE_CHECK)
        pushq   $(T_MACHINE_CHECK)
        jmp     L_dispatch

/*
 * NMI
 * This may or may not be fatal, but extreme care is required
 * because it may arrive while control is already in another trampoline.
 *
 * We get here on the IST2 stack, which is used for NMIs only.
 * We must be aware of the interrupted state:
 *  - from user-space, we
 *    - copy state to the PCB and continue;
 *  - from kernel-space, we
 *    - copy state to the kernel stack and continue, but
 *    - check what GSBASE was active, set the kernel base and
 *    - ensure that the active state is restored when the NMI is dismissed.
 */
Entry(idt64_nmi)
        push    %rax                    /* save RAX to ISF64_ERR */
        push    %rcx                    /* save RCX to ISF64_TRAPFN */
        push    %rdx                    /* save RDX to ISF64_TRAPNO */
        testb   $3, ISF64_CS(%rsp)      /* NMI from user-space? */
        je      1f

        /* From user-space: copy interrupt state to user PCB */
        swapgs
        mov     %gs:CPU_UBER_ISF, %rcx  /* PCB stack addr */
        add     $(ISF64_SIZE), %rcx     /* adjust to base of ISF */
        swapgs                          /* swap back for L_dispatch */
        jmp     4f                      /* Copy state to PCB */

1:
        /*
         * From kernel-space:
         * Determine whether the kernel or user GS is set.
         * Set the kernel and ensure that we'll swap back correctly at IRET.
         */
        mov     $(MSR_IA32_GS_BASE), %ecx
        rdmsr                           /* read kernel gsbase */
        test    $0x80000000, %edx       /* test MSB of address */
        jne     2f
        swapgs                          /* so swap */
        movl    $1, ISF64_CS+4(%rsp)    /* and set flag in CS slot */
2:
        /*
         * Determine whether we're on the kernel or interrupt stack
         * when the NMI hit.
         */
        mov     ISF64_RSP(%rsp), %rcx
        mov     %gs:CPU_KERNEL_STACK, %rax
        xor     %rcx, %rax
        and     EXT(kernel_stack_mask)(%rip), %rax
        test    %rax, %rax              /* are we on the kernel stack? */
        je      3f                      /* yes */

        mov     %gs:CPU_INT_STACK_TOP, %rax
        dec     %rax                    /* intr stack top is byte above max */
        xor     %rcx, %rax
        and     EXT(kernel_stack_mask)(%rip), %rax
        test    %rax, %rax              /* are we on the interrupt stack? */
        je      3f                      /* yes */

        mov     %gs:CPU_KERNEL_STACK, %rcx
3:
        /* 16-byte-align kernel/interrupt stack for state push */
        and     $0xFFFFFFFFFFFFFFF0, %rcx

4:
        /*
         * Copy state from NMI stack (RSP) to the save area (RCX) which is
         * the PCB for user or kernel/interrupt stack from kernel.
         *      ISF64_ERR(RSP)    saved RAX
         *      ISF64_TRAPFN(RSP) saved RCX
         *      ISF64_TRAPNO(RSP) saved RDX
         */
        xchg    %rsp, %rcx              /* set for pushes */
        push    ISF64_SS(%rcx)
        push    ISF64_RSP(%rcx)
        push    ISF64_RFLAGS(%rcx)
        push    ISF64_CS(%rcx)
        push    ISF64_RIP(%rcx)
        push    $(0)                    /* error code 0 */
        lea     HNDL_ALLINTRS(%rip), %rax
        push    %rax                    /* trapfn allintrs */
        push    $(T_NMI)                /* trapno T_NMI */
        mov     ISF64_ERR(%rcx), %rax
        mov     ISF64_TRAPNO(%rcx), %rdx
        mov     ISF64_TRAPFN(%rcx), %rcx
        jmp     L_dispatch

/* All 'exceptions' enter hndl_alltraps, with:
 *      r15     x86_saved_state_t address
 *      rsp     kernel stack if user-space, otherwise interrupt or kernel stack
 *      esi     cs at trap
 *
 * The rest of the state is set up as:
 *      both rsp and r15 are 16-byte aligned
 *      interrupts disabled
 *      direction flag cleared
 */
Entry(hndl_alltraps)
        mov     %esi, %eax
        testb   $3, %al
        jz      trap_from_kernel

        TIME_TRAP_UENTRY

        /* Check for active vtimers in the current task */
        mov     %gs:CPU_ACTIVE_THREAD, %rcx
        movl    $-1, TH_IOTIER_OVERRIDE(%rcx)   /* Reset IO tier override to -1 before handling trap/exception */
        mov     TH_TASK(%rcx), %rbx
        TASK_VTIMER_CHECK(%rbx, %rcx)

        CCALL1(user_trap, %r15)         /* call user trap routine */
        /* user_trap() unmasks interrupts */
        cli                             /* hold off intrs - critical section */
        xorl    %ecx, %ecx              /* don't check if we're in the PFZ */

Entry(return_from_trap)
        movq    %gs:CPU_ACTIVE_THREAD,%r15      /* Get current thread */
        movl    $-1, TH_IOTIER_OVERRIDE(%r15)   /* Reset IO tier override to -1 before returning to userspace */
        cmpl    $0, TH_RWLOCK_COUNT(%r15)       /* Check if current thread has pending RW locks held */
        jz      1f
        xorq    %rbp, %rbp              /* clear framepointer */
        mov     %r15, %rdi              /* Set RDI to current thread */
        CCALL(lck_rw_clear_promotions_x86)      /* Clear promotions if needed */
1:
        movq    TH_PCB_ISS(%r15), %r15  /* PCB stack */
        movl    %gs:CPU_PENDING_AST,%eax
        testl   %eax,%eax
        je      EXT(return_to_user)     /* branch if no AST */
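
/*
 * (If %ecx is non-zero here, the user thread may have been interrupted
 * inside the commpage's preemption-free zone (PFZ); in that case the AST
 * is deferred: the thread is resumed with %ebx/%rbx flagging the pended
 * AST, which is serviced when it leaves the PFZ.)
 */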
L_return_from_trap_with_ast:
        testl   %ecx, %ecx              /* see if we need to check for an EIP in the PFZ */
        je      2f                      /* no, go handle the AST */
        cmpl    $(SS_64), SS_FLAVOR(%r15)       /* are we a 64-bit task? */
        je      1f
        /* no... 32-bit user mode */
        movl    R32_EIP(%r15), %edi
        xorq    %rbp, %rbp              /* clear framepointer */
        CCALL(commpage_is_in_pfz32)
        testl   %eax, %eax
        je      2f                      /* not in the PFZ... go service AST */
        movl    %eax, R32_EBX(%r15)     /* let the PFZ know we've pended an AST */
        jmp     EXT(return_to_user)
1:
        movq    R64_RIP(%r15), %rdi
        xorq    %rbp, %rbp              /* clear framepointer */
        CCALL(commpage_is_in_pfz64)
        testl   %eax, %eax
        je      2f                      /* not in the PFZ... go service AST */
        movl    %eax, R64_RBX(%r15)     /* let the PFZ know we've pended an AST */
        jmp     EXT(return_to_user)
2:
        sti                             /* interrupts always enabled on return to user mode */

        xor     %edi, %edi              /* zero %rdi */
        xorq    %rbp, %rbp              /* clear framepointer */
        CCALL(i386_astintr)             /* take the AST */

        cli
        mov     %rsp, %r15              /* AST changes stack, saved state */
        xorl    %ecx, %ecx              /* don't check if we're in the PFZ */
        jmp     EXT(return_from_trap)   /* and check again (rare) */

/*
 * Trap from kernel mode. No need to switch stacks.
 * Interrupts must be off here - we will set them to their state at the time
 * of the trap as soon as it's safe for us to do so, and won't recurse doing
 * preemption.
 */
trap_from_kernel:
        movq    %r15, %rdi              /* saved state addr */
        pushq   R64_RIP(%r15)           /* Simulate a CALL from fault point */
        pushq   %rbp                    /* Extend framepointer chain */
        movq    %rsp, %rbp
        CCALLWITHSP(kernel_trap)        /* to kernel trap routine */
        popq    %rbp
        addq    $8, %rsp
        mov     %rsp, %r15              /* DTrace slides stack/saved-state */
        cli

        movl    %gs:CPU_PENDING_AST,%eax        /* get pending asts */
        testl   $(AST_URGENT),%eax      /* any urgent preemption? */
        je      ret_to_kernel           /* no, nothing to do */
        cmpl    $(T_PREEMPT),R64_TRAPNO(%r15)
        je      ret_to_kernel           /* T_PREEMPT handled in kernel_trap() */
        testl   $(EFL_IF),R64_RFLAGS(%r15)      /* interrupts disabled? */
        je      ret_to_kernel
        cmpl    $0,%gs:CPU_PREEMPTION_LEVEL     /* preemption disabled? */
        jne     ret_to_kernel
        movq    %gs:CPU_KERNEL_STACK,%rax
        movq    %rsp,%rcx
        xorq    %rax,%rcx
        andq    EXT(kernel_stack_mask)(%rip),%rcx
        testq   %rcx,%rcx               /* are we on the kernel stack? */
        jne     ret_to_kernel           /* no, skip it */

        CCALL1(i386_astintr, $1)        /* take the AST */

        mov     %rsp, %r15              /* AST changes stack, saved state */
        jmp     ret_to_kernel

/*
 * All interrupts on all tasks enter here with:
 *      r15     x86_saved_state_t
 *      rsp     kernel or interrupt stack
 *      esi     cs at trap
 *
 *      both rsp and r15 are 16-byte aligned
 *      interrupts disabled
 *      direction flag cleared
 */
Entry(hndl_allintrs)
        /*
         * test whether already on interrupt stack
         */
        movq    %gs:CPU_INT_STACK_TOP,%rcx
        cmpq    %rsp,%rcx
        jb      1f
        leaq    -INTSTACK_SIZE(%rcx),%rdx
        cmpq    %rsp,%rdx
        jb      int_from_intstack
1:
        xchgq   %rcx,%rsp               /* switch to interrupt stack */

        mov     %cr0,%rax               /* get cr0 */
        orl     $(CR0_TS),%eax          /* or in TS bit */
        mov     %rax,%cr0               /* set cr0 */
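        /*
         * (Setting CR0.TS makes any floating-point use from interrupt
         * context raise a device-not-available fault, since the interrupted
         * thread's FP state is not saved on this path; it is conditionally
         * cleared again on the way out below.)
         */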

        pushq   %rcx                    /* save pointer to old stack */
        pushq   %gs:CPU_INT_STATE       /* save previous intr state */
        movq    %r15,%gs:CPU_INT_STATE  /* set intr state */

        TIME_INT_ENTRY                  /* do timing */

        /* Check for active vtimers in the current task */
        mov     %gs:CPU_ACTIVE_THREAD, %rcx
        mov     TH_TASK(%rcx), %rbx
        TASK_VTIMER_CHECK(%rbx, %rcx)

        incl    %gs:CPU_PREEMPTION_LEVEL
        incl    %gs:CPU_INTERRUPT_LEVEL

        CCALL1(interrupt, %r15)         /* call generic interrupt routine */

        .globl  EXT(return_to_iret)
LEXT(return_to_iret)                    /* (label for kdb_kintr and hardclock) */

        decl    %gs:CPU_INTERRUPT_LEVEL
        decl    %gs:CPU_PREEMPTION_LEVEL

        TIME_INT_EXIT                   /* do timing */

        popq    %gs:CPU_INT_STATE       /* reset/clear intr state pointer */
        popq    %rsp                    /* switch back to old stack */

        movq    %gs:CPU_ACTIVE_THREAD,%rax
        movq    TH_PCB_FPS(%rax),%rax   /* get pcb's ifps */
        cmpq    $0,%rax                 /* Is there a context */
        je      1f                      /* Branch if not */
        movl    FP_VALID(%rax),%eax     /* Load fp_valid */
        cmpl    $0,%eax                 /* Check if valid */
        jne     1f                      /* Branch if valid */
        clts                            /* Clear TS */
        jmp     2f
1:
        mov     %cr0,%rax               /* get cr0 */
        orl     $(CR0_TS),%eax          /* or in TS bit */
        mov     %rax,%cr0               /* set cr0 */
2:
        /* Load interrupted code segment into %eax */
        movl    R32_CS(%r15),%eax       /* assume 32-bit state */
        cmpl    $(SS_64),SS_FLAVOR(%r15)        /* 64-bit? */
#if DEBUG_IDT64
        jne     4f
        movl    R64_CS(%r15),%eax       /* 64-bit user mode */
        jmp     3f
4:
        cmpl    $(SS_32),SS_FLAVOR(%r15)
        je      3f
        POSTCODE2(0x6431)
        CCALL1(panic_idt64, %r15)
        hlt
#else
        jne     3f
        movl    R64_CS(%r15),%eax       /* 64-bit user mode */
#endif
3:
        testb   $3,%al                  /* user mode, */
        jnz     ast_from_interrupt_user /* go handle potential ASTs */
        /*
         * we only want to handle preemption requests if
         * the interrupt fell in the kernel context
         * and preemption isn't disabled
         */
        movl    %gs:CPU_PENDING_AST,%eax
        testl   $(AST_URGENT),%eax      /* any urgent requests? */
        je      ret_to_kernel           /* no, nothing to do */

        cmpl    $0,%gs:CPU_PREEMPTION_LEVEL     /* preemption disabled? */
        jne     ret_to_kernel           /* yes, skip it */

        /*
         * Take an AST from kernel space. We don't need (and don't want)
         * to do as much as the case where the interrupt came from user
         * space.
         */
        CCALL1(i386_astintr, $1)

        mov     %rsp, %r15              /* AST changes stack, saved state */
        jmp     ret_to_kernel

/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
        incl    %gs:CPU_PREEMPTION_LEVEL
        incl    %gs:CPU_INTERRUPT_LEVEL
        incl    %gs:CPU_NESTED_ISTACK

        push    %gs:CPU_INT_STATE
        mov     %r15, %gs:CPU_INT_STATE

        CCALL1(interrupt, %r15)

        pop     %gs:CPU_INT_STATE

        decl    %gs:CPU_INTERRUPT_LEVEL
        decl    %gs:CPU_PREEMPTION_LEVEL
        decl    %gs:CPU_NESTED_ISTACK

        jmp     ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
        movl    %gs:CPU_PENDING_AST,%eax
        testl   %eax,%eax               /* pending ASTs? */
        je      EXT(ret_to_user)        /* no, nothing to do */

        TIME_TRAP_UENTRY

        movl    $1, %ecx                /* check if we're in the PFZ */
        jmp     L_return_from_trap_with_ast     /* return */

/* Syscall dispatch routines! */

/*
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *      r15     x86_saved_state32_t
 *      rsp     kernel stack
 *
 *      both rsp and r15 are 16-byte aligned
 *      interrupts disabled
 *      direction flag cleared
 */

Entry(hndl_sysenter)
        /*
         * We can be here either for a mach syscall or a unix syscall,
         * as indicated by the sign of the code:
         */
        movl    R32_EAX(%r15),%eax
        testl   %eax,%eax
        js      EXT(hndl_mach_scall)    /* < 0 => mach */
                                        /* > 0 => unix */

Entry(hndl_unix_scall)

        TIME_TRAP_UENTRY

        movq    %gs:CPU_ACTIVE_THREAD,%rcx      /* get current thread */
        movq    TH_TASK(%rcx),%rbx      /* point to current task */
        incl    TH_SYSCALLS_UNIX(%rcx)  /* increment call count */

        /* Check for active vtimers in the current task */
        TASK_VTIMER_CHECK(%rbx,%rcx)

        sti

        CCALL1(unix_syscall, %r15)
        /*
         * always returns through thread_exception_return
         */

Entry(hndl_mach_scall)
        TIME_TRAP_UENTRY

        movq    %gs:CPU_ACTIVE_THREAD,%rcx      /* get current thread */
        movq    TH_TASK(%rcx),%rbx      /* point to current task */
        incl    TH_SYSCALLS_MACH(%rcx)  /* increment call count */

        /* Check for active vtimers in the current task */
        TASK_VTIMER_CHECK(%rbx,%rcx)

        sti

        CCALL1(mach_call_munger, %r15)
        /*
         * always returns through thread_exception_return
         */


Entry(hndl_mdep_scall)
        TIME_TRAP_UENTRY

        /* Check for active vtimers in the current task */
        movq    %gs:CPU_ACTIVE_THREAD,%rcx      /* get current thread */
        movq    TH_TASK(%rcx),%rbx      /* point to current task */
        TASK_VTIMER_CHECK(%rbx,%rcx)

        sti

        CCALL1(machdep_syscall, %r15)
        /*
         * always returns through thread_exception_return
         */

/*
 * 64bit Tasks
 * System call entries via syscall only:
 *
 *      r15     x86_saved_state64_t
 *      rsp     kernel stack
 *
 *      both rsp and r15 are 16-byte aligned
 *      interrupts disabled
 *      direction flag cleared
 */

Entry(hndl_syscall)
        TIME_TRAP_UENTRY

        movq    %gs:CPU_ACTIVE_THREAD,%rcx      /* get current thread */
        movl    $-1, TH_IOTIER_OVERRIDE(%rcx)   /* Reset IO tier override to -1 before handling syscall */
        movq    TH_TASK(%rcx),%rbx      /* point to current task */

        /* Check for active vtimers in the current task */
        TASK_VTIMER_CHECK(%rbx,%rcx)

        /*
         * We can be here either for a mach, unix, machdep or diag syscall,
         * as indicated by the syscall class:
         */
        movl    R64_RAX(%r15), %eax     /* syscall number/class */
        movl    %eax, %edx
        andl    $(SYSCALL_CLASS_MASK), %edx     /* syscall class */
        cmpl    $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
        je      EXT(hndl_mach_scall64)
        cmpl    $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
        je      EXT(hndl_unix_scall64)
        cmpl    $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
        je      EXT(hndl_mdep_scall64)
        cmpl    $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
        je      EXT(hndl_diag_scall64)
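        /*
         * (The class field occupies the high bits of the syscall number -
         * see SYSCALL_CLASS_SHIFT/MASK in mach/i386/syscall_sw.h, included
         * above; anything not matching a known class falls through to the
         * EXC_SYSCALL exception below.)
         */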

        /* Syscall class unknown */
        sti
        CCALL3(i386_exception, $(EXC_SYSCALL), %rax, $1)
        /* no return */
1396 | ||
1397 | Entry(hndl_unix_scall64) | |
6d2010ae | 1398 | incl TH_SYSCALLS_UNIX(%rcx) /* increment call count */ |
b0d623f7 A |
1399 | sti |
1400 | ||
39236c6e | 1401 | CCALL1(unix_syscall64, %r15) |
b0d623f7 A |
1402 | /* |
1403 | * always returns through thread_exception_return | |
1404 | */ | |
1405 | ||
1406 | ||
1407 | Entry(hndl_mach_scall64) | |
6d2010ae | 1408 | incl TH_SYSCALLS_MACH(%rcx) /* increment call count */ |
b0d623f7 A |
1409 | sti |
1410 | ||
39236c6e | 1411 | CCALL1(mach_call_munger64, %r15) |
b0d623f7 A |
1412 | /* |
1413 | * always returns through thread_exception_return | |
1414 | */ | |
1415 | ||
1416 | ||
1417 | ||
1418 | Entry(hndl_mdep_scall64) | |
1419 | sti | |
1420 | ||
39236c6e | 1421 | CCALL1(machdep_syscall64, %r15) |
b0d623f7 A |
1422 | /* |
1423 | * always returns through thread_exception_return | |
1424 | */ | |
1425 | ||
Entry(hndl_diag_scall64)
        CCALL1(diagCall64, %r15)        // Call diagnostics
        test    %eax, %eax              // What kind of return is this?
        je      1f                      // - branch if bad (zero)
        jmp     EXT(return_to_user)     // Normal return, do not check asts...
1:
        sti
        CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
        /* no return */

Entry(hndl_machine_check)
        CCALL1(panic_machine_check64, %r15)
        hlt

Entry(hndl_double_fault)
        CCALL1(panic_double_fault64, %r15)
        hlt