/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/asm.h>
#include <i386/asm64.h>
#include <assym.s>
#include <mach_kdb.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/rtclock_asm.h>
#define	_ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <mach/exception_types.h>


/*
 * Low-memory compatibility-mode handlers.
 */
#define	LO_ALLINTRS		EXT(lo_allintrs)
#define	LO_ALLTRAPS		EXT(lo_alltraps)
#define	LO_SYSCALL		EXT(lo_syscall)
#define	LO_UNIX_SCALL		EXT(lo_unix_scall)
#define	LO_MACH_SCALL		EXT(lo_mach_scall)
#define	LO_MDEP_SCALL		EXT(lo_mdep_scall)
#define	LO_DIAG_SCALL		EXT(lo_diag_scall)
#define	LO_DOUBLE_FAULT		EXT(lo_df64)
#define	LO_MACHINE_CHECK	EXT(lo_mc64)

/*
 * Interrupt descriptor table and code vectors for it.
 *
 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
 * reformatted ("fixed") before use.
 * All vectors are rebased in uber-space.
 * Special vectors (e.g. double-fault) use a non-0 IST.
 */
#define	IDT64_BASE_ENTRY(vec,seg,ist,type)	 \
	.data					;\
	.long	vec				;\
	.long	KERNEL_UBER_BASE_HI32		;\
	.word	seg				;\
	.byte	ist*16				;\
	.byte	type				;\
	.long	0				;\
	.text
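
/*
 * For reference (a sketch, not normative): the fake descriptor above is
 * simply
 *	bytes  0-3	handler address, low 32 bits (vec)
 *	bytes  4-7	KERNEL_UBER_BASE_HI32 (address, high 32 bits)
 *	byte   8-9	segment selector
 *	byte  10	IST index in the high nibble (hence ist*16)
 *	byte  11	gate type/attributes
 *	bytes 12-15	zero
 * whereas the hardware 64-bit gate it must be "fixed" into (by the
 * descriptor fix-up code, fix_desc64() at this vintage) interleaves the
 * offset around the selector/IST/type fields:
 *	bytes  0-1	offset[15:0]
 *	bytes  2-3	segment selector
 *	byte   4	IST index (bits 2:0)
 *	byte   5	type/attributes
 *	bytes  6-7	offset[31:16]
 *	bytes  8-11	offset[63:32]
 *	bytes 12-15	reserved
 */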

#define	IDT64_ENTRY(vec,ist,type)		\
	IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
#define	IDT64_ENTRY_LOCAL(vec,ist,type)		\
	IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)

/*
 * Push trap number and address of compatibility mode handler,
 * then branch to common trampoline. Error already pushed.
 */
#define	EXCEP64_ERR(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)		;\
Entry(name)					;\
	push	$(LO_ALLTRAPS)			;\
	push	$(n)				;\
	jmp	L_enter_lohandler


/*
 * Push error(0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	EXCEPTION64(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)		;\
Entry(name)					;\
	push	$0				;\
	push	$(LO_ALLTRAPS)			;\
	push	$(n)				;\
	jmp	L_enter_lohandler


/*
 * Interrupt from user.
 * Push error (0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	EXCEP64_USR(n,name)			 \
	IDT64_ENTRY(name,0,U_INTR_GATE)		;\
Entry(name)					;\
	push	$0				;\
	push	$(LO_ALLTRAPS)			;\
	push	$(n)				;\
	jmp	L_enter_lohandler


/*
 * Special interrupt code from user.
 */
#define	EXCEP64_SPC_USR(n,name)			 \
	IDT64_ENTRY(name,0,U_INTR_GATE)


/*
 * Special interrupt code.
 * In 64-bit mode we may use an IST slot instead of task gates.
 */
#define	EXCEP64_IST(n,name,ist)			 \
	IDT64_ENTRY(name,ist,K_INTR_GATE)
#define	EXCEP64_SPC(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)


/*
 * Interrupt.
 * Push zero err, interrupt vector and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	INTERRUPT64(n)				 \
	IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE) ;\
	.align	FALIGN				;\
L_ ## n:					;\
	push	$0				;\
	push	$(LO_ALLINTRS)			;\
	push	$(n)				;\
	jmp	L_enter_lohandler
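
/*
 * As a concrete example of the above, INTERRUPT64(0x20) expands
 * (modulo assembler token-pasting) to an IDT entry plus this small
 * high-space stub:
 *
 *	IDT64_BASE_ENTRY(L_0x20,KERNEL64_CS,0,K_INTR_GATE)
 *	.align	FALIGN
 *	L_0x20:
 *	push	$0			// dummy error code
 *	push	$(LO_ALLINTRS)		// compat-mode handler to dispatch
 *	push	$(0x20)			// vector number
 *	jmp	L_enter_lohandler
 */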


	.data
	.align 12
Entry(master_idt64)
Entry(hi64_data_base)
	.text
	.code64
Entry(hi64_text_base)

	EXCEPTION64(0x00,t64_zero_div)
	EXCEP64_SPC(0x01,hi64_debug)
	INTERRUPT64(0x02)		/* NMI */
	EXCEP64_USR(0x03,t64_int3)
	EXCEP64_USR(0x04,t64_into)
	EXCEP64_USR(0x05,t64_bounds)
	EXCEPTION64(0x06,t64_invop)
	EXCEPTION64(0x07,t64_nofpu)
#if	MACH_KDB
	EXCEP64_IST(0x08,db_task_dbl_fault64,1)
#else
	EXCEP64_IST(0x08,hi64_double_fault,1)
#endif
	EXCEPTION64(0x09,a64_fpu_over)
	EXCEPTION64(0x0a,a64_inv_tss)
	EXCEP64_SPC(0x0b,hi64_segnp)
#if	MACH_KDB
	EXCEP64_IST(0x0c,db_task_stk_fault64,1)
#else
	EXCEP64_SPC(0x0c,hi64_stack_fault)
#endif
	EXCEP64_SPC(0x0d,hi64_gen_prot)
	EXCEP64_SPC(0x0e,hi64_page_fault)
	EXCEPTION64(0x0f,t64_trap_0f)
	EXCEPTION64(0x10,t64_fpu_err)
	EXCEPTION64(0x11,t64_trap_11)
	EXCEP64_IST(0x12,mc64,1)
	EXCEPTION64(0x13,t64_sse_err)
	EXCEPTION64(0x14,t64_trap_14)
	EXCEPTION64(0x15,t64_trap_15)
	EXCEPTION64(0x16,t64_trap_16)
	EXCEPTION64(0x17,t64_trap_17)
	EXCEPTION64(0x18,t64_trap_18)
	EXCEPTION64(0x19,t64_trap_19)
	EXCEPTION64(0x1a,t64_trap_1a)
	EXCEPTION64(0x1b,t64_trap_1b)
	EXCEPTION64(0x1c,t64_trap_1c)
	EXCEPTION64(0x1d,t64_trap_1d)
	EXCEPTION64(0x1e,t64_trap_1e)
	EXCEPTION64(0x1f,t64_trap_1f)

	INTERRUPT64(0x20)
	INTERRUPT64(0x21)
	INTERRUPT64(0x22)
	INTERRUPT64(0x23)
	INTERRUPT64(0x24)
	INTERRUPT64(0x25)
	INTERRUPT64(0x26)
	INTERRUPT64(0x27)
	INTERRUPT64(0x28)
	INTERRUPT64(0x29)
	INTERRUPT64(0x2a)
	INTERRUPT64(0x2b)
	INTERRUPT64(0x2c)
	INTERRUPT64(0x2d)
	INTERRUPT64(0x2e)
	INTERRUPT64(0x2f)

	INTERRUPT64(0x30)
	INTERRUPT64(0x31)
	INTERRUPT64(0x32)
	INTERRUPT64(0x33)
	INTERRUPT64(0x34)
	INTERRUPT64(0x35)
	INTERRUPT64(0x36)
	INTERRUPT64(0x37)
	INTERRUPT64(0x38)
	INTERRUPT64(0x39)
	INTERRUPT64(0x3a)
	INTERRUPT64(0x3b)
	INTERRUPT64(0x3c)
	INTERRUPT64(0x3d)
	INTERRUPT64(0x3e)
	INTERRUPT64(0x3f)

	INTERRUPT64(0x40)
	INTERRUPT64(0x41)
	INTERRUPT64(0x42)
	INTERRUPT64(0x43)
	INTERRUPT64(0x44)
	INTERRUPT64(0x45)
	INTERRUPT64(0x46)
	INTERRUPT64(0x47)
	INTERRUPT64(0x48)
	INTERRUPT64(0x49)
	INTERRUPT64(0x4a)
	INTERRUPT64(0x4b)
	INTERRUPT64(0x4c)
	INTERRUPT64(0x4d)
	INTERRUPT64(0x4e)
	INTERRUPT64(0x4f)

	INTERRUPT64(0x50)
	INTERRUPT64(0x51)
	INTERRUPT64(0x52)
	INTERRUPT64(0x53)
	INTERRUPT64(0x54)
	INTERRUPT64(0x55)
	INTERRUPT64(0x56)
	INTERRUPT64(0x57)
	INTERRUPT64(0x58)
	INTERRUPT64(0x59)
	INTERRUPT64(0x5a)
	INTERRUPT64(0x5b)
	INTERRUPT64(0x5c)
	INTERRUPT64(0x5d)
	INTERRUPT64(0x5e)
	INTERRUPT64(0x5f)

	INTERRUPT64(0x60)
	INTERRUPT64(0x61)
	INTERRUPT64(0x62)
	INTERRUPT64(0x63)
	INTERRUPT64(0x64)
	INTERRUPT64(0x65)
	INTERRUPT64(0x66)
	INTERRUPT64(0x67)
	INTERRUPT64(0x68)
	INTERRUPT64(0x69)
	INTERRUPT64(0x6a)
	INTERRUPT64(0x6b)
	INTERRUPT64(0x6c)
	INTERRUPT64(0x6d)
	INTERRUPT64(0x6e)
	INTERRUPT64(0x6f)

	INTERRUPT64(0x70)
	INTERRUPT64(0x71)
	INTERRUPT64(0x72)
	INTERRUPT64(0x73)
	INTERRUPT64(0x74)
	INTERRUPT64(0x75)
	INTERRUPT64(0x76)
	INTERRUPT64(0x77)
	INTERRUPT64(0x78)
	INTERRUPT64(0x79)
	INTERRUPT64(0x7a)
	INTERRUPT64(0x7b)
	INTERRUPT64(0x7c)
	INTERRUPT64(0x7d)
	INTERRUPT64(0x7e)
	EXCEP64_USR(0x7f,t64_dtrace_ret)

	EXCEP64_SPC_USR(0x80,hi64_unix_scall)
	EXCEP64_SPC_USR(0x81,hi64_mach_scall)
	EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
	EXCEP64_SPC_USR(0x83,hi64_diag_scall)

	INTERRUPT64(0x84)
	INTERRUPT64(0x85)
	INTERRUPT64(0x86)
	INTERRUPT64(0x87)
	INTERRUPT64(0x88)
	INTERRUPT64(0x89)
	INTERRUPT64(0x8a)
	INTERRUPT64(0x8b)
	INTERRUPT64(0x8c)
	INTERRUPT64(0x8d)
	INTERRUPT64(0x8e)
	INTERRUPT64(0x8f)

	INTERRUPT64(0x90)
	INTERRUPT64(0x91)
	INTERRUPT64(0x92)
	INTERRUPT64(0x93)
	INTERRUPT64(0x94)
	INTERRUPT64(0x95)
	INTERRUPT64(0x96)
	INTERRUPT64(0x97)
	INTERRUPT64(0x98)
	INTERRUPT64(0x99)
	INTERRUPT64(0x9a)
	INTERRUPT64(0x9b)
	INTERRUPT64(0x9c)
	INTERRUPT64(0x9d)
	INTERRUPT64(0x9e)
	INTERRUPT64(0x9f)

	INTERRUPT64(0xa0)
	INTERRUPT64(0xa1)
	INTERRUPT64(0xa2)
	INTERRUPT64(0xa3)
	INTERRUPT64(0xa4)
	INTERRUPT64(0xa5)
	INTERRUPT64(0xa6)
	INTERRUPT64(0xa7)
	INTERRUPT64(0xa8)
	INTERRUPT64(0xa9)
	INTERRUPT64(0xaa)
	INTERRUPT64(0xab)
	INTERRUPT64(0xac)
	INTERRUPT64(0xad)
	INTERRUPT64(0xae)
	INTERRUPT64(0xaf)

	INTERRUPT64(0xb0)
	INTERRUPT64(0xb1)
	INTERRUPT64(0xb2)
	INTERRUPT64(0xb3)
	INTERRUPT64(0xb4)
	INTERRUPT64(0xb5)
	INTERRUPT64(0xb6)
	INTERRUPT64(0xb7)
	INTERRUPT64(0xb8)
	INTERRUPT64(0xb9)
	INTERRUPT64(0xba)
	INTERRUPT64(0xbb)
	INTERRUPT64(0xbc)
	INTERRUPT64(0xbd)
	INTERRUPT64(0xbe)
	INTERRUPT64(0xbf)

	INTERRUPT64(0xc0)
	INTERRUPT64(0xc1)
	INTERRUPT64(0xc2)
	INTERRUPT64(0xc3)
	INTERRUPT64(0xc4)
	INTERRUPT64(0xc5)
	INTERRUPT64(0xc6)
	INTERRUPT64(0xc7)
	INTERRUPT64(0xc8)
	INTERRUPT64(0xc9)
	INTERRUPT64(0xca)
	INTERRUPT64(0xcb)
	INTERRUPT64(0xcc)
	INTERRUPT64(0xcd)
	INTERRUPT64(0xce)
	INTERRUPT64(0xcf)

	INTERRUPT64(0xd0)
	INTERRUPT64(0xd1)
	INTERRUPT64(0xd2)
	INTERRUPT64(0xd3)
	INTERRUPT64(0xd4)
	INTERRUPT64(0xd5)
	INTERRUPT64(0xd6)
	INTERRUPT64(0xd7)
	INTERRUPT64(0xd8)
	INTERRUPT64(0xd9)
	INTERRUPT64(0xda)
	INTERRUPT64(0xdb)
	INTERRUPT64(0xdc)
	INTERRUPT64(0xdd)
	INTERRUPT64(0xde)
	INTERRUPT64(0xdf)

	INTERRUPT64(0xe0)
	INTERRUPT64(0xe1)
	INTERRUPT64(0xe2)
	INTERRUPT64(0xe3)
	INTERRUPT64(0xe4)
	INTERRUPT64(0xe5)
	INTERRUPT64(0xe6)
	INTERRUPT64(0xe7)
	INTERRUPT64(0xe8)
	INTERRUPT64(0xe9)
	INTERRUPT64(0xea)
	INTERRUPT64(0xeb)
	INTERRUPT64(0xec)
	INTERRUPT64(0xed)
	INTERRUPT64(0xee)
	INTERRUPT64(0xef)

	INTERRUPT64(0xf0)
	INTERRUPT64(0xf1)
	INTERRUPT64(0xf2)
	INTERRUPT64(0xf3)
	INTERRUPT64(0xf4)
	INTERRUPT64(0xf5)
	INTERRUPT64(0xf6)
	INTERRUPT64(0xf7)
	INTERRUPT64(0xf8)
	INTERRUPT64(0xf9)
	INTERRUPT64(0xfa)
	INTERRUPT64(0xfb)
	INTERRUPT64(0xfc)
	INTERRUPT64(0xfd)
	INTERRUPT64(0xfe)
	EXCEPTION64(0xff,t64_preempt)


	.text
/*
 *
 * Trap/interrupt entry points.
 *
 * All traps must create the following 32-bit save area on the PCB "stack"
 * - this is identical to the legacy mode 32-bit case:
 *
 *	gs
 *	fs
 *	es
 *	ds
 *	edi
 *	esi
 *	ebp
 *	cr2 (defined only for page fault)
 *	ebx
 *	edx
 *	ecx
 *	eax
 *	trap number
 *	error code
 *	eip
 *	cs
 *	eflags
 *	user esp - if from user
 *	user ss  - if from user
 *
 * Above this is the trap number and compatibility mode handler address
 * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
 *
 *	(trapno, trapfn)
 *	err
 *	rip
 *	cs
 *	rflags
 *	rsp
 *	ss
 *
 */
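
/*
 * (The 32-bit save area above corresponds to the kernel's
 * x86_saved_state32 structure - see osfmk/mach/i386/thread_status.h;
 * the R32_*/R64_*, ISC32_* and ISF64_* displacements used below are
 * generated into assym.s from those C definitions, which keeps the
 * assembly and C views of the frame in step.)
 */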

	.code32

/*
 * Control is passed here to return to the compatibility mode user.
 * At this stage we're in kernel space in compatibility mode
 * but we need to switch into 64-bit mode in the 4G-based trampoline
 * space before performing the iret.
 */
ret_to_user:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	movl	TH_PCB_IDS(%ecx),%eax	/* Obtain this thread's debug state */
	cmpl	$0,%eax			/* Is there a debug register context? */
	je	2f			/* branch if not */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
	jne	1f
	movl	DS_DR0(%eax), %ecx	/* If so, load the 32 bit DRs */
	movl	%ecx, %db0
	movl	DS_DR1(%eax), %ecx
	movl	%ecx, %db1
	movl	DS_DR2(%eax), %ecx
	movl	%ecx, %db2
	movl	DS_DR3(%eax), %ecx
	movl	%ecx, %db3
	movl	DS_DR7(%eax), %ecx
	movl	%ecx, %gs:CPU_DR7
	movl	$0, %gs:CPU_DR7 + 4
	jmp	2f
1:
	ENTER_64BIT_MODE()		/* Enter long mode */
	mov	DS64_DR0(%eax), %rcx	/* Load the full width DRs */
	mov	%rcx, %dr0
	mov	DS64_DR1(%eax), %rcx
	mov	%rcx, %dr1
	mov	DS64_DR2(%eax), %rcx
	mov	%rcx, %dr2
	mov	DS64_DR3(%eax), %rcx
	mov	%rcx, %dr3
	mov	DS64_DR7(%eax), %rcx
	mov	%rcx, %gs:CPU_DR7
	jmp	3f			/* Enter uberspace */
2:
	ENTER_64BIT_MODE()
3:
	ENTER_UBERSPACE()

	/*
	 * Now switch %cr3, if necessary.
	 */
	swapgs				/* switch back to uber-kernel gs base */
	mov	%gs:CPU_TASK_CR3,%rcx
	mov	%rcx,%gs:CPU_ACTIVE_CR3
	mov	%cr3, %rax
	cmp	%rcx, %rax
	je	1f
	/* flag the copyio engine state as WINDOWS_CLEAN */
	mov	%gs:CPU_ACTIVE_THREAD,%eax
	movl	$(WINDOWS_CLEAN),TH_COPYIO_STATE(%eax)
	mov	%rcx,%cr3		/* switch to user's address space */
1:

	mov	%gs:CPU_DR7, %rax	/* Is there a debug control register? */
	cmp	$0, %rax
	je	1f
	mov	%rax, %dr7		/* Set DR7 */
	movq	$0, %gs:CPU_DR7
1:

	/*
	 * Adjust stack to use uber-space.
	 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp		/* relocate into uber-space */
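	/*
	 * The two shifts above implement, in effect,
	 *	%rsp = (KERNEL_UBER_BASE_HI32 << 32) | (%rsp & 0xffffffff):
	 * shl pushes the low 32 bits of %rsp into the upper half, and shrd
	 * shifts them back down while feeding the uber-space prefix in from
	 * %rax. E.g. if KERNEL_UBER_BASE_HI32 were 0xffffff80, a stack
	 * pointer of 0x1fe03fa0 would relocate to 0xffffff801fe03fa0.
	 */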

	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	jne	L_64bit_return
	jmp	L_32bit_return

ret_to_kernel:
	ENTER_64BIT_MODE()
	ENTER_UBERSPACE()

	swapgs				/* switch back to uber-kernel gs base */

	/*
	 * Adjust stack to use uber-space.
	 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp		/* relocate into uber-space */

	/* Check for return to 64-bit kernel space (EFI today) */
	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	jne	L_64bit_return
	/* fall through for 32-bit return */

L_32bit_return:
	/*
	 * Restore registers into the machine state for iret.
	 */
	movl	R32_EIP(%rsp), %eax
	movl	%eax, ISC32_RIP(%rsp)
	movl	R32_EFLAGS(%rsp), %eax
	movl	%eax, ISC32_RFLAGS(%rsp)
	movl	R32_CS(%rsp), %eax
	movl	%eax, ISC32_CS(%rsp)
	movl	R32_UESP(%rsp), %eax
	movl	%eax, ISC32_RSP(%rsp)
	movl	R32_SS(%rsp), %eax
	movl	%eax, ISC32_SS(%rsp)

	/*
	 * Restore general 32-bit registers
	 */
	movl	R32_EAX(%rsp), %eax
	movl	R32_EBX(%rsp), %ebx
	movl	R32_ECX(%rsp), %ecx
	movl	R32_EDX(%rsp), %edx
	movl	R32_EBP(%rsp), %ebp
	movl	R32_ESI(%rsp), %esi
	movl	R32_EDI(%rsp), %edi

	/*
	 * Restore segment registers. We may take an exception here but
	 * we've got enough space left in the save frame area to absorb
	 * a hardware frame plus the trapfn and trapno.
	 */
	swapgs
EXT(ret32_set_ds):
	movw	R32_DS(%rsp), %ds
EXT(ret32_set_es):
	movw	R32_ES(%rsp), %es
EXT(ret32_set_fs):
	movw	R32_FS(%rsp), %fs
EXT(ret32_set_gs):
	movw	R32_GS(%rsp), %gs

	add	$(ISC32_OFFSET)+8+8+8, %rsp	/* pop compat frame +
						   trapno, trapfn and error */
	cmp	$(SYSENTER_CS),ISF64_CS-8-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_fast_exit
EXT(ret32_iret):
	iretq				/* return from interrupt */

L_fast_exit:
	pop	%rdx			/* user return eip */
	pop	%rcx			/* pop and toss cs */
	andl	$(~EFL_IF), (%rsp)	/* clear interrupts enable, sti below */
	popf				/* flags - carry denotes failure */
	pop	%rcx			/* user return esp */
	.code32
	sti				/* interrupts enabled after sysexit */
	sysexit				/* 32-bit sysexit */
	.code64
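	/*
	 * Register contract for the sysexit above (architectural): %edx
	 * supplies the user %eip and %ecx the user %esp - hence the two
	 * pops landing the return address and stack pointer there - while
	 * cs/ss are derived from the IA32_SYSENTER_CS MSR, not the stack.
	 */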

L_64bit_return:
	/*
	 * Set the GS Base MSR with the user's gs base.
	 */
	movl	%gs:CPU_UBER_USER_GS_BASE, %eax
	movl	%gs:CPU_UBER_USER_GS_BASE+4, %edx
	movl	$(MSR_IA32_GS_BASE), %ecx
	swapgs
	testb	$3, R64_CS(%rsp)	/* returning to user-space? */
	jz	1f
	wrmsr				/* set 64-bit base */
1:

	/*
	 * Restore general 64-bit registers
	 */
	mov	R64_R15(%rsp), %r15
	mov	R64_R14(%rsp), %r14
	mov	R64_R13(%rsp), %r13
	mov	R64_R12(%rsp), %r12
	mov	R64_R11(%rsp), %r11
	mov	R64_R10(%rsp), %r10
	mov	R64_R9(%rsp), %r9
	mov	R64_R8(%rsp), %r8
	mov	R64_RSI(%rsp), %rsi
	mov	R64_RDI(%rsp), %rdi
	mov	R64_RBP(%rsp), %rbp
	mov	R64_RDX(%rsp), %rdx
	mov	R64_RBX(%rsp), %rbx
	mov	R64_RCX(%rsp), %rcx
	mov	R64_RAX(%rsp), %rax

	add	$(ISS64_OFFSET)+8+8+8, %rsp	/* pop saved state frame +
						   trapno, trapfn and error */
	cmpl	$(SYSCALL_CS),ISF64_CS-8-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_sysret
EXT(ret64_iret):
	iretq				/* return from interrupt */

L_sysret:
	/*
	 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
	 *	rcx	user rip
	 *	r11	user rflags
	 *	rsp	user stack pointer
	 */
	mov	ISF64_RIP-8-8-8(%rsp), %rcx
	mov	ISF64_RFLAGS-8-8-8(%rsp), %r11
	mov	ISF64_RSP-8-8-8(%rsp), %rsp
	sysretq				/* return from system call */

/*
 * Common path to enter locore handlers.
 */
L_enter_lohandler:
	swapgs				/* switch to kernel gs (cpu_data) */
L_enter_lohandler_continue:
	cmpl	$(USER64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter		/* this is a 64-bit user task */
	cmpl	$(KERNEL64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter		/* we're in 64-bit (EFI) code */
	jmp	L_32bit_enter

/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */

Entry(hi64_unix_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_unix_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_UNIX_SCALL)
	push	$(UNIX_INT)
	jmp	L_32bit_enter_check


Entry(hi64_mach_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_mach_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_MACH_SCALL)
	push	$(MACH_INT)
	jmp	L_32bit_enter_check


Entry(hi64_mdep_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_mdep_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_MDEP_SCALL)
	push	$(MACHDEP_INT)
	jmp	L_32bit_enter_check


Entry(hi64_diag_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_diag_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_DIAG_SCALL)
	push	$(DIAG_INT)
	jmp	L_32bit_enter_check

Entry(hi64_syscall)
	swapgs				/* Kapow! get per-cpu data area */
L_syscall_continue:
	mov	%rsp, %gs:CPU_UBER_TMP	/* save user stack */
	mov	%gs:CPU_UBER_ISF, %rsp	/* switch stack to pcb */

	/*
	 * Save values in the ISF frame in the PCB
	 * to cons up the saved machine state.
	 */
	movl	$(USER_DS), ISF64_SS(%rsp)
	movl	$(SYSCALL_CS), ISF64_CS(%rsp)	/* cs - a pseudo-segment */
	mov	%r11, ISF64_RFLAGS(%rsp)	/* rflags */
	mov	%rcx, ISF64_RIP(%rsp)		/* rip */
	mov	%gs:CPU_UBER_TMP, %rcx
	mov	%rcx, ISF64_RSP(%rsp)		/* user stack */
	mov	%rax, ISF64_ERR(%rsp)		/* err/rax - syscall code */
	movl	$(T_SYSCALL), ISF64_TRAPNO(%rsp)	/* trapno */
	movl	$(LO_SYSCALL), ISF64_TRAPFN(%rsp)
	jmp	L_64bit_enter		/* this can only be a 64-bit task */


L_32bit_enter_check:
	/*
	 * Check we're not a confused 64-bit user.
	 */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
	jmp	L_32bit_enter

/*
 * sysenter entry point
 * Requires user code to set up:
 *	edx: user instruction pointer (return address)
 *	ecx: user stack pointer
 *		on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *       or requiring ecx to be preserved.
 */
Entry(hi64_sysenter)
	mov	(%rsp), %rsp		/* switch from temporary stack to pcb */
	/*
	 * Push values on to the PCB stack
	 * to cons up the saved machine state.
	 */
	push	$(USER_DS)		/* ss */
	push	%rcx			/* uesp */
	pushf				/* flags */
	/*
	 * Clear, among others, the Nested Task (NT) flag bit;
	 * this is zeroed by INT, but not by SYSENTER.
	 */
	push	$0
	popf
	push	$(SYSENTER_CS)		/* cs */
	swapgs				/* switch to kernel gs (cpu_data) */
L_sysenter_continue:
	push	%rdx			/* eip */
	push	%rax			/* err/eax - syscall code */
	push	$0
	push	$(T_SYSENTER)
	orl	$(EFL_IF), ISF64_RFLAGS(%rsp)
	movl	$(LO_MACH_SCALL), ISF64_TRAPFN(%rsp)
	testl	%eax, %eax
	js	L_32bit_enter_check
	movl	$(LO_UNIX_SCALL), ISF64_TRAPFN(%rsp)
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
/* If the caller (typically LibSystem) has recorded the cumulative size of
 * the arguments in EAX, copy them over from the user stack directly.
 * We recover from exceptions inline: if the copy loop doesn't complete
 * due to an exception, we fall back to copyin from compatibility mode.
 * We can potentially extend this mechanism to mach traps as well (DRK).
 */
L_sysenter_copy_args:
	testl	$(I386_SYSCALL_ARG_BYTES_MASK), %eax
	jz	L_32bit_enter
	xor	%r9, %r9
	mov	%gs:CPU_UBER_ARG_STORE, %r8
	movl	%eax, %r9d
	mov	%gs:CPU_UBER_ARG_STORE_VALID, %r12
	xor	%r10, %r10
	shrl	$(I386_SYSCALL_ARG_DWORDS_SHIFT), %r9d
	andl	$(I386_SYSCALL_ARG_DWORDS_MASK), %r9d
	movl	$0, (%r12)
EXT(hi64_sysenter_user_arg_copy):
0:
	movl	4(%rcx, %r10, 4), %r11d
	movl	%r11d, (%r8, %r10, 4)
	incl	%r10d
	decl	%r9d
	jnz	0b
	movl	$1, (%r12)
	/* Fall through to 32-bit handler */
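
/*
 * Arithmetic of the copy loop above: the dword count is recovered as
 *	count = (%eax >> I386_SYSCALL_ARG_DWORDS_SHIFT)
 *			& I386_SYSCALL_ARG_DWORDS_MASK
 * and copying starts at 4(%rcx), one dword above the user stack pointer,
 * so the stub's return address at (%rcx) is skipped. The store-valid
 * flag brackets the loop (0 before, 1 after): if a fault interrupts the
 * copy, the flag remains 0 and the slower copyin path is used instead.
 */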

L_32bit_enter:
	cld
	/*
	 * Make space for the compatibility save area.
	 */
	sub	$(ISC32_OFFSET), %rsp
	movl	$(SS_32), SS_FLAVOR(%rsp)

	/*
	 * Save segment regs
	 */
	mov	%ds, R32_DS(%rsp)
	mov	%es, R32_ES(%rsp)
	mov	%fs, R32_FS(%rsp)
	mov	%gs, R32_GS(%rsp)

	/*
	 * Save general 32-bit registers
	 */
	mov	%eax, R32_EAX(%rsp)
	mov	%ebx, R32_EBX(%rsp)
	mov	%ecx, R32_ECX(%rsp)
	mov	%edx, R32_EDX(%rsp)
	mov	%ebp, R32_EBP(%rsp)
	mov	%esi, R32_ESI(%rsp)
	mov	%edi, R32_EDI(%rsp)

	/* Unconditionally save cr2; only meaningful on page faults */
	mov	%cr2, %rax
	mov	%eax, R32_CR2(%rsp)

	/*
	 * Copy registers already saved in the machine state
	 * (in the interrupt stack frame) into the compat save area.
	 */
	mov	ISC32_RIP(%rsp), %eax
	mov	%eax, R32_EIP(%rsp)
	mov	ISC32_RFLAGS(%rsp), %eax
	mov	%eax, R32_EFLAGS(%rsp)
	mov	ISC32_CS(%rsp), %eax
	mov	%eax, R32_CS(%rsp)
	testb	$3, %al
	jz	1f
	xor	%ebp, %ebp
1:
	mov	ISC32_RSP(%rsp), %eax
	mov	%eax, R32_UESP(%rsp)
	mov	ISC32_SS(%rsp), %eax
	mov	%eax, R32_SS(%rsp)
L_32bit_enter_after_fault:
	mov	ISC32_TRAPNO(%rsp), %ebx	/* %ebx := trapno for later */
	mov	%ebx, R32_TRAPNO(%rsp)
	mov	ISC32_ERR(%rsp), %eax
	mov	%eax, R32_ERR(%rsp)
	mov	ISC32_TRAPFN(%rsp), %edx

/*
 * Common point to enter lo_handler in compatibility mode:
 *	%ebx	trapno
 *	%edx	locore handler address
 */
L_enter_lohandler2:
	/*
	 * Switch address space to kernel
	 * if not shared space and not already mapped.
	 * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
	 */
	mov	%cr3, %rax
	mov	%gs:CPU_TASK_CR3, %rcx
	cmp	%rax, %rcx		/* is the task's cr3 loaded? */
	jne	1f
	cmpl	$(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
	je	2f
1:
	mov	%gs:CPU_KERNEL_CR3, %rcx
	cmp	%rax, %rcx
	je	2f
	mov	%rcx, %cr3
	mov	%rcx, %gs:CPU_ACTIVE_CR3
2:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* Get the active thread */
	cmpl	$0, TH_PCB_IDS(%ecx)	/* Is there a debug register state? */
	jz	21f
	xor	%ecx, %ecx		/* If so, reset DR7 (the control) */
	mov	%rcx, %dr7
21:
	/*
	 * Switch to compatibility mode.
	 * Then establish kernel segments.
	 */
	swapgs				/* Done with uber-kernel gs */
	ENTER_COMPAT_MODE()

	/*
	 * Now in compatibility mode and running in compatibility space
	 * prepare to enter the locore handler.
	 *	%ebx	trapno
	 *	%edx	lo_handler pointer
	 * Note: the stack pointer (now 32-bit) is now directly addressing
	 * the kernel below 4G and therefore is automagically re-based.
	 */
	mov	$(KERNEL_DS), %eax
	mov	%eax, %ss
	mov	%eax, %ds
	mov	%eax, %es
	mov	%eax, %fs
	mov	$(CPU_DATA_GS), %eax
	mov	%eax, %gs

	incl	%gs:hwIntCnt(,%ebx,4)	/* Bump the trap/intr count */

	/* Dispatch the designated lo handler */
	jmp	*%edx

	.code64
L_64bit_entry_reject:
	/*
	 * Here for a 64-bit user attempting an invalid kernel entry.
	 */
	movl	$(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
	movl	$(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
	/* Fall through... */

L_64bit_enter:
	/*
	 * Here for a 64-bit user task, or special 64-bit kernel code.
	 * Make space for the save area.
	 */
	sub	$(ISS64_OFFSET), %rsp
	movl	$(SS_64), SS_FLAVOR(%rsp)

	cld
	/*
	 * Save segment regs
	 */
	mov	%fs, R64_FS(%rsp)
	mov	%gs, R64_GS(%rsp)

	/* Save general-purpose registers */
	mov	%rax, R64_RAX(%rsp)
	mov	%rcx, R64_RCX(%rsp)
	mov	%rbx, R64_RBX(%rsp)
	mov	%rbp, R64_RBP(%rsp)
	mov	%r11, R64_R11(%rsp)
	mov	%r12, R64_R12(%rsp)
	mov	%r13, R64_R13(%rsp)
	mov	%r14, R64_R14(%rsp)
	mov	%r15, R64_R15(%rsp)

	/* cr2 is significant only for page-faults */
	mov	%cr2, %rax
	mov	%rax, R64_CR2(%rsp)

	/* Other registers (which may contain syscall args) */
	mov	%rdi, R64_RDI(%rsp)	/* arg0 .. */
	mov	%rsi, R64_RSI(%rsp)
	mov	%rdx, R64_RDX(%rsp)
	mov	%r10, R64_R10(%rsp)
	mov	%r8, R64_R8(%rsp)
	mov	%r9, R64_R9(%rsp)	/* .. arg5 */

L_64bit_enter_after_fault:
	/*
	 * At this point we're almost ready to join the common lo-entry code.
	 */
	mov	R64_TRAPNO(%rsp), %ebx
	mov	R64_TRAPFN(%rsp), %edx

	testb	$3, ISF64_CS+ISS64_OFFSET(%rsp)
	jz	1f
	xor	%rbp, %rbp
1:
	jmp	L_enter_lohandler2

Entry(hi64_page_fault)
	push	$(LO_ALLTRAPS)
	push	$(T_PAGE_FAULT)
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler
	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	jne	hi64_kernel_trap
	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_32bit_enter
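
/*
 * The second compare above completes the recovery promised at
 * hi64_sysenter_user_arg_copy: if the page fault hit the user arg-copy
 * loop, ISF64_RSP still holds the PCB stack pointer as it was when the
 * fault was taken, so we simply unwind to it and re-enter the 32-bit
 * path, leaving the arg-store-valid flag zero so that copyin is used.
 */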

/*
 * Debug trap.  Check for single-stepping across system call into
 * kernel.  If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 */
Entry(hi64_debug)
	swapgs				/* set %gs for cpu data */
	push	$0			/* error code */
	push	$(LO_ALLTRAPS)
	push	$(T_DEBUG)

	testb	$3, ISF64_CS(%rsp)
	jnz	L_enter_lohandler_continue

	/*
	 * trap came from kernel mode
	 */
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
	jne	6f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_mach_scall_continue	/* continue system call entry */
6:
	cmpl	$(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
	jne	5f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_mdep_scall_continue	/* continue system call entry */
5:
	cmpl	$(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
	jne	4f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_unix_scall_continue	/* continue system call entry */
4:
	cmpl	$(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue
	/*
	 * Interrupt stack frame has been pushed on the temporary stack.
	 * We have to switch to pcb stack and copy eflags.
	 */
	add	$40,%rsp		/* remove trapno/trapfn/err/rip/cs */
	push	%rcx			/* save %rcx - user stack pointer */
	mov	32(%rsp),%rcx		/* top of intr stack -> pcb stack */
	xchg	%rcx,%rsp		/* switch to pcb stack */
	push	$(USER_DS)		/* ss */
	push	(%rcx)			/* saved %rcx into rsp slot */
	push	8(%rcx)			/* rflags */
	mov	(%rcx),%rcx		/* restore %rcx */
	push	$(SYSENTER_TF_CS)	/* cs - not SYSENTER_CS for iret path */
	jmp	L_sysenter_continue	/* continue sysenter entry */


Entry(hi64_double_fault)
	swapgs				/* set %gs for cpu data */
	push	$(LO_DOUBLE_FAULT)
	push	$(T_DOUBLE_FAULT)

	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_syscall)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue

	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_syscall_continue


/*
 * General protection or segment-not-present fault.
 * Check for a GP/NP fault in the kernel_return
 * sequence; if there, report it as a GP/NP fault on the user's instruction.
 *
 * rsp->	 0	ISF64_TRAPNO:	trap code (NP or GP)
 *		 8	ISF64_TRAPFN:	trap function
 *		16	ISF64_ERR:	segment number in error (error code)
 *		24	ISF64_RIP:	rip
 *		32	ISF64_CS:	cs
 *		40	ISF64_RFLAGS:	rflags
 *		48	ISF64_RSP:	rsp
 *		56	ISF64_SS:	ss
 *		64	old registers (trap is from kernel)
 */
Entry(hi64_gen_prot)
	push	$(LO_ALLTRAPS)
	push	$(T_GENERAL_PROTECTION)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(hi64_stack_fault)
	push	$(LO_ALLTRAPS)
	push	$(T_STACK_FAULT)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(hi64_segnp)
	push	$(LO_ALLTRAPS)
	push	$(T_SEGMENT_NOT_PRESENT)
					/* indicate fault type */
trap_check_kernel_exit:
	testb	$3,ISF64_CS(%rsp)
	jnz	L_enter_lohandler
	/* trap was from kernel mode, so */
	/* check for the kernel exit sequence */
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(ret32_iret)), ISF64_RIP(%rsp)
	je	L_fault_iret32
	cmpl	$(EXT(ret32_set_ds)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_es)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_fs)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_gs)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg

	cmpl	$(EXT(ret64_iret)), ISF64_RIP(%rsp)
	je	L_fault_iret64

	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	cmove	ISF64_RSP(%rsp), %rsp
	je	L_32bit_enter

hi64_kernel_trap:
	/*
	 * Here after taking an unexpected trap from kernel mode - perhaps
	 * while running in the trampolines hereabouts.
	 * Make sure we're not on the PCB stack; if so, move to the kernel stack.
	 * This is likely a fatal condition.
	 * But first, try to be sure we have the kernel gs base active...
	 */
	cmpq	$0, %gs:CPU_THIS	/* test gs_base */
	js	1f			/* -ve kernel addr, no swap */
	swapgs				/* +ve user addr, swap */
1:
	movq	%rax, %gs:CPU_UBER_TMP	/* save %rax */
	movq	%gs:CPU_UBER_ISF, %rax	/* PCB stack addr */
	subq	%rsp, %rax
	cmpq	$(PAGE_SIZE), %rax	/* current stack in PCB? */
	movq	%gs:CPU_UBER_TMP, %rax	/* restore %rax */
	ja	L_enter_lohandler_continue	/* stack not in PCB */

	/*
	 * Here if %rsp is in the PCB
	 * Copy the interrupt stack frame from PCB stack to kernel stack
	 */
	movq	%gs:CPU_KERNEL_STACK, %rax	/* note: %rax restored below */
	xchgq	%rax, %rsp
	pushq	ISF64_SS(%rax)
	pushq	ISF64_RSP(%rax)
	pushq	ISF64_RFLAGS(%rax)
	pushq	ISF64_CS(%rax)
	pushq	ISF64_RIP(%rax)
	pushq	ISF64_ERR(%rax)
	pushq	ISF64_TRAPFN(%rax)
	pushq	ISF64_TRAPNO(%rax)
	movq	%gs:CPU_UBER_TMP, %rax	/* restore %rax */
	jmp	L_enter_lohandler_continue


/*
 * GP/NP fault on IRET: CS or SS is in error.
 * All registers contain the user's values.
 *
 * on SP is
 *   0	ISF64_TRAPNO:	trap code (NP or GP)
 *   8	ISF64_TRAPFN:	trap function
 *  16	ISF64_ERR:	segment number in error (error code)
 *  24	ISF64_RIP:	rip
 *  32	ISF64_CS:	cs
 *  40	ISF64_RFLAGS:	rflags
 *  48	ISF64_RSP:	rsp
 *  56	ISF64_SS:	ss	--> new trapno/trapfn
 *  64	pad		--> new errcode
 *  72	user rip
 *  80	user cs
 *  88	user rflags
 *  96	user rsp
 * 104	user ss	(16-byte aligned)
 */
L_fault_iret32:
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don't need saved rip) */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	%rax, ISF64_SS(%rsp)	/* put in user trap number */
	mov	ISF64_ERR(%rsp), %rax
	mov	%rax, 8+ISF64_SS(%rsp)	/* put in user errcode */
	mov	ISF64_RIP(%rsp), %rax	/* restore rax */
	add	$(ISF64_SS), %rsp	/* reset to original frame */
					/* now treat as fault from user */
	swapgs
	jmp	L_32bit_enter

L_fault_iret64:
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don't need saved rip) */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	%rax, ISF64_SS(%rsp)	/* put in user trap number */
	mov	ISF64_ERR(%rsp), %rax
	mov	%rax, 8+ISF64_SS(%rsp)	/* put in user errcode */
	mov	ISF64_RIP(%rsp), %rax	/* restore rax */
	add	$(ISF64_SS), %rsp	/* reset to original frame */
					/* now treat as fault from user */
	swapgs
	jmp	L_64bit_enter

/*
 * Fault restoring a segment register.  All of the saved state is still
 * on the stack untouched since we didn't move the stack pointer.
 */
L_32bit_fault_set_seg:
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	ISF64_ERR(%rsp), %rdx
	mov	ISF64_RSP(%rsp), %rsp	/* reload stack prior to fault */
	mov	%rax,ISC32_TRAPNO(%rsp)
	mov	%rdx,ISC32_ERR(%rsp)
					/* now treat as fault from user */
					/* except that all the state is */
					/* already saved - we just have to */
					/* move the trapno and error into */
					/* the compatibility frame */
	swapgs
	jmp	L_32bit_enter_after_fault


/*
 * Fatal exception handlers:
 */
Entry(db_task_dbl_fault64)
	push	$(LO_DOUBLE_FAULT)
	push	$(T_DOUBLE_FAULT)
	jmp	L_enter_lohandler

Entry(db_task_stk_fault64)
	push	$(LO_DOUBLE_FAULT)
	push	$(T_STACK_FAULT)
	jmp	L_enter_lohandler

Entry(mc64)
	push	$(0)			/* Error */
	push	$(LO_MACHINE_CHECK)
	push	$(T_MACHINE_CHECK)
	jmp	L_enter_lohandler


	.code32

/*
 * All task 'exceptions' enter lo_alltraps:
 *	esp	 -> x86_saved_state_t
 *
 * The rest of the state is set up as:
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_alltraps)
	movl	R32_CS(%esp),%eax	/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp)	/* 64-bit? */
	jne	1f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
1:
	testb	$3,%al
	jz	trap_from_kernel
	/* user mode trap */
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	TH_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	CCALL1(user_trap, %ebx)		/* call user trap routine */
					/* user_trap() unmasks interrupts */
	cli				/* hold off intrs - critical section */
	xorl	%ecx,%ecx		/* don't check if we're in the PFZ */

/*
 * Return from trap or system call, checking for ASTs.
 * On lowbase PCB stack with intrs disabled
 */
Entry(return_from_trap)
	movl	%gs:CPU_ACTIVE_THREAD, %esp
	movl	TH_PCB_ISS(%esp),%esp	/* switch back to PCB stack */
	movl	%gs:CPU_PENDING_AST, %eax
	testl	%eax, %eax
	je	return_to_user		/* branch if no AST */
LEXT(return_from_trap_with_ast)
	movl	%gs:CPU_KERNEL_STACK, %ebx
	xchgl	%ebx, %esp		/* switch to kernel stack */

	testl	%ecx, %ecx		/* see if we need to check for an EIP in the PFZ */
	je	2f			/* no, go handle the AST */
	cmpl	$(SS_64), SS_FLAVOR(%ebx)	/* are we a 64-bit task? */
	je	1f
					/* no... 32-bit user mode */
	movl	R32_EIP(%ebx), %eax
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* clear frame pointer */
	CCALL1(commpage_is_in_pfz32, %eax)
	popl	%ebx			/* retrieve pointer to PCB stack */
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R32_EBX(%ebx)	/* let the PFZ know we've pended an AST */
	xchgl	%ebx, %esp		/* switch back to PCB stack */
	jmp	return_to_user
1:					/* 64-bit user mode */
	movl	R64_RIP(%ebx), %ecx
	movl	R64_RIP+4(%ebx), %eax
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* clear frame pointer */
	CCALL2(commpage_is_in_pfz64, %ecx, %eax)
	popl	%ebx			/* retrieve pointer to PCB stack */
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R64_RBX(%ebx)	/* let the PFZ know we've pended an AST */
	xchgl	%ebx, %esp		/* switch back to PCB stack */
	jmp	return_to_user
2:
	sti				/* interrupts always enabled on return to user mode */
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* Clear framepointer */
	CCALL1(i386_astintr, $0)	/* take the AST */
	cli

	popl	%esp			/* switch back to PCB stack (w/exc link) */

	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)	/* and check again (rare) */



/*
 * Trap from kernel mode.  No need to switch stacks.
 * Interrupts must be off here - we will set them to the state at the time
 * of trap as soon as it's safe for us to do so and not recurse doing
 * preemption.
 */
trap_from_kernel:
	movl	%esp, %eax		/* saved state addr */
	pushl	R32_EIP(%esp)		/* Simulate a CALL from fault point */
	pushl	%ebp			/* Extend framepointer chain */
	movl	%esp, %ebp
	CCALL1WITHSP(kernel_trap, %eax)	/* Call kernel trap handler */
	popl	%ebp
	addl	$4, %esp
	cli

	movl	%gs:CPU_PENDING_AST,%eax	/* get pending asts */
	testl	$ AST_URGENT,%eax	/* any urgent preemption? */
	je	ret_to_kernel		/* no, nothing to do */
	cmpl	$ T_PREEMPT,R32_TRAPNO(%esp)
	je	ret_to_kernel		/* T_PREEMPT handled in kernel_trap() */
	testl	$ EFL_IF,R32_EFLAGS(%esp)	/* interrupts disabled? */
	je	ret_to_kernel
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel
	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	and	EXT(kernel_stack_mask),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */
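	/*
	 * The xor/and sequence above is a cheap same-stack test:
	 *	(%esp ^ %gs:CPU_KERNEL_STACK) & kernel_stack_mask
	 * is zero only when the two addresses agree in every bit above the
	 * stack-size alignment, i.e. only when %esp lies within the current
	 * aligned kernel-stack region; the AST is taken just in that case.
	 */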

	CCALL1(i386_astintr, $1)	/* take the AST */


/*
 * All interrupts on all tasks enter here with:
 *	esp	 -> x86_saved_state_t
 *
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movl	%gs:CPU_INT_STACK_TOP,%ecx
	cmpl	%esp,%ecx
	jb	1f
	leal	-INTSTACK_SIZE(%ecx),%edx
	cmpl	%esp,%edx
	jb	int_from_intstack
1:
	xchgl	%ecx,%esp		/* switch to interrupt stack */

	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */

	subl	$8, %esp		/* for 16-byte stack alignment */
	pushl	%ecx			/* save pointer to old stack */
	movl	%ecx,%gs:CPU_INT_STATE	/* save intr state */

	TIME_INT_ENTRY			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	TH_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%gs:CPU_INT_STATE, %eax
	CCALL1(interrupt, %eax)		/* call generic interrupt routine */

	cli				/* just in case we returned with intrs enabled */
	xorl	%eax,%eax
	movl	%eax,%gs:CPU_INT_STATE	/* clear intr state pointer */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%eax
	movl	TH_PCB_FPS(%eax),%eax	/* get pcb's ifps */
	testl	%eax, %eax		/* Is there a context */
	je	1f			/* Branch if not */
	cmpl	$0, FP_VALID(%eax)	/* Check fp_valid */
	jne	1f			/* Branch if valid */
	clts				/* Clear TS */
	jmp	2f
1:
	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
2:
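	/*
	 * The CR0.TS dance above is the usual lazy-FPU protocol: if the
	 * thread owns a live FPU context (fp_valid still clear), clts
	 * leaves the FPU usable on return; otherwise TS is set so that the
	 * next floating-point instruction traps (vector 7, t64_nofpu above)
	 * and the context can be restored on demand.
	 */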
	popl	%esp			/* switch back to old stack */

	/* Load interrupted code segment into %eax */
	movl	R32_CS(%esp),%eax	/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp)	/* 64-bit? */
	jne	3f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
3:
	testb	$3,%al			/* user mode, */
	jnz	ast_from_interrupt_user	/* go handle potential ASTs */
	/*
	 * we only want to handle preemption requests if
	 * the interrupt fell in the kernel context
	 * and preemption isn't disabled
	 */
	movl	%gs:CPU_PENDING_AST,%eax
	testl	$ AST_URGENT,%eax	/* any urgent requests? */
	je	ret_to_kernel		/* no, nothing to do */

	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel		/* yes, skip it */

	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	and	EXT(kernel_stack_mask),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	/*
	 * Take an AST from kernel space.  We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
	CCALL1(i386_astintr, $1)

	jmp	ret_to_kernel


/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL
	incl	%gs:CPU_NESTED_ISTACK

	movl	%esp, %edx		/* x86_saved_state */
	CCALL1(interrupt, %edx)

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL
	decl	%gs:CPU_NESTED_ISTACK

	jmp	ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* pending ASTs? */
	je	ret_to_user		/* no, nothing to do */

	TIME_TRAP_UENTRY

	movl	$1, %ecx		/* check if we're in the PFZ */
	jmp	EXT(return_from_trap_with_ast)	/* return */


/*
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *	esp	 -> x86_saved_state32_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_unix_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */
	incl	TH_SYSCALLS_UNIX(%ecx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	sti

	CCALL1(unix_syscall, %edi)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_mach_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */
	incl	TH_SYSCALLS_MACH(%ecx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	sti

	CCALL1(mach_call_munger, %edi)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_mdep_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	sti

	CCALL1(machdep_syscall, %edi)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_diag_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	pushl	%edi			/* push PCB stack for later */

	CCALL1(diagCall, %edi)		// Call diagnostics

	cli				// Disable interruptions just in case
	cmpl	$0,%eax			// What kind of return is this?
	je	1f			// - branch if bad (zero)
	popl	%esp			// Get back the original stack
	jmp	return_to_user		// Normal return, do not check asts...
1:
	CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
					// pass what would be the diag syscall
					// error return - cause an exception
	/* no return */


return_to_user:
	TIME_TRAP_UEXIT
	jmp	ret_to_user


/*
 * 64bit Tasks
 * System call entries via syscall only:
 *
 *	esp	 -> x86_saved_state64_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_syscall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	/*
	 * We can be here either for a mach, unix, machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
	movl	R64_RAX(%edi), %eax	/* syscall number/class */
	movl	%eax, %edx
	andl	$(SYSCALL_CLASS_MASK), %edx	/* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_mach_scall)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_unix_scall)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_mdep_scall)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_diag_scall)
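
	/*
	 * For the class tests above: a syscall number carries its class in
	 * the high byte, number = (class << SYSCALL_CLASS_SHIFT) | index
	 * (see mach/i386/syscall_sw.h, included at the top of this file).
	 * For example, with the Mach class a trap with index 0x1a is issued
	 * as 0x100001a; the same index in the UNIX class would be 0x200001a.
	 */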

	sti

	/* Syscall class unknown */
	CCALL5(i386_exception, $(EXC_SYSCALL), %eax, $0, $1, $0)
	/* no return */


Entry(lo64_unix_scall)
	incl	TH_SYSCALLS_UNIX(%ecx)	/* increment call count */
	sti

	CCALL1(unix_syscall64, %edi)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo64_mach_scall)
	incl	TH_SYSCALLS_MACH(%ecx)	/* increment call count */
	sti

	CCALL1(mach_call_munger64, %edi)
	/*
	 * always returns through thread_exception_return
	 */



Entry(lo64_mdep_scall)
	sti

	CCALL1(machdep_syscall64, %edi)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo64_diag_scall)
	CCALL1(diagCall64, %edi)	// Call diagnostics

	cli				// Disable interruptions just in case
	cmpl	$0,%eax			// What kind of return is this?
	je	1f
	movl	%edi, %esp		// Get back the original stack
	jmp	return_to_user		// Normal return, do not check asts...
1:
	CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
					// pass what would be the diag syscall
					// error return - cause an exception
	/* no return */



/*
 * Compatibility mode's last gasp...
 */
Entry(lo_df64)
	movl	%esp, %eax
	CCALL1(panic_double_fault64, %eax)
	hlt

Entry(lo_mc64)
	movl	%esp, %eax
	CCALL1(panic_machine_check64, %eax)
	hlt