]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/idt64.s
3ae022e9e747a3adbaac5a3b5938eead7e35f360
[apple/xnu.git] / osfmk / i386 / idt64.s
1 /*
2 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 #include <i386/asm.h>
31 #include <i386/asm64.h>
32 #include <assym.s>
33 #include <mach_kdb.h>
34 #include <i386/eflags.h>
35 #include <i386/trap.h>
36 #define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
37 #include <mach/i386/syscall_sw.h>
38 #include <i386/postcode.h>
39 #include <i386/proc_reg.h>
40
41 /*
42 * Locore handlers.
 *
 * These are the 32-bit compatibility-mode ("lo") handler entry points that
 * the 64-bit trampolines in this file dispatch to.  Each trampoline stores
 * one of these addresses into the trap-function slot of its stack frame
 * before jumping to the common L_enter_lohandler path.
43 */
44 #define LO_ALLINTRS EXT(lo_allintrs)
45 #define LO_ALLTRAPS EXT(lo_alltraps)
46 #define LO_SYSENTER EXT(lo_sysenter)
47 #define LO_SYSCALL EXT(lo_syscall)
48 #define LO_UNIX_SCALL EXT(lo_unix_scall)
49 #define LO_MACH_SCALL EXT(lo_mach_scall)
50 #define LO_MDEP_SCALL EXT(lo_mdep_scall)
51 #define LO_DIAG_SCALL EXT(lo_diag_scall)
52 #define LO_DOUBLE_FAULT EXT(lo_df64)
53 #define LO_MACHINE_CHECK EXT(lo_mc64)
54
55 /*
56 * Interrupt descriptor table and code vectors for it.
57 *
58 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
59 * reformatted ("fixed") before use.
 * Fake layout emitted below: low 32 bits of the handler address, then
 * KERNEL_UBER_BASE_HI32 (high 32 bits of the uber-space rebased address),
 * the code segment selector, the IST index, the gate type, and 4 bytes of
 * padding.  The fixup that converts this into a real 16-byte 64-bit IDT
 * gate descriptor is not in this file.
60 * All vectors are rebased in uber-space.
61 * Special vectors (e.g. double-fault) use a non-0 IST.
62 */
63 #define IDT64_BASE_ENTRY(vec,seg,ist,type) \
64 .data ;\
65 .long vec ;\
66 .long KERNEL_UBER_BASE_HI32 ;\
67 .word seg ;\
68 .byte ist*16 ;\
69 .byte type ;\
70 .long 0 ;\
71 .text
72
 /* Descriptor for an external (EXT-mangled) handler symbol in KERNEL64_CS. */
73 #define IDT64_ENTRY(vec,ist,type) \
74 IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
 /* Same, but for a file-local label (used by INTERRUPT64 below). */
75 #define IDT64_ENTRY_LOCAL(vec,ist,type) \
76 IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)
77
78 /*
79 * Push trap number and address of compatibility mode handler,
80 * then branch to common trampoline. Error already pushed.
 *
 * Note the packing trick used by all these stubs: "push $(n)" pushes an
 * 8-byte slot holding the trap number, and the following "movl ... 4(%rsp)"
 * overwrites the upper 4 bytes of that same slot with the 32-bit lo-handler
 * address — i.e. (trapno, trapfn) share one 8-byte stack entry, as
 * described in the frame-layout comment later in this file.
81 */
82 #define EXCEP64_ERR(n,name) \
83 IDT64_ENTRY(name,0,K_INTR_GATE) ;\
84 Entry(name) ;\
85 push $(n) ;\
86 movl $(LO_ALLTRAPS), 4(%rsp) ;\
87 jmp L_enter_lohandler
88
89
90 /*
91 * Push error(0), trap number and address of compatibility mode handler,
92 * then branch to common trampoline.
 * For vectors where the CPU does not supply an error code itself.
93 */
94 #define EXCEPTION64(n,name) \
95 IDT64_ENTRY(name,0,K_INTR_GATE) ;\
96 Entry(name) ;\
97 push $0 ;\
98 push $(n) ;\
99 movl $(LO_ALLTRAPS), 4(%rsp) ;\
100 jmp L_enter_lohandler
101
102
103 /*
104 * Interrupt from user.
105 * Push error (0), trap number and address of compatibility mode handler,
106 * then branch to common trampoline.
 * U_INTR_GATE makes the gate reachable from user privilege (e.g. int3).
107 */
108 #define EXCEP64_USR(n,name) \
109 IDT64_ENTRY(name,0,U_INTR_GATE) ;\
110 Entry(name) ;\
111 push $0 ;\
112 push $(n) ;\
113 movl $(LO_ALLTRAPS), 4(%rsp) ;\
114 jmp L_enter_lohandler
115
116
117 /*
118 * Special interrupt code from user.
 * Descriptor only — the handler entry point is written out by hand
 * elsewhere in this file (used for the 0x80-0x83 system-call gates).
119 */
120 #define EXCEP64_SPC_USR(n,name) \
121 IDT64_ENTRY(name,0,U_INTR_GATE)
122
123
124 /*
125 * Special interrupt code.
126 * In 64-bit mode we may use an IST slot instead of task gates.
 * Again descriptor only; handler bodies are hand-written below.
127 */
128 #define EXCEP64_IST(n,name,ist) \
129 IDT64_ENTRY(name,ist,K_INTR_GATE)
130 #define EXCEP64_SPC(n,name) \
131 IDT64_ENTRY(name,0,K_INTR_GATE)
132
133
134 /*
135 * Interrupt.
136 * Push zero err, interrupt vector and address of compatibility mode handler,
137 * then branch to common trampoline.
 * Generates both the descriptor and a local stub label L_<vector>.
138 */
139 #define INTERRUPT64(n) \
140 IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE) ;\
141 .align FALIGN ;\
142 L_ ## n: ;\
143 push $0 ;\
144 push $(n) ;\
145 movl $(LO_ALLINTRS), 4(%rsp) ;\
146 jmp L_enter_lohandler
147
148
 /*
 * Base labels for the IDT and the hi (uber-space) text/data regions.
 * ".align 12" is a power-of-two alignment: 2^12 = 4096, i.e. page-aligned.
 * master_idt64 and hi64_data_base share the same address; the macro
 * invocations that follow interleave .data (descriptors) and .text (stubs).
 */
149 .data
150 .align 12
151 Entry(master_idt64)
152 Entry(hi64_data_base)
153 .text
154 .code64
155 Entry(hi64_text_base)
156
 /* Architecturally-defined exception vectors 0x00-0x1f. */
157 EXCEPTION64(0x00,t64_zero_div)
158 EXCEP64_SPC(0x01,hi64_debug)
159 INTERRUPT64(0x02) /* NMI */
160 EXCEP64_USR(0x03,t64_int3)
161 EXCEP64_USR(0x04,t64_into)
162 EXCEP64_USR(0x05,t64_bounds)
163 EXCEPTION64(0x06,t64_invop)
164 EXCEPTION64(0x07,t64_nofpu)
165 #if MACH_KDB
166 EXCEP64_IST(0x08,db_task_dbl_fault64,1) /* double fault: dedicated IST1 stack */
167 #else
168 EXCEP64_IST(0x08,hi64_double_fault,1)
169 #endif
170 EXCEPTION64(0x09,a64_fpu_over)
171 EXCEPTION64(0x0a,a64_inv_tss)
172 EXCEP64_SPC(0x0b,hi64_segnp)
173 #if MACH_KDB
174 EXCEP64_IST(0x0c,db_task_stk_fault64,1) /* stack fault: dedicated IST1 stack */
175 #else
176 EXCEP64_IST(0x0c,hi64_stack_fault,1)
177 #endif
178 EXCEP64_SPC(0x0d,hi64_gen_prot)
179 EXCEP64_ERR(0x0e,t64_page_fault) /* CPU pushes the error code itself */
180 EXCEPTION64(0x0f,t64_trap_0f)
181 EXCEPTION64(0x10,t64_fpu_err)
182 EXCEPTION64(0x11,t64_trap_11)
183 EXCEP64_IST(0x12,mc64,1) /* machine check: IST1 */
184 EXCEPTION64(0x13,t64_sse_err)
185 EXCEPTION64(0x14,t64_trap_14)
186 EXCEPTION64(0x15,t64_trap_15)
187 EXCEPTION64(0x16,t64_trap_16)
188 EXCEPTION64(0x17,t64_trap_17)
189 EXCEPTION64(0x18,t64_trap_18)
190 EXCEPTION64(0x19,t64_trap_19)
191 EXCEPTION64(0x1a,t64_trap_1a)
192 EXCEPTION64(0x1b,t64_trap_1b)
193 EXCEPTION64(0x1c,t64_trap_1c)
194 EXCEPTION64(0x1d,t64_trap_1d)
195 EXCEPTION64(0x1e,t64_trap_1e)
196 EXCEPTION64(0x1f,t64_trap_1f)
197
 /* Device/external interrupt vectors 0x20-0x7f; all funnel to LO_ALLINTRS. */
198 INTERRUPT64(0x20)
199 INTERRUPT64(0x21)
200 INTERRUPT64(0x22)
201 INTERRUPT64(0x23)
202 INTERRUPT64(0x24)
203 INTERRUPT64(0x25)
204 INTERRUPT64(0x26)
205 INTERRUPT64(0x27)
206 INTERRUPT64(0x28)
207 INTERRUPT64(0x29)
208 INTERRUPT64(0x2a)
209 INTERRUPT64(0x2b)
210 INTERRUPT64(0x2c)
211 INTERRUPT64(0x2d)
212 INTERRUPT64(0x2e)
213 INTERRUPT64(0x2f)
214
215 INTERRUPT64(0x30)
216 INTERRUPT64(0x31)
217 INTERRUPT64(0x32)
218 INTERRUPT64(0x33)
219 INTERRUPT64(0x34)
220 INTERRUPT64(0x35)
221 INTERRUPT64(0x36)
222 INTERRUPT64(0x37)
223 INTERRUPT64(0x38)
224 INTERRUPT64(0x39)
225 INTERRUPT64(0x3a)
226 INTERRUPT64(0x3b)
227 INTERRUPT64(0x3c)
228 INTERRUPT64(0x3d)
229 INTERRUPT64(0x3e)
230 INTERRUPT64(0x3f)
231
232 INTERRUPT64(0x40)
233 INTERRUPT64(0x41)
234 INTERRUPT64(0x42)
235 INTERRUPT64(0x43)
236 INTERRUPT64(0x44)
237 INTERRUPT64(0x45)
238 INTERRUPT64(0x46)
239 INTERRUPT64(0x47)
240 INTERRUPT64(0x48)
241 INTERRUPT64(0x49)
242 INTERRUPT64(0x4a)
243 INTERRUPT64(0x4b)
244 INTERRUPT64(0x4c)
245 INTERRUPT64(0x4d)
246 INTERRUPT64(0x4e)
247 INTERRUPT64(0x4f)
248
249 INTERRUPT64(0x50)
250 INTERRUPT64(0x51)
251 INTERRUPT64(0x52)
252 INTERRUPT64(0x53)
253 INTERRUPT64(0x54)
254 INTERRUPT64(0x55)
255 INTERRUPT64(0x56)
256 INTERRUPT64(0x57)
257 INTERRUPT64(0x58)
258 INTERRUPT64(0x59)
259 INTERRUPT64(0x5a)
260 INTERRUPT64(0x5b)
261 INTERRUPT64(0x5c)
262 INTERRUPT64(0x5d)
263 INTERRUPT64(0x5e)
264 INTERRUPT64(0x5f)
265
266 INTERRUPT64(0x60)
267 INTERRUPT64(0x61)
268 INTERRUPT64(0x62)
269 INTERRUPT64(0x63)
270 INTERRUPT64(0x64)
271 INTERRUPT64(0x65)
272 INTERRUPT64(0x66)
273 INTERRUPT64(0x67)
274 INTERRUPT64(0x68)
275 INTERRUPT64(0x69)
276 INTERRUPT64(0x6a)
277 INTERRUPT64(0x6b)
278 INTERRUPT64(0x6c)
279 INTERRUPT64(0x6d)
280 INTERRUPT64(0x6e)
281 INTERRUPT64(0x6f)
282
283 INTERRUPT64(0x70)
284 INTERRUPT64(0x71)
285 INTERRUPT64(0x72)
286 INTERRUPT64(0x73)
287 INTERRUPT64(0x74)
288 INTERRUPT64(0x75)
289 INTERRUPT64(0x76)
290 INTERRUPT64(0x77)
291 INTERRUPT64(0x78)
292 INTERRUPT64(0x79)
293 INTERRUPT64(0x7a)
294 INTERRUPT64(0x7b)
295 INTERRUPT64(0x7c)
296 INTERRUPT64(0x7d)
297 INTERRUPT64(0x7e)
298 INTERRUPT64(0x7f)
299
 /*
 * System-call gates 0x80-0x83: user-callable (U_INTR_GATE) descriptors
 * whose hand-written handlers appear later in this file.
 */
300 EXCEP64_SPC_USR(0x80,hi64_unix_scall)
301 EXCEP64_SPC_USR(0x81,hi64_mach_scall)
302 EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
303 EXCEP64_SPC_USR(0x83,hi64_diag_scall)
304
 /* Remaining interrupt vectors 0x84-0xfe; 0xff is reserved for preemption. */
305 INTERRUPT64(0x84)
306 INTERRUPT64(0x85)
307 INTERRUPT64(0x86)
308 INTERRUPT64(0x87)
309 INTERRUPT64(0x88)
310 INTERRUPT64(0x89)
311 INTERRUPT64(0x8a)
312 INTERRUPT64(0x8b)
313 INTERRUPT64(0x8c)
314 INTERRUPT64(0x8d)
315 INTERRUPT64(0x8e)
316 INTERRUPT64(0x8f)
317
318 INTERRUPT64(0x90)
319 INTERRUPT64(0x91)
320 INTERRUPT64(0x92)
321 INTERRUPT64(0x93)
322 INTERRUPT64(0x94)
323 INTERRUPT64(0x95)
324 INTERRUPT64(0x96)
325 INTERRUPT64(0x97)
326 INTERRUPT64(0x98)
327 INTERRUPT64(0x99)
328 INTERRUPT64(0x9a)
329 INTERRUPT64(0x9b)
330 INTERRUPT64(0x9c)
331 INTERRUPT64(0x9d)
332 INTERRUPT64(0x9e)
333 INTERRUPT64(0x9f)
334
335 INTERRUPT64(0xa0)
336 INTERRUPT64(0xa1)
337 INTERRUPT64(0xa2)
338 INTERRUPT64(0xa3)
339 INTERRUPT64(0xa4)
340 INTERRUPT64(0xa5)
341 INTERRUPT64(0xa6)
342 INTERRUPT64(0xa7)
343 INTERRUPT64(0xa8)
344 INTERRUPT64(0xa9)
345 INTERRUPT64(0xaa)
346 INTERRUPT64(0xab)
347 INTERRUPT64(0xac)
348 INTERRUPT64(0xad)
349 INTERRUPT64(0xae)
350 INTERRUPT64(0xaf)
351
352 INTERRUPT64(0xb0)
353 INTERRUPT64(0xb1)
354 INTERRUPT64(0xb2)
355 INTERRUPT64(0xb3)
356 INTERRUPT64(0xb4)
357 INTERRUPT64(0xb5)
358 INTERRUPT64(0xb6)
359 INTERRUPT64(0xb7)
360 INTERRUPT64(0xb8)
361 INTERRUPT64(0xb9)
362 INTERRUPT64(0xba)
363 INTERRUPT64(0xbb)
364 INTERRUPT64(0xbc)
365 INTERRUPT64(0xbd)
366 INTERRUPT64(0xbe)
367 INTERRUPT64(0xbf)
368
369 INTERRUPT64(0xc0)
370 INTERRUPT64(0xc1)
371 INTERRUPT64(0xc2)
372 INTERRUPT64(0xc3)
373 INTERRUPT64(0xc4)
374 INTERRUPT64(0xc5)
375 INTERRUPT64(0xc6)
376 INTERRUPT64(0xc7)
377 INTERRUPT64(0xc8)
378 INTERRUPT64(0xc9)
379 INTERRUPT64(0xca)
380 INTERRUPT64(0xcb)
381 INTERRUPT64(0xcc)
382 INTERRUPT64(0xcd)
383 INTERRUPT64(0xce)
384 INTERRUPT64(0xcf)
385
386 INTERRUPT64(0xd0)
387 INTERRUPT64(0xd1)
388 INTERRUPT64(0xd2)
389 INTERRUPT64(0xd3)
390 INTERRUPT64(0xd4)
391 INTERRUPT64(0xd5)
392 INTERRUPT64(0xd6)
393 INTERRUPT64(0xd7)
394 INTERRUPT64(0xd8)
395 INTERRUPT64(0xd9)
396 INTERRUPT64(0xda)
397 INTERRUPT64(0xdb)
398 INTERRUPT64(0xdc)
399 INTERRUPT64(0xdd)
400 INTERRUPT64(0xde)
401 INTERRUPT64(0xdf)
402
403 INTERRUPT64(0xe0)
404 INTERRUPT64(0xe1)
405 INTERRUPT64(0xe2)
406 INTERRUPT64(0xe3)
407 INTERRUPT64(0xe4)
408 INTERRUPT64(0xe5)
409 INTERRUPT64(0xe6)
410 INTERRUPT64(0xe7)
411 INTERRUPT64(0xe8)
412 INTERRUPT64(0xe9)
413 INTERRUPT64(0xea)
414 INTERRUPT64(0xeb)
415 INTERRUPT64(0xec)
416 INTERRUPT64(0xed)
417 INTERRUPT64(0xee)
418 INTERRUPT64(0xef)
419
420 INTERRUPT64(0xf0)
421 INTERRUPT64(0xf1)
422 INTERRUPT64(0xf2)
423 INTERRUPT64(0xf3)
424 INTERRUPT64(0xf4)
425 INTERRUPT64(0xf5)
426 INTERRUPT64(0xf6)
427 INTERRUPT64(0xf7)
428 INTERRUPT64(0xf8)
429 INTERRUPT64(0xf9)
430 INTERRUPT64(0xfa)
431 INTERRUPT64(0xfb)
432 INTERRUPT64(0xfc)
433 INTERRUPT64(0xfd)
434 INTERRUPT64(0xfe)
435 EXCEPTION64(0xff,t64_preempt) /* routed to LO_ALLTRAPS as trap 0xff */
436
437
438 .text
439 /*
440 *
441 * Trap/interrupt entry points.
442 *
443 * All traps must create the following 32-bit save area on the PCB "stack"
444 * - this is identical to the legacy mode 32-bit case:
445 *
446 * gs
447 * fs
448 * es
449 * ds
450 * edi
451 * esi
452 * ebp
453 * cr2 (defined only for page fault)
454 * ebx
455 * edx
456 * ecx
457 * eax
458 * trap number
459 * error code
460 * eip
461 * cs
462 * eflags
463 * user esp - if from user
464 * user ss - if from user
465 *
466 * Above this is the trap number and compatibility mode handler address
467 * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
468 *
469 * (trapno, trapfn)
470 * err
471 * rip
472 * cs
473 * rflags
474 * rsp
475 * ss
476 *
477 */
478
479 .code32
480 /*
481 * Control is passed here to return to the compatibility mode user.
482 * At this stage we're in kernel space in compatibility mode
483 * but we need to switch into 64-bit mode in the 4G-based trampoline
484 * space before performing the iret.
 *
 * Path: restore per-thread debug registers (32- or 64-bit flavor),
 * enter long mode and uber-space, switch cr3 to the user task if needed,
 * rebase %rsp into uber-space, then take the 32- or 64-bit return path.
485 */
486 Entry(lo64_ret_to_user)
487 movl %gs:CPU_ACTIVE_THREAD,%ecx
488
489 movl ACT_PCB_IDS(%ecx),%eax /* Obtain this thread's debug state */
490 cmpl $0,%eax /* Is there a debug register context? */
491 je 2f /* branch if not */
492 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 64-bit task? */
493 jne 1f
494 movl DS_DR0(%eax), %ecx /* If not, load the 32 bit DRs */
495 movl %ecx, %db0
496 movl DS_DR1(%eax), %ecx
497 movl %ecx, %db1
498 movl DS_DR2(%eax), %ecx
499 movl %ecx, %db2
500 movl DS_DR3(%eax), %ecx
501 movl %ecx, %db3
 /*
 * DR7 itself is not loaded here; its value is staged in per-cpu
 * CPU_DR7 (high half zeroed) and written to %dr7 after the mode
 * switch, further down.
 */
502 movl DS_DR7(%eax), %ecx
503 movl %ecx, %gs:CPU_DR7
504 movl $0, %gs:CPU_DR7 + 4
505 jmp 2f
506 1:
507 ENTER_64BIT_MODE() /* Enter long mode */
508 mov DS64_DR0(%eax), %rcx /* Load the full width DRs*/
509 mov %rcx, %dr0
510 mov DS64_DR1(%eax), %rcx
511 mov %rcx, %dr1
512 mov DS64_DR2(%eax), %rcx
513 mov %rcx, %dr2
514 mov DS64_DR3(%eax), %rcx
515 mov %rcx, %dr3
516 mov DS64_DR7(%eax), %rcx
517 mov %rcx, %gs:CPU_DR7 /* stage DR7; loaded below after uberspace */
518 jmp 3f /* Enter uberspace */
519 2:
520 ENTER_64BIT_MODE()
521 3:
522 ENTER_UBERSPACE()
523
524 /*
525 * Now switch %cr3, if necessary.
526 */
527 swapgs /* switch back to uber-kernel gs base */
528 mov %gs:CPU_TASK_CR3,%rcx
529 mov %rcx,%gs:CPU_ACTIVE_CR3
530 mov %cr3, %rax
531 cmp %rcx, %rax
532 je 1f /* already on the task's cr3 — skip reload */
533 /* flag the copyio engine state as WINDOWS_CLEAN */
534 mov %gs:CPU_ACTIVE_THREAD,%eax
535 movl $(WINDOWS_CLEAN),ACT_COPYIO_STATE(%eax)
536 mov %rcx,%cr3 /* switch to user's address space */
537 1:
538
 /* Apply the DR7 value staged above, if any, then clear the staging slot. */
539 mov %gs:CPU_DR7, %rax /* Is there a debug control register?*/
540 cmp $0, %rax
541 je 1f
542 mov %rax, %dr7 /* Set DR7 */
543 movq $0, %gs:CPU_DR7
544 1:
545
546 /*
547 * Adjust stack to use uber-space.
 * shl/shrd splice KERNEL_UBER_BASE_HI32 into the top 32 bits of %rsp
 * while keeping the low 32 bits of the pointer.
548 */
549 mov $(KERNEL_UBER_BASE_HI32), %rax
550 shl $32, %rsp
551 shrd $32, %rax, %rsp /* relocate into uber-space */
552
553 cmpl $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
554 jne L_64bit_return
555 jmp L_32bit_return
556
 /*
 * Return to kernel context (e.g. after an interrupt taken in 64-bit
 * kernel/EFI code).  Same mode switch and %rsp rebase as the user path,
 * but with no debug-register or cr3 handling.
 */
557 Entry(lo64_ret_to_kernel)
558 ENTER_64BIT_MODE()
559 ENTER_UBERSPACE()
560
561 swapgs /* switch back to uber-kernel gs base */
562
563 /*
564 * Adjust stack to use uber-space.
565 */
566 mov $(KERNEL_UBER_BASE_HI32), %rax
567 shl $32, %rsp
568 shrd $32, %rax, %rsp /* relocate into uber-space */
569
570 /* Check for return to 64-bit kernel space (EFI today) */
571 cmpl $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
572 jne L_64bit_return
573 /* fall through for 32-bit return */
574
575 L_32bit_return:
576 /*
577 * Restore registers into the machine state for iret.
 * Copy the saved 32-bit state (R_* slots) into the compat-mode
 * interrupt stack frame (ISC32_* slots) that iretq will consume.
578 */
579 movl R_EIP(%rsp), %eax
580 movl %eax, ISC32_RIP(%rsp)
581 movl R_EFLAGS(%rsp), %eax
582 movl %eax, ISC32_RFLAGS(%rsp)
583 movl R_CS(%rsp), %eax
584 movl %eax, ISC32_CS(%rsp)
585 movl R_UESP(%rsp), %eax
586 movl %eax, ISC32_RSP(%rsp)
587 movl R_SS(%rsp), %eax
588 movl %eax, ISC32_SS(%rsp)
589
590 /*
591 * Restore general 32-bit registers
592 */
593 movl R_EAX(%rsp), %eax
594 movl R_EBX(%rsp), %ebx
595 movl R_ECX(%rsp), %ecx
596 movl R_EDX(%rsp), %edx
597 movl R_EBP(%rsp), %ebp
598 movl R_ESI(%rsp), %esi
599 movl R_EDI(%rsp), %edi
600
601 /*
602 * Restore segment registers. We may take an exception here but
603 * we've got enough space left in the save frame area to absorb
604 * a hardware frame plus the trapfn and trapno
 * (the ret32_set_* labels are matched against the faulting RIP by
 * trap_check_kernel_exit below to recover from a bad segment value).
605 */
606 swapgs
607 EXT(ret32_set_ds):
608 movw R_DS(%rsp), %ds
609 EXT(ret32_set_es):
610 movw R_ES(%rsp), %es
611 EXT(ret32_set_fs):
612 movw R_FS(%rsp), %fs
613 EXT(ret32_set_gs):
614 movw R_GS(%rsp), %gs
615
616 add $(ISC32_OFFSET)+8+8, %rsp /* pop compat frame +
617 trapno/trapfn and error */
618 cmp $(SYSENTER_CS),ISF64_CS-8-8(%rsp)
619 /* test for fast entry/exit */
620 je L_fast_exit
621 EXT(ret32_iret):
622 iretq /* return from interrupt */
623
 /*
 * Fast path back to user for sysenter-entered calls: hand-unwind the
 * hardware frame into %rdx (eip) / %rcx (esp) and leave via sysexit.
 */
624 L_fast_exit:
625 pop %rdx /* user return eip */
626 pop %rcx /* pop and toss cs */
627 andl $(~EFL_IF), (%rsp) /* clear interrupts enable, sti below */
628 popf /* flags - carry denotes failure */
629 pop %rcx /* user return esp */
630 .code32
631 sti /* interrupts enabled after sysexit */
632 sysexit /* 32-bit sysexit */
633 .code64
634
635 L_64bit_return:
636 /*
637 * Set the GS Base MSR with the user's gs base.
 * The MSR write is skipped when returning to kernel CS (ring 0),
 * per the testb/jz below.
638 */
639 movl %gs:CPU_UBER_USER_GS_BASE, %eax
640 movl %gs:CPU_UBER_USER_GS_BASE+4, %edx
641 movl $(MSR_IA32_GS_BASE), %ecx
642 swapgs
643 testb $3, R64_CS(%rsp) /* returning to user-space? */
644 jz 1f
645 wrmsr /* set 64-bit base */
646 1:
647
648 /*
649 * Restore general 64-bit registers
650 */
651 mov R64_R15(%rsp), %r15
652 mov R64_R14(%rsp), %r14
653 mov R64_R13(%rsp), %r13
654 mov R64_R12(%rsp), %r12
655 mov R64_R11(%rsp), %r11
656 mov R64_R10(%rsp), %r10
657 mov R64_R9(%rsp), %r9
658 mov R64_R8(%rsp), %r8
659 mov R64_RSI(%rsp), %rsi
660 mov R64_RDI(%rsp), %rdi
661 mov R64_RBP(%rsp), %rbp
662 mov R64_RDX(%rsp), %rdx
663 mov R64_RBX(%rsp), %rbx
664 mov R64_RCX(%rsp), %rcx
665 mov R64_RAX(%rsp), %rax
666
667 add $(ISS64_OFFSET)+8+8, %rsp /* pop saved state frame +
668 trapno/trapfn and error */
669 cmpl $(SYSCALL_CS),ISF64_CS-8-8(%rsp)
670 /* test for fast entry/exit */
671 je L_sysret
672 EXT(ret64_iret):
673 iretq /* return from interrupt */
674
675 L_sysret:
676 /*
677 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
678 * rcx user rip
679 * r11 user rflags
680 * rsp user stack pointer
681 */
682 mov ISF64_RIP-16(%rsp), %rcx
683 mov ISF64_RFLAGS-16(%rsp), %r11
684 mov ISF64_RSP-16(%rsp), %rsp
685 sysretq /* return from system call */
686
687 /*
688 * Common path to enter locore handlers.
 * All trap/interrupt stubs jump here with (trapno,trapfn) and error
 * already on the stack.  Routing is by saved CS: 64-bit user or 64-bit
 * kernel (EFI) states take the 64-bit save path, everything else 32-bit.
689 */
690 L_enter_lohandler:
691 swapgs /* switch to kernel gs (cpu_data) */
692 L_enter_lohandler_continue:
693 cmpl $(USER64_CS), ISF64_CS(%rsp)
694 je L_64bit_enter /* this is a 64-bit user task */
695 cmpl $(KERNEL64_CS), ISF64_CS(%rsp)
696 je L_64bit_enter /* we're in 64-bit (EFI) code */
697 jmp L_32bit_enter
698
699 /*
700 * System call handlers.
701 * These are entered via a syscall interrupt. The system call number in %rax
702 * is saved to the error code slot in the stack frame. We then branch to the
703 * common state saving code.
 *
 * All four stubs are identical apart from the interrupt number pushed and
 * the lo-handler address packed into the upper half of that stack slot.
704 */
705
706 Entry(hi64_unix_scall)
707 swapgs /* switch to kernel gs (cpu_data) */
708 L_unix_scall_continue:
709 push %rax /* save system call number */
710 push $(UNIX_INT)
711 movl $(LO_UNIX_SCALL), 4(%rsp)
712 jmp L_32bit_enter_check
713
714
715 Entry(hi64_mach_scall)
716 swapgs /* switch to kernel gs (cpu_data) */
717 L_mach_scall_continue:
718 push %rax /* save system call number */
719 push $(MACH_INT)
720 movl $(LO_MACH_SCALL), 4(%rsp)
721 jmp L_32bit_enter_check
722
723
724 Entry(hi64_mdep_scall)
725 swapgs /* switch to kernel gs (cpu_data) */
726 L_mdep_scall_continue:
727 push %rax /* save system call number */
728 push $(MACHDEP_INT)
729 movl $(LO_MDEP_SCALL), 4(%rsp)
730 jmp L_32bit_enter_check
731
732
733 Entry(hi64_diag_scall)
734 swapgs /* switch to kernel gs (cpu_data) */
735 L_diag_scall_continue:
736 push %rax /* save system call number */
737 push $(DIAG_INT)
738 movl $(LO_DIAG_SCALL), 4(%rsp)
739 jmp L_32bit_enter_check
740
 /*
 * 64-bit syscall instruction entry point.
 * syscall does not switch stacks, so the user %rsp is parked in the
 * per-cpu CPU_UBER_TMP slot and %rsp is pointed at the PCB's ISF before
 * a hand-built interrupt stack frame is written.
 */
741 Entry(hi64_syscall)
742 swapgs /* Kapow! get per-cpu data area */
743 L_syscall_continue:
744 mov %rsp, %gs:CPU_UBER_TMP /* save user stack */
745 mov %gs:CPU_UBER_ISF, %rsp /* switch stack to pcb */
746
747 /*
748 * Save values in the ISF frame in the PCB
749 * to cons up the saved machine state.
 * syscall left the user rip in %rcx and rflags in %r11.
750 */
751 movl $(USER_DS), ISF64_SS(%rsp)
752 movl $(SYSCALL_CS), ISF64_CS(%rsp) /* cs - a pseudo-segment */
753 mov %r11, ISF64_RFLAGS(%rsp) /* rflags */
754 mov %rcx, ISF64_RIP(%rsp) /* rip */
755 mov %gs:CPU_UBER_TMP, %rcx
756 mov %rcx, ISF64_RSP(%rsp) /* user stack */
757 mov %rax, ISF64_ERR(%rsp) /* err/rax - syscall code */
758 movl $(0), ISF64_TRAPNO(%rsp) /* trapno */
759 movl $(LO_SYSCALL), ISF64_TRAPFN(%rsp)
760 jmp L_64bit_enter /* this can only be a 64-bit task */
761
762 /*
763 * sysenter entry point
764 * Requires user code to set up:
765 * edx: user instruction pointer (return address)
766 * ecx: user stack pointer
767 * on which is pushed stub ret addr and saved ebx
768 * Return to user-space is made using sysexit.
769 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
770 * or requiring ecx to be preserved.
771 */
772 Entry(hi64_sysenter)
773 mov (%rsp), %rsp /* switch from temporary stack to pcb */
774 /*
775 * Push values on to the PCB stack
776 * to cons up the saved machine state.
777 */
778 push $(USER_DS) /* ss */
779 push %rcx /* uesp */
780 pushf /* flags */
781 /*
782 * Clear, among others, the Nested Task (NT) flags bit;
783 * This is cleared by INT, but not by sysenter, which only
784 * clears RF, VM and IF.
785 */
786 push $0
787 popf
788 push $(SYSENTER_CS) /* cs */
789 swapgs /* switch to kernel gs (cpu_data) */
790 L_sysenter_continue:
791 push %rdx /* eip */
792 push %rax /* err/eax - syscall code */
793 push $(0) /* trapno slot; handler address packed next */
794 movl $(LO_SYSENTER), ISF64_TRAPFN(%rsp)
795 orl $(EFL_IF), ISF64_RFLAGS(%rsp) /* re-enable IF in the saved flags */
796
797 L_32bit_enter_check:
798 /*
799 * Check we're not a confused 64-bit user.
 * 64-bit tasks must not use the 32-bit int-gate syscall entries;
 * reject them with an invalid-opcode trap.
800 */
801 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
802 jne L_64bit_entry_reject
803 /* fall through to 32-bit handler: */
804
805 L_32bit_enter:
806 /*
807 * Make space for the compatibility save area.
808 */
809 sub $(ISC32_OFFSET), %rsp
810 movl $(SS_32), SS_FLAVOR(%rsp)
811
812 /*
813 * Save segment regs
814 */
815 mov %ds, R_DS(%rsp)
816 mov %es, R_ES(%rsp)
817 mov %fs, R_FS(%rsp)
818 mov %gs, R_GS(%rsp)
819
820 /*
821 * Save general 32-bit registers
822 */
823 mov %eax, R_EAX(%rsp)
824 mov %ebx, R_EBX(%rsp)
825 mov %ecx, R_ECX(%rsp)
826 mov %edx, R_EDX(%rsp)
827 mov %ebp, R_EBP(%rsp)
828 mov %esi, R_ESI(%rsp)
829 mov %edi, R_EDI(%rsp)
830
831 /* Unconditionally save cr2; only meaningful on page faults */
832 mov %cr2, %rax
833 mov %eax, R_CR2(%rsp)
834
835 /*
836 * Copy registers already saved in the machine state
837 * (in the interrupt stack frame) into the compat save area.
838 */
839 mov ISC32_RIP(%rsp), %eax
840 mov %eax, R_EIP(%rsp)
841 mov ISC32_RFLAGS(%rsp), %eax
842 mov %eax, R_EFLAGS(%rsp)
843 mov ISC32_CS(%rsp), %eax
844 mov %eax, R_CS(%rsp)
845 mov ISC32_RSP(%rsp), %eax
846 mov %eax, R_UESP(%rsp)
847 mov ISC32_SS(%rsp), %eax
848 mov %eax, R_SS(%rsp)
 /* Re-entry point for faults whose state was already saved (see
 L_32bit_fault_set_seg). */
849 L_32bit_enter_after_fault:
850 mov ISC32_TRAPNO(%rsp), %ebx /* %ebx := trapno for later */
851 mov %ebx, R_TRAPNO(%rsp)
852 mov ISC32_ERR(%rsp), %eax
853 mov %eax, R_ERR(%rsp)
854 mov ISC32_TRAPFN(%rsp), %edx
855
856 /*
857 * Common point to enter lo_handler in compatibility mode:
858 * %ebx trapno
859 * %edx locore handler address
860 */
861 L_enter_lohandler2:
862 /*
863 * Switch address space to kernel
864 * if not shared space and not already mapped.
865 * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
866 */
867 mov %cr3, %rax
868 mov %gs:CPU_TASK_CR3, %rcx
869 cmp %rax, %rcx /* is the task's cr3 loaded? */
870 jne 1f
871 cmpl $(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
872 je 2f
873 1:
874 mov %gs:CPU_KERNEL_CR3, %rcx
875 cmp %rax, %rcx
876 je 2f
877 mov %rcx, %cr3
878 mov %rcx, %gs:CPU_ACTIVE_CR3
879 2:
880 /*
881 * Switch to compatibility mode.
882 * Then establish kernel segments.
883 */
884 swapgs /* Done with uber-kernel gs */
885 ENTER_COMPAT_MODE()
886
887 /*
888 * Now in compatibility mode and running in compatibility space
889 * prepare to enter the locore handler.
890 * %ebx trapno
891 * %edx lo_handler pointer
892 * Note: the stack pointer (now 32-bit) is now directly addressing the
893 * kernel below 4G and therefore is automagically re-based.
894 */
895 mov $(KERNEL_DS), %eax
896 mov %eax, %ss
897 mov %eax, %ds
898 mov %eax, %es
899 mov %eax, %fs
900 mov $(CPU_DATA_GS), %eax
901 mov %eax, %gs
902
903 movl %gs:CPU_ACTIVE_THREAD,%ecx /* Get the active thread */
904 cmpl $0, ACT_PCB_IDS(%ecx) /* Is there a debug register state? */
905 je 1f
906 movl $0, %ecx /* If so, reset DR7 (the control) */
907 movl %ecx, %dr7
908 1:
909 addl $1,%gs:hwIntCnt(,%ebx,4) // Bump the trap/intr count
910
911 /* Dispatch the designated lo handler */
912 jmp *%edx
913
914 .code64
915 L_64bit_entry_reject:
916 /*
917 * Here for a 64-bit user attempting an invalid kernel entry.
 * Rewrite the frame's (trapno,trapfn) to deliver T_INVALID_OPCODE
 * through the normal trap path.
918 */
919 movl $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
920 movl $(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
921 /* Fall through... */
922
923 L_64bit_enter:
924 /*
925 * Here for a 64-bit user task, or special 64-bit kernel code.
926 * Make space for the save area.
927 */
928 sub $(ISS64_OFFSET), %rsp
929 movl $(SS_64), SS_FLAVOR(%rsp)
930
931 /*
932 * Save segment regs
933 */
934 mov %fs, R64_FS(%rsp)
935 mov %gs, R64_GS(%rsp)
936
937 /* Save general-purpose registers */
938 mov %rax, R64_RAX(%rsp)
939 mov %rcx, R64_RCX(%rsp)
940 mov %rbx, R64_RBX(%rsp)
941 mov %rbp, R64_RBP(%rsp)
942 mov %r11, R64_R11(%rsp)
943 mov %r12, R64_R12(%rsp)
944 mov %r13, R64_R13(%rsp)
945 mov %r14, R64_R14(%rsp)
946 mov %r15, R64_R15(%rsp)
947
948 /* cr2 is significant only for page-faults */
949 mov %cr2, %rax
950 mov %rax, R64_CR2(%rsp)
951
952 /* Other registers (which may contain syscall args) */
953 mov %rdi, R64_RDI(%rsp) /* arg0 .. */
954 mov %rsi, R64_RSI(%rsp)
955 mov %rdx, R64_RDX(%rsp)
956 mov %r10, R64_R10(%rsp)
957 mov %r8, R64_R8(%rsp)
958 mov %r9, R64_R9(%rsp) /* .. arg5 */
959
960 L_64bit_enter_after_fault:
961 /*
962 * At this point we're almost ready to join the common lo-entry code.
 * Load the registers L_enter_lohandler2 expects:
 * %ebx = trapno, %edx = lo-handler address.
963 */
964 mov R64_TRAPNO(%rsp), %ebx
965 mov R64_TRAPFN(%rsp), %edx
966
967 jmp L_enter_lohandler2
968
969 /*
970 * Debug trap. Check for single-stepping across system call into
971 * kernel. If this is the case, taking the debug trap has turned
972 * off single-stepping - save the flags register with the trace
973 * bit set.
 *
 * If the trap RIP matches the first instruction of one of the uber-space
 * system-call entry points (before that entry had a chance to build its
 * frame), the partial frame is discarded and the corresponding
 * *_continue path is re-entered so the system call proceeds normally.
974 */
975 Entry(hi64_debug)
976 swapgs /* set %gs for cpu data */
977 push $0 /* error code */
978 push $(T_DEBUG)
979 movl $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
980
981 testb $3, ISF64_CS(%rsp)
982 jnz L_enter_lohandler_continue /* from user: normal trap delivery */
983
984 /*
985 * trap came from kernel mode
986 */
987 cmpl $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
988 jne L_enter_lohandler_continue /* trap not in uber-space */
989
990 cmpl $(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
991 jne 6f
992 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
993 jmp L_mach_scall_continue /* continue system call entry */
994 6:
995 cmpl $(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
996 jne 5f
997 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
998 jmp L_mdep_scall_continue /* continue system call entry */
999 5:
1000 cmpl $(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
1001 jne 4f
1002 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
1003 jmp L_unix_scall_continue /* continue system call entry */
1004 4:
1005 cmpl $(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
1006 jne L_enter_lohandler_continue
1007 /*
1008 * Interrupt stack frame has been pushed on the temporary stack.
1009 * We have to switch to pcb stack and copy eflags.
 * (sysenter was interrupted at its first instruction; rebuild the
 * frame it would have created, but with TF preserved in the saved
 * rflags and with SYSENTER_TF_CS so the return goes via iret.)
1010 */
1011 add $32,%rsp /* remove trapno/trapfn/err/rip/cs */
1012 push %rcx /* save %rcx - user stack pointer */
1013 mov 32(%rsp),%rcx /* top of intr stack -> pcb stack */
1014 xchg %rcx,%rsp /* switch to pcb stack */
1015 push $(USER_DS) /* ss */
1016 push (%rcx) /* saved %rcx into rsp slot */
1017 push 8(%rcx) /* rflags */
1018 mov (%rcx),%rcx /* restore %rcx */
1019 push $(SYSENTER_TF_CS) /* cs - not SYSENTER_CS for iret path */
1020 jmp L_sysenter_continue /* continue sysenter entry */
1021
1022
 /*
 * Double fault (vector 8, via IST1).  The CPU pushes an error code for
 * this vector, so only trapno/trapfn are added here.  A double fault at
 * the first instruction of hi64_syscall means the user %rsp was bad at
 * syscall entry; recover the saved %rsp from the frame and restart the
 * syscall entry path.
 */
1023 Entry(hi64_double_fault)
1024 swapgs /* set %gs for cpu data */
1025 push $(T_DOUBLE_FAULT)
1026 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
1027
1028 cmpl $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
1029 jne L_enter_lohandler_continue /* trap not in uber-space */
1030
1031 cmpl $(EXT(hi64_syscall)), ISF64_RIP(%rsp)
1032 jne L_enter_lohandler_continue
1033
1034 mov ISF64_RSP(%rsp), %rsp
1035 jmp L_syscall_continue
1036
1037
1038 /*
1039 * General protection or segment-not-present fault.
1040 * Check for a GP/NP fault in the kernel_return
1041 * sequence; if there, report it as a GP/NP fault on the user's instruction.
1042 *
1043 * rsp-> 0: trap code (NP or GP) and trap function
1044 * 8: segment number in error (error code)
1045 * 16 rip
1046 * 24 cs
1047 * 32 rflags
1048 * 40 rsp
1049 * 48 ss
1050 * 56 old registers (trap is from kernel)
1051 */
1052 Entry(hi64_gen_prot)
1053 push $(T_GENERAL_PROTECTION)
1054 jmp trap_check_kernel_exit /* check for kernel exit sequence */
1055
1056 Entry(hi64_segnp)
1057 push $(T_SEGMENT_NOT_PRESENT)
1058 /* indicate fault type */
 /*
 * Common check: if the fault is from user mode, or from kernel code
 * outside the uber-space return sequence, deliver it normally.
 * Otherwise match the faulting RIP against the known return-path
 * labels and redirect to the appropriate recovery.
 */
1059 trap_check_kernel_exit:
1060 movl $(LO_ALLTRAPS), 4(%rsp) /* pack handler into trapno slot */
1061 testb $3,24(%rsp) /* CS at offset 24: user mode? */
1062 jnz hi64_take_trap
1063 /* trap was from kernel mode, so */
1064 /* check for the kernel exit sequence */
1065 cmpl $(KERNEL_UBER_BASE_HI32), 16+4(%rsp)
1066 jne hi64_take_trap /* trap not in uber-space */
1067
1068 cmpl $(EXT(ret32_iret)), 16(%rsp)
1069 je L_fault_iret
1070 cmpl $(EXT(ret32_set_ds)), 16(%rsp)
1071 je L_32bit_fault_set_seg
1072 cmpl $(EXT(ret32_set_es)), 16(%rsp)
1073 je L_32bit_fault_set_seg
1074 cmpl $(EXT(ret32_set_fs)), 16(%rsp)
1075 je L_32bit_fault_set_seg
1076 cmpl $(EXT(ret32_set_gs)), 16(%rsp)
1077 je L_32bit_fault_set_seg
1078
1079 cmpl $(EXT(ret64_iret)), 16(%rsp)
1080 je L_fault_iret
1081
1082 hi64_take_trap:
1083 jmp L_enter_lohandler
1084
1085
1086 /*
1087 * GP/NP fault on IRET: CS or SS is in error.
1088 * All registers contain the user's values.
1089 *
1090 * on SP is
1091 * 0 trap number/function
1092 * 8 errcode
1093 * 16 rip
1094 * 24 cs
1095 * 32 rflags
1096 * 40 rsp --> new trapno
1097 * 48 ss --> new errcode
1098 * 56 user rip
1099 * 64 user cs
1100 * 72 user rflags
1101 * 80 user rsp
1102 * 88 user ss
1103 */
1104 L_fault_iret:
 /*
 * Slide the trapno/errcode from the kernel fault frame down over the
 * user frame's first two slots, so the fault is delivered as if the
 * user's own instruction had taken it.  %rax is parked in the (now
 * useless) saved-rip slot and restored before entry.
 */
1105 mov %rax, 16(%rsp) /* save rax (we don`t need saved rip) */
1106 pop %rax /* get trap number */
1107 mov %rax, 40-8(%rsp) /* put in user trap number */
1108 pop %rax /* get error code */
1109 mov %rax, 48-8-8(%rsp) /* put in user errcode */
1110 pop %rax /* restore rax */
1111 add $16,%rsp /* eat 2 more slots */
1112 /* now treat as fault from user */
1113 jmp L_enter_lohandler
1114
1115 /*
1116 * Fault restoring a segment register. All of the saved state is still
1117 * on the stack untouched since we haven't yet moved the stack pointer.
1118 */
1119 L_32bit_fault_set_seg:
1120 pop %rax /* get trap number/function */
1121 pop %rdx /* get error code */
1122 add $40,%rsp /* pop stack to saved state */
1123 mov %rax,ISC32_TRAPNO(%rsp)
1124 mov %rdx,ISC32_ERR(%rsp)
1125 /* now treat as fault from user */
1126 /* except that all the state is */
1127 /* already saved - we just have to */
1128 /* move the trapno and error into */
1129 /* the compatibility frame */
1130 swapgs /* undone again by L_enter_lohandler2's swapgs */
1131 jmp L_32bit_enter_after_fault
1132
1133
1134 /*
1135 * Fatal exception handlers:
1136 */
1137 Entry(db_task_dbl_fault64)
1138 push $(T_DOUBLE_FAULT)
1139 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
1140 jmp L_enter_lohandler
1141
1142 Entry(db_task_stk_fault64)
1143 Entry(hi64_stack_fault)
1144 push $(T_STACK_FAULT)
 /* NOTE(review): LO_DOUBLE_FAULT (not a stack-fault-specific handler) is
 used here; presumably lo_df64 handles both fatal cases — confirm. */
1145 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
1146 jmp L_enter_lohandler
1147
1148 Entry(mc64)
1149 push $(0) /* Error */
1150 push $(T_MACHINE_CHECK)
1151 movl $(LO_MACHINE_CHECK), ISF64_TRAPFN(%rsp)
1152 jmp L_enter_lohandler