/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <i386/asm.h>
#include <i386/asm64.h>
#include <assym.s>
#include <mach_kdb.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#define _ARCH_I386_ASM_HELP_H_          /* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>

/*
 * Locore handlers.
 */
#define LO_ALLINTRS             EXT(lo_allintrs)
#define LO_ALLTRAPS             EXT(lo_alltraps)
#define LO_SYSENTER             EXT(lo_sysenter)
#define LO_SYSCALL              EXT(lo_syscall)
#define LO_UNIX_SCALL           EXT(lo_unix_scall)
#define LO_MACH_SCALL           EXT(lo_mach_scall)
#define LO_MDEP_SCALL           EXT(lo_mdep_scall)
#define LO_DIAG_SCALL           EXT(lo_diag_scall)
#define LO_DOUBLE_FAULT         EXT(lo_df64)
#define LO_MACHINE_CHECK        EXT(lo_mc64)

/*
 * Interrupt descriptor table and code vectors for it.
 *
 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
 * reformatted ("fixed") before use.
 * All vectors are rebased in uber-space.
 * Special vectors (e.g. double-fault) use a non-0 IST.
 */
#define IDT64_BASE_ENTRY(vec,seg,ist,type)       \
        .data                                   ;\
        .long   vec                             ;\
        .long   KERNEL_UBER_BASE_HI32           ;\
        .word   seg                             ;\
        .byte   ist*16                          ;\
        .byte   type                            ;\
        .long   0                               ;\
        .text

#define IDT64_ENTRY(vec,ist,type) \
        IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
#define IDT64_ENTRY_LOCAL(vec,ist,type) \
        IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)
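
/*
 * For reference, the architectural IA-32e interrupt gate that the fixup
 * must eventually produce is also 16 bytes, but splits the 64-bit handler
 * offset into three fields:
 *
 *      bytes  0-1      offset 15:0
 *      bytes  2-3      segment selector
 *      byte   4        IST index (bits 2:0)
 *      byte   5        type/attributes (P, DPL, gate type)
 *      bytes  6-7      offset 31:16
 *      bytes  8-11     offset 63:32
 *      bytes 12-15     reserved
 *
 * The fake entry above instead keeps the handler address contiguous
 * (low 32 bits from vec, high 32 bits from KERNEL_UBER_BASE_HI32) and
 * pre-shifts the IST index into the high nibble of its byte, so the
 * fixup code (which lives elsewhere in the kernel) presumably only has
 * to shuffle fields, not recompute the uber-space address.
 */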

/*
 * Push trap number and address of compatibility mode handler,
 * then branch to common trampoline. Error already pushed.
 */
#define EXCEP64_ERR(n,name)                      \
        IDT64_ENTRY(name,0,K_INTR_GATE)         ;\
Entry(name)                                     ;\
        push    $(n)                            ;\
        movl    $(LO_ALLTRAPS), 4(%rsp)         ;\
        jmp     L_enter_lohandler

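/*
 * Note on the (trapno, trapfn) packing used by these macros: in long mode,
 * "push $(n)" allocates a full 8-byte stack slot with the trap number in
 * its low 32 bits, and the following "movl $(LO_...), 4(%rsp)" overwrites
 * the high 32 bits of that same slot with the compatibility-mode handler
 * address. A single slot thus carries both values, matching the
 * "(trapno, trapfn)" entry in the frame layout comment further below.
 */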

/*
 * Push error(0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define EXCEPTION64(n,name)                      \
        IDT64_ENTRY(name,0,K_INTR_GATE)         ;\
Entry(name)                                     ;\
        push    $0                              ;\
        push    $(n)                            ;\
        movl    $(LO_ALLTRAPS), 4(%rsp)         ;\
        jmp     L_enter_lohandler


/*
 * Interrupt from user.
 * Push error (0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define EXCEP64_USR(n,name)                      \
        IDT64_ENTRY(name,0,U_INTR_GATE)         ;\
Entry(name)                                     ;\
        push    $0                              ;\
        push    $(n)                            ;\
        movl    $(LO_ALLTRAPS), 4(%rsp)         ;\
        jmp     L_enter_lohandler


/*
 * Special interrupt code from user.
 */
#define EXCEP64_SPC_USR(n,name)                  \
        IDT64_ENTRY(name,0,U_INTR_GATE)


/*
 * Special interrupt code.
 * In 64-bit mode we may use an IST slot instead of task gates.
 */
#define EXCEP64_IST(n,name,ist)                  \
        IDT64_ENTRY(name,ist,K_INTR_GATE)
#define EXCEP64_SPC(n,name)                      \
        IDT64_ENTRY(name,0,K_INTR_GATE)


/*
 * Interrupt.
 * Push zero err, interrupt vector and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define INTERRUPT64(n)                           \
        IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE) ;\
        .align  FALIGN                          ;\
L_ ## n:                                        ;\
        push    $0                              ;\
        push    $(n)                            ;\
        movl    $(LO_ALLINTRS), 4(%rsp)         ;\
        jmp     L_enter_lohandler


        .data
        .align 12
Entry(master_idt64)
Entry(hi64_data_base)
        .text
        .code64
Entry(hi64_text_base)

EXCEPTION64(0x00,t64_zero_div)
EXCEP64_SPC(0x01,hi64_debug)
INTERRUPT64(0x02)                       /* NMI */
EXCEP64_USR(0x03,t64_int3)
EXCEP64_USR(0x04,t64_into)
EXCEP64_USR(0x05,t64_bounds)
EXCEPTION64(0x06,t64_invop)
EXCEPTION64(0x07,t64_nofpu)
#if MACH_KDB
EXCEP64_IST(0x08,db_task_dbl_fault64,1)
#else
EXCEP64_IST(0x08,hi64_double_fault,1)
#endif
EXCEPTION64(0x09,a64_fpu_over)
EXCEPTION64(0x0a,a64_inv_tss)
EXCEP64_SPC(0x0b,hi64_segnp)
#if MACH_KDB
EXCEP64_IST(0x0c,db_task_stk_fault64,1)
#else
EXCEP64_IST(0x0c,hi64_stack_fault,1)
#endif
EXCEP64_SPC(0x0d,hi64_gen_prot)
EXCEP64_ERR(0x0e,t64_page_fault)
EXCEPTION64(0x0f,t64_trap_0f)
EXCEPTION64(0x10,t64_fpu_err)
EXCEPTION64(0x11,t64_trap_11)
EXCEP64_IST(0x12,mc64,1)
EXCEPTION64(0x13,t64_sse_err)
EXCEPTION64(0x14,t64_trap_14)
EXCEPTION64(0x15,t64_trap_15)
EXCEPTION64(0x16,t64_trap_16)
EXCEPTION64(0x17,t64_trap_17)
EXCEPTION64(0x18,t64_trap_18)
EXCEPTION64(0x19,t64_trap_19)
EXCEPTION64(0x1a,t64_trap_1a)
EXCEPTION64(0x1b,t64_trap_1b)
EXCEPTION64(0x1c,t64_trap_1c)
EXCEPTION64(0x1d,t64_trap_1d)
EXCEPTION64(0x1e,t64_trap_1e)
EXCEPTION64(0x1f,t64_trap_1f)

INTERRUPT64(0x20)
INTERRUPT64(0x21)
INTERRUPT64(0x22)
INTERRUPT64(0x23)
INTERRUPT64(0x24)
INTERRUPT64(0x25)
INTERRUPT64(0x26)
INTERRUPT64(0x27)
INTERRUPT64(0x28)
INTERRUPT64(0x29)
INTERRUPT64(0x2a)
INTERRUPT64(0x2b)
INTERRUPT64(0x2c)
INTERRUPT64(0x2d)
INTERRUPT64(0x2e)
INTERRUPT64(0x2f)

INTERRUPT64(0x30)
INTERRUPT64(0x31)
INTERRUPT64(0x32)
INTERRUPT64(0x33)
INTERRUPT64(0x34)
INTERRUPT64(0x35)
INTERRUPT64(0x36)
INTERRUPT64(0x37)
INTERRUPT64(0x38)
INTERRUPT64(0x39)
INTERRUPT64(0x3a)
INTERRUPT64(0x3b)
INTERRUPT64(0x3c)
INTERRUPT64(0x3d)
INTERRUPT64(0x3e)
INTERRUPT64(0x3f)

INTERRUPT64(0x40)
INTERRUPT64(0x41)
INTERRUPT64(0x42)
INTERRUPT64(0x43)
INTERRUPT64(0x44)
INTERRUPT64(0x45)
INTERRUPT64(0x46)
INTERRUPT64(0x47)
INTERRUPT64(0x48)
INTERRUPT64(0x49)
INTERRUPT64(0x4a)
INTERRUPT64(0x4b)
INTERRUPT64(0x4c)
INTERRUPT64(0x4d)
INTERRUPT64(0x4e)
INTERRUPT64(0x4f)

INTERRUPT64(0x50)
INTERRUPT64(0x51)
INTERRUPT64(0x52)
INTERRUPT64(0x53)
INTERRUPT64(0x54)
INTERRUPT64(0x55)
INTERRUPT64(0x56)
INTERRUPT64(0x57)
INTERRUPT64(0x58)
INTERRUPT64(0x59)
INTERRUPT64(0x5a)
INTERRUPT64(0x5b)
INTERRUPT64(0x5c)
INTERRUPT64(0x5d)
INTERRUPT64(0x5e)
INTERRUPT64(0x5f)

INTERRUPT64(0x60)
INTERRUPT64(0x61)
INTERRUPT64(0x62)
INTERRUPT64(0x63)
INTERRUPT64(0x64)
INTERRUPT64(0x65)
INTERRUPT64(0x66)
INTERRUPT64(0x67)
INTERRUPT64(0x68)
INTERRUPT64(0x69)
INTERRUPT64(0x6a)
INTERRUPT64(0x6b)
INTERRUPT64(0x6c)
INTERRUPT64(0x6d)
INTERRUPT64(0x6e)
INTERRUPT64(0x6f)

INTERRUPT64(0x70)
INTERRUPT64(0x71)
INTERRUPT64(0x72)
INTERRUPT64(0x73)
INTERRUPT64(0x74)
INTERRUPT64(0x75)
INTERRUPT64(0x76)
INTERRUPT64(0x77)
INTERRUPT64(0x78)
INTERRUPT64(0x79)
INTERRUPT64(0x7a)
INTERRUPT64(0x7b)
INTERRUPT64(0x7c)
INTERRUPT64(0x7d)
INTERRUPT64(0x7e)
INTERRUPT64(0x7f)

EXCEP64_SPC_USR(0x80,hi64_unix_scall)
EXCEP64_SPC_USR(0x81,hi64_mach_scall)
EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
EXCEP64_SPC_USR(0x83,hi64_diag_scall)

INTERRUPT64(0x84)
INTERRUPT64(0x85)
INTERRUPT64(0x86)
INTERRUPT64(0x87)
INTERRUPT64(0x88)
INTERRUPT64(0x89)
INTERRUPT64(0x8a)
INTERRUPT64(0x8b)
INTERRUPT64(0x8c)
INTERRUPT64(0x8d)
INTERRUPT64(0x8e)
INTERRUPT64(0x8f)

INTERRUPT64(0x90)
INTERRUPT64(0x91)
INTERRUPT64(0x92)
INTERRUPT64(0x93)
INTERRUPT64(0x94)
INTERRUPT64(0x95)
INTERRUPT64(0x96)
INTERRUPT64(0x97)
INTERRUPT64(0x98)
INTERRUPT64(0x99)
INTERRUPT64(0x9a)
INTERRUPT64(0x9b)
INTERRUPT64(0x9c)
INTERRUPT64(0x9d)
INTERRUPT64(0x9e)
INTERRUPT64(0x9f)

INTERRUPT64(0xa0)
INTERRUPT64(0xa1)
INTERRUPT64(0xa2)
INTERRUPT64(0xa3)
INTERRUPT64(0xa4)
INTERRUPT64(0xa5)
INTERRUPT64(0xa6)
INTERRUPT64(0xa7)
INTERRUPT64(0xa8)
INTERRUPT64(0xa9)
INTERRUPT64(0xaa)
INTERRUPT64(0xab)
INTERRUPT64(0xac)
INTERRUPT64(0xad)
INTERRUPT64(0xae)
INTERRUPT64(0xaf)

INTERRUPT64(0xb0)
INTERRUPT64(0xb1)
INTERRUPT64(0xb2)
INTERRUPT64(0xb3)
INTERRUPT64(0xb4)
INTERRUPT64(0xb5)
INTERRUPT64(0xb6)
INTERRUPT64(0xb7)
INTERRUPT64(0xb8)
INTERRUPT64(0xb9)
INTERRUPT64(0xba)
INTERRUPT64(0xbb)
INTERRUPT64(0xbc)
INTERRUPT64(0xbd)
INTERRUPT64(0xbe)
INTERRUPT64(0xbf)

INTERRUPT64(0xc0)
INTERRUPT64(0xc1)
INTERRUPT64(0xc2)
INTERRUPT64(0xc3)
INTERRUPT64(0xc4)
INTERRUPT64(0xc5)
INTERRUPT64(0xc6)
INTERRUPT64(0xc7)
INTERRUPT64(0xc8)
INTERRUPT64(0xc9)
INTERRUPT64(0xca)
INTERRUPT64(0xcb)
INTERRUPT64(0xcc)
INTERRUPT64(0xcd)
INTERRUPT64(0xce)
INTERRUPT64(0xcf)

INTERRUPT64(0xd0)
INTERRUPT64(0xd1)
INTERRUPT64(0xd2)
INTERRUPT64(0xd3)
INTERRUPT64(0xd4)
INTERRUPT64(0xd5)
INTERRUPT64(0xd6)
INTERRUPT64(0xd7)
INTERRUPT64(0xd8)
INTERRUPT64(0xd9)
INTERRUPT64(0xda)
INTERRUPT64(0xdb)
INTERRUPT64(0xdc)
INTERRUPT64(0xdd)
INTERRUPT64(0xde)
INTERRUPT64(0xdf)

INTERRUPT64(0xe0)
INTERRUPT64(0xe1)
INTERRUPT64(0xe2)
INTERRUPT64(0xe3)
INTERRUPT64(0xe4)
INTERRUPT64(0xe5)
INTERRUPT64(0xe6)
INTERRUPT64(0xe7)
INTERRUPT64(0xe8)
INTERRUPT64(0xe9)
INTERRUPT64(0xea)
INTERRUPT64(0xeb)
INTERRUPT64(0xec)
INTERRUPT64(0xed)
INTERRUPT64(0xee)
INTERRUPT64(0xef)

INTERRUPT64(0xf0)
INTERRUPT64(0xf1)
INTERRUPT64(0xf2)
INTERRUPT64(0xf3)
INTERRUPT64(0xf4)
INTERRUPT64(0xf5)
INTERRUPT64(0xf6)
INTERRUPT64(0xf7)
INTERRUPT64(0xf8)
INTERRUPT64(0xf9)
INTERRUPT64(0xfa)
INTERRUPT64(0xfb)
INTERRUPT64(0xfc)
INTERRUPT64(0xfd)
INTERRUPT64(0xfe)
EXCEPTION64(0xff,t64_preempt)


        .text
/*
 *
 * Trap/interrupt entry points.
 *
 * All traps must create the following 32-bit save area on the PCB "stack"
 * - this is identical to the legacy mode 32-bit case:
 *
 *      gs
 *      fs
 *      es
 *      ds
 *      edi
 *      esi
 *      ebp
 *      cr2 (defined only for page fault)
 *      ebx
 *      edx
 *      ecx
 *      eax
 *      trap number
 *      error code
 *      eip
 *      cs
 *      eflags
 *      user esp - if from user
 *      user ss  - if from user
 *
 * Above this is the trap number and compatibility mode handler address
 * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
 *
 *      (trapno, trapfn)
 *      err
 *      rip
 *      cs
 *      rflags
 *      rsp
 *      ss
 *
 */

        .code32
/*
 * Control is passed here to return to the compatibility mode user.
 * At this stage we're in kernel space in compatibility mode
 * but we need to switch into 64-bit mode in the 4G-based trampoline
 * space before performing the iret.
 */
Entry(lo64_ret_to_user)
        movl    %gs:CPU_ACTIVE_THREAD,%ecx

        movl    ACT_PCB_IDS(%ecx),%eax  /* Obtain this thread's debug state */
        cmpl    $0,%eax                 /* Is there a debug register context? */
        je      2f                      /* branch if not */
        cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 64-bit task? */
        jne     1f
        movl    DS_DR0(%eax), %ecx      /* If not, load the 32 bit DRs */
        movl    %ecx, %db0
        movl    DS_DR1(%eax), %ecx
        movl    %ecx, %db1
        movl    DS_DR2(%eax), %ecx
        movl    %ecx, %db2
        movl    DS_DR3(%eax), %ecx
        movl    %ecx, %db3
        movl    DS_DR7(%eax), %ecx
        movl    %ecx, %gs:CPU_DR7
        movl    $0, %gs:CPU_DR7 + 4
        jmp     2f
1:
        ENTER_64BIT_MODE()              /* Enter long mode */
        mov     DS64_DR0(%eax), %rcx    /* Load the full width DRs */
        mov     %rcx, %dr0
        mov     DS64_DR1(%eax), %rcx
        mov     %rcx, %dr1
        mov     DS64_DR2(%eax), %rcx
        mov     %rcx, %dr2
        mov     DS64_DR3(%eax), %rcx
        mov     %rcx, %dr3
        mov     DS64_DR7(%eax), %rcx
        mov     %rcx, %gs:CPU_DR7
        jmp     3f                      /* Enter uberspace */
2:
        ENTER_64BIT_MODE()
3:
        ENTER_UBERSPACE()

        /*
         * Now switch %cr3, if necessary.
         */
        swapgs                          /* switch back to uber-kernel gs base */
        mov     %gs:CPU_TASK_CR3,%rcx
        mov     %rcx,%gs:CPU_ACTIVE_CR3
        mov     %cr3, %rax
        cmp     %rcx, %rax
        je      1f
        /* flag the copyio engine state as WINDOWS_CLEAN */
        mov     %gs:CPU_ACTIVE_THREAD,%eax
        movl    $(WINDOWS_CLEAN),ACT_COPYIO_STATE(%eax)
        mov     %rcx,%cr3               /* switch to user's address space */
1:

        mov     %gs:CPU_DR7, %rax       /* Is there a debug control register? */
        cmp     $0, %rax
        je      1f
        mov     %rax, %dr7              /* Set DR7 */
        movq    $0, %gs:CPU_DR7
1:

        /*
         * Adjust stack to use uber-space.
         */
        mov     $(KERNEL_UBER_BASE_HI32), %rax
        shl     $32, %rsp
        shrd    $32, %rax, %rsp         /* relocate into uber-space */
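
        /*
         * Note on the relocation above: "shl $32, %rsp" moves the
         * original low 32 bits of the stack pointer into the upper
         * half, and "shrd $32" shifts them back down while pulling the
         * low 32 bits of %rax (i.e. KERNEL_UBER_BASE_HI32) into the
         * upper half. The result is
         * (KERNEL_UBER_BASE_HI32 << 32) | (rsp & 0xffffffff):
         * the same stack, re-addressed through the uber-space alias.
         */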

        cmpl    $(SS_32), SS_FLAVOR(%rsp)       /* 32-bit state? */
        jne     L_64bit_return
        jmp     L_32bit_return

Entry(lo64_ret_to_kernel)
        ENTER_64BIT_MODE()
        ENTER_UBERSPACE()

        swapgs                          /* switch back to uber-kernel gs base */

        /*
         * Adjust stack to use uber-space.
         */
        mov     $(KERNEL_UBER_BASE_HI32), %rax
        shl     $32, %rsp
        shrd    $32, %rax, %rsp         /* relocate into uber-space */

        /* Check for return to 64-bit kernel space (EFI today) */
        cmpl    $(SS_32), SS_FLAVOR(%rsp)       /* 32-bit state? */
        jne     L_64bit_return
        /* fall through for 32-bit return */

L_32bit_return:
        /*
         * Restore registers into the machine state for iret.
         */
        movl    R_EIP(%rsp), %eax
        movl    %eax, ISC32_RIP(%rsp)
        movl    R_EFLAGS(%rsp), %eax
        movl    %eax, ISC32_RFLAGS(%rsp)
        movl    R_CS(%rsp), %eax
        movl    %eax, ISC32_CS(%rsp)
        movl    R_UESP(%rsp), %eax
        movl    %eax, ISC32_RSP(%rsp)
        movl    R_SS(%rsp), %eax
        movl    %eax, ISC32_SS(%rsp)

        /*
         * Restore general 32-bit registers
         */
        movl    R_EAX(%rsp), %eax
        movl    R_EBX(%rsp), %ebx
        movl    R_ECX(%rsp), %ecx
        movl    R_EDX(%rsp), %edx
        movl    R_EBP(%rsp), %ebp
        movl    R_ESI(%rsp), %esi
        movl    R_EDI(%rsp), %edi

        /*
         * Restore segment registers. We may take an exception here but
         * we've got enough space left in the save frame area to absorb
         * a hardware frame plus the trapfn and trapno.
         */
        swapgs
EXT(ret32_set_ds):
        movw    R_DS(%rsp), %ds
EXT(ret32_set_es):
        movw    R_ES(%rsp), %es
EXT(ret32_set_fs):
        movw    R_FS(%rsp), %fs
EXT(ret32_set_gs):
        movw    R_GS(%rsp), %gs

        add     $(ISC32_OFFSET)+8+8, %rsp       /* pop compat frame +
                                                   trapno/trapfn and error */
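        /*
         * At this point %rsp addresses the hardware frame's rip slot:
         * the compat save area, the (trapno, trapfn) slot and the error
         * slot have all been popped, so the ISF64_* offsets below are
         * rebased by -8-8 to compensate.
         */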
        cmp     $(SYSENTER_CS),ISF64_CS-8-8(%rsp)
                                        /* test for fast entry/exit */
        je      L_fast_exit
EXT(ret32_iret):
        iretq                           /* return from interrupt */

L_fast_exit:
        pop     %rdx                    /* user return eip */
        pop     %rcx                    /* pop and toss cs */
        andl    $(~EFL_IF), (%rsp)      /* clear interrupts enable, sti below */
        popf                            /* flags - carry denotes failure */
        pop     %rcx                    /* user return esp */
        .code32
        sti                             /* interrupts enabled after sysexit */
        sysexit                         /* 32-bit sysexit */
        .code64
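
/*
 * Why the sti/sysexit pairing above is safe: STI only takes effect
 * after the instruction that follows it, so no interrupt can be
 * delivered between the two instructions while user values are live in
 * %rdx/%rcx. Architecturally, 32-bit SYSEXIT resumes user code at %edx
 * with the user stack from %ecx, which is why those registers are
 * loaded from the frame just beforehand.
 */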

L_64bit_return:
        /*
         * Set the GS Base MSR with the user's gs base.
         */
        movl    %gs:CPU_UBER_USER_GS_BASE, %eax
        movl    %gs:CPU_UBER_USER_GS_BASE+4, %edx
        movl    $(MSR_IA32_GS_BASE), %ecx
        swapgs
        testb   $3, R64_CS(%rsp)        /* returning to user-space? */
        jz      1f
        wrmsr                           /* set 64-bit base */
1:
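
        /*
         * Note: wrmsr writes MSR %ecx with the %edx:%eax pair, so the
         * three movl's above stage the user's 64-bit gs base and the
         * MSR_IA32_GS_BASE index before swapgs. The swapgs comes first
         * so that the kernel's gs base is already swapped out of the
         * active base; the wrmsr then installs the user value directly.
         * The testb of the saved CS's CPL bits skips the wrmsr when
         * returning to kernel space (e.g. EFI).
         */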

        /*
         * Restore general 64-bit registers
         */
        mov     R64_R15(%rsp), %r15
        mov     R64_R14(%rsp), %r14
        mov     R64_R13(%rsp), %r13
        mov     R64_R12(%rsp), %r12
        mov     R64_R11(%rsp), %r11
        mov     R64_R10(%rsp), %r10
        mov     R64_R9(%rsp), %r9
        mov     R64_R8(%rsp), %r8
        mov     R64_RSI(%rsp), %rsi
        mov     R64_RDI(%rsp), %rdi
        mov     R64_RBP(%rsp), %rbp
        mov     R64_RDX(%rsp), %rdx
        mov     R64_RBX(%rsp), %rbx
        mov     R64_RCX(%rsp), %rcx
        mov     R64_RAX(%rsp), %rax

        add     $(ISS64_OFFSET)+8+8, %rsp       /* pop saved state frame +
                                                   trapno/trapfn and error */
        cmpl    $(SYSCALL_CS),ISF64_CS-8-8(%rsp)
                                        /* test for fast entry/exit */
        je      L_sysret
EXT(ret64_iret):
        iretq                           /* return from interrupt */

L_sysret:
        /*
         * Here to load rcx/r11/rsp and perform the sysret back to user-space.
         *      rcx     user rip
         *      r11     user rflags
         *      rsp     user stack pointer
         */
        mov     ISF64_RIP-16(%rsp), %rcx
        mov     ISF64_RFLAGS-16(%rsp), %r11
        mov     ISF64_RSP-16(%rsp), %rsp
        sysretq                         /* return from system call */
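
/*
 * Architectural background for L_sysret: SYSRET returns to the %rip in
 * %rcx and reloads rflags from %r11 (mirroring what SYSCALL saved on
 * entry), but it does not touch the stack pointer, so %rsp must be
 * loaded with the user's stack by hand as the last step above. The -16
 * offsets account for the trapno/trapfn and error slots already popped
 * by the preceding add.
 */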

/*
 * Common path to enter locore handlers.
 */
L_enter_lohandler:
        swapgs                          /* switch to kernel gs (cpu_data) */
L_enter_lohandler_continue:
        cmpl    $(USER64_CS), ISF64_CS(%rsp)
        je      L_64bit_enter           /* this is a 64-bit user task */
        cmpl    $(KERNEL64_CS), ISF64_CS(%rsp)
        je      L_64bit_enter           /* we're in 64-bit (EFI) code */
        jmp     L_32bit_enter

/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */

Entry(hi64_unix_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
L_unix_scall_continue:
        push    %rax                    /* save system call number */
        push    $(UNIX_INT)
        movl    $(LO_UNIX_SCALL), 4(%rsp)
        jmp     L_32bit_enter_check


Entry(hi64_mach_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
L_mach_scall_continue:
        push    %rax                    /* save system call number */
        push    $(MACH_INT)
        movl    $(LO_MACH_SCALL), 4(%rsp)
        jmp     L_32bit_enter_check


Entry(hi64_mdep_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
L_mdep_scall_continue:
        push    %rax                    /* save system call number */
        push    $(MACHDEP_INT)
        movl    $(LO_MDEP_SCALL), 4(%rsp)
        jmp     L_32bit_enter_check


Entry(hi64_diag_scall)
        swapgs                          /* switch to kernel gs (cpu_data) */
L_diag_scall_continue:
        push    %rax                    /* save system call number */
        push    $(DIAG_INT)
        movl    $(LO_DIAG_SCALL), 4(%rsp)
        jmp     L_32bit_enter_check

Entry(hi64_syscall)
        swapgs                          /* Kapow! get per-cpu data area */
L_syscall_continue:
        mov     %rsp, %gs:CPU_UBER_TMP  /* save user stack */
        mov     %gs:CPU_UBER_ISF, %rsp  /* switch stack to pcb */

        /*
         * Save values in the ISF frame in the PCB
         * to cons up the saved machine state.
         */
        movl    $(USER_DS), ISF64_SS(%rsp)
        movl    $(SYSCALL_CS), ISF64_CS(%rsp)   /* cs - a pseudo-segment */
        mov     %r11, ISF64_RFLAGS(%rsp)        /* rflags */
        mov     %rcx, ISF64_RIP(%rsp)           /* rip */
        mov     %gs:CPU_UBER_TMP, %rcx
        mov     %rcx, ISF64_RSP(%rsp)           /* user stack */
        mov     %rax, ISF64_ERR(%rsp)           /* err/rax - syscall code */
        movl    $(0), ISF64_TRAPNO(%rsp)        /* trapno */
        movl    $(LO_SYSCALL), ISF64_TRAPFN(%rsp)
        jmp     L_64bit_enter           /* this can only be a 64-bit task */
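
/*
 * Why hi64_syscall must build the frame itself: the SYSCALL instruction
 * saves the return %rip in %rcx and rflags in %r11, loads new cs/ss
 * selectors from an MSR, and leaves the stack pointer untouched, still
 * pointing at the user stack. Hence the user %rsp is parked in
 * CPU_UBER_TMP, the stack is switched to the PCB, and a synthetic
 * interrupt stack frame is constructed from %rcx/%r11 so the rest of
 * the entry path can treat this like any other trap.
 */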

/*
 * sysenter entry point
 * Requires user code to set up:
 *      edx: user instruction pointer (return address)
 *      ecx: user stack pointer
 *              on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *       or requiring ecx to be preserved.
 */
Entry(hi64_sysenter)
        mov     (%rsp), %rsp            /* switch from temporary stack to pcb */
        /*
         * Push values on to the PCB stack
         * to cons up the saved machine state.
         */
        push    $(USER_DS)              /* ss */
        push    %rcx                    /* uesp */
        pushf                           /* flags */
        /*
         * Clear, among others, the Nested Task (NT) flag bit;
         * this is cleared by INT, but not by sysenter, which only
         * clears RF, VM and IF.
         */
        push    $0
        popf
        push    $(SYSENTER_CS)          /* cs */
        swapgs                          /* switch to kernel gs (cpu_data) */
L_sysenter_continue:
        push    %rdx                    /* eip */
        push    %rax                    /* err/eax - syscall code */
        push    $(0)
        movl    $(LO_SYSENTER), ISF64_TRAPFN(%rsp)
        orl     $(EFL_IF), ISF64_RFLAGS(%rsp)
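        /*
         * The orl above repairs the saved flags image: sysenter cleared
         * IF before the pushf, so the image on the stack shows
         * interrupts disabled. Setting EFL_IF here ensures interrupts
         * are re-enabled when this saved state is eventually restored
         * to user-space.
         */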

L_32bit_enter_check:
        /*
         * Check we're not a confused 64-bit user.
         */
        cmpl    $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
        jne     L_64bit_entry_reject
        /* fall through to 32-bit handler: */

L_32bit_enter:
        /*
         * Make space for the compatibility save area.
         */
        sub     $(ISC32_OFFSET), %rsp
        movl    $(SS_32), SS_FLAVOR(%rsp)

        /*
         * Save segment regs
         */
        mov     %ds, R_DS(%rsp)
        mov     %es, R_ES(%rsp)
        mov     %fs, R_FS(%rsp)
        mov     %gs, R_GS(%rsp)

        /*
         * Save general 32-bit registers
         */
        mov     %eax, R_EAX(%rsp)
        mov     %ebx, R_EBX(%rsp)
        mov     %ecx, R_ECX(%rsp)
        mov     %edx, R_EDX(%rsp)
        mov     %ebp, R_EBP(%rsp)
        mov     %esi, R_ESI(%rsp)
        mov     %edi, R_EDI(%rsp)

        /* Unconditionally save cr2; only meaningful on page faults */
        mov     %cr2, %rax
        mov     %eax, R_CR2(%rsp)

        /*
         * Copy registers already saved in the machine state
         * (in the interrupt stack frame) into the compat save area.
         */
        mov     ISC32_RIP(%rsp), %eax
        mov     %eax, R_EIP(%rsp)
        mov     ISC32_RFLAGS(%rsp), %eax
        mov     %eax, R_EFLAGS(%rsp)
        mov     ISC32_CS(%rsp), %eax
        mov     %eax, R_CS(%rsp)
        mov     ISC32_RSP(%rsp), %eax
        mov     %eax, R_UESP(%rsp)
        mov     ISC32_SS(%rsp), %eax
        mov     %eax, R_SS(%rsp)
L_32bit_enter_after_fault:
        mov     ISC32_TRAPNO(%rsp), %ebx        /* %ebx := trapno for later */
        mov     %ebx, R_TRAPNO(%rsp)
        mov     ISC32_ERR(%rsp), %eax
        mov     %eax, R_ERR(%rsp)
        mov     ISC32_TRAPFN(%rsp), %edx

/*
 * Common point to enter lo_handler in compatibility mode:
 *      %ebx    trapno
 *      %edx    locore handler address
 */
L_enter_lohandler2:
        /*
         * Switch address space to kernel
         * if not shared space and not already mapped.
         * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
         */
        mov     %cr3, %rax
        mov     %gs:CPU_TASK_CR3, %rcx
        cmp     %rax, %rcx              /* is the task's cr3 loaded? */
        jne     1f
        cmpl    $(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
        je      2f
1:
        mov     %gs:CPU_KERNEL_CR3, %rcx
        cmp     %rax, %rcx
        je      2f
        mov     %rcx, %cr3
        mov     %rcx, %gs:CPU_ACTIVE_CR3
2:
        /*
         * Switch to compatibility mode.
         * Then establish kernel segments.
         */
        swapgs                          /* Done with uber-kernel gs */
        ENTER_COMPAT_MODE()

        /*
         * Now in compatibility mode and running in compatibility space,
         * prepare to enter the locore handler.
         *      %ebx    trapno
         *      %edx    lo_handler pointer
         * Note: the stack pointer (now 32-bit) is directly addressing the
         * kernel below 4G and therefore is automagically re-based.
         */
        mov     $(KERNEL_DS), %eax
        mov     %eax, %ss
        mov     %eax, %ds
        mov     %eax, %es
        mov     %eax, %fs
        mov     $(CPU_DATA_GS), %eax
        mov     %eax, %gs

        movl    %gs:CPU_ACTIVE_THREAD,%ecx      /* Get the active thread */
        cmpl    $0, ACT_PCB_IDS(%ecx)   /* Is there a debug register state? */
        je      1f
        movl    $0, %ecx                /* If so, reset DR7 (the control) */
        movl    %ecx, %dr7
1:
        addl    $1,%gs:hwIntCnt(,%ebx,4)        // Bump the trap/intr count

        /* Dispatch the designated lo handler */
        jmp     *%edx

        .code64
L_64bit_entry_reject:
        /*
         * Here for a 64-bit user attempting an invalid kernel entry.
         */
        movl    $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
        movl    $(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
        /* Fall through... */

L_64bit_enter:
        /*
         * Here for a 64-bit user task, or special 64-bit kernel code.
         * Make space for the save area.
         */
        sub     $(ISS64_OFFSET), %rsp
        movl    $(SS_64), SS_FLAVOR(%rsp)

        /*
         * Save segment regs
         */
        mov     %fs, R64_FS(%rsp)
        mov     %gs, R64_GS(%rsp)

        /* Save general-purpose registers */
        mov     %rax, R64_RAX(%rsp)
        mov     %rcx, R64_RCX(%rsp)
        mov     %rbx, R64_RBX(%rsp)
        mov     %rbp, R64_RBP(%rsp)
        mov     %r11, R64_R11(%rsp)
        mov     %r12, R64_R12(%rsp)
        mov     %r13, R64_R13(%rsp)
        mov     %r14, R64_R14(%rsp)
        mov     %r15, R64_R15(%rsp)

        /* cr2 is significant only for page-faults */
        mov     %cr2, %rax
        mov     %rax, R64_CR2(%rsp)

        /* Other registers (which may contain syscall args) */
        mov     %rdi, R64_RDI(%rsp)     /* arg0 .. */
        mov     %rsi, R64_RSI(%rsp)
        mov     %rdx, R64_RDX(%rsp)
        mov     %r10, R64_R10(%rsp)
        mov     %r8, R64_R8(%rsp)
        mov     %r9, R64_R9(%rsp)       /* .. arg5 */

L_64bit_enter_after_fault:
        /*
         * At this point we're almost ready to join the common lo-entry code.
         */
        mov     R64_TRAPNO(%rsp), %ebx
        mov     R64_TRAPFN(%rsp), %edx

        jmp     L_enter_lohandler2

/*
 * Debug trap. Check for single-stepping across system call into
 * kernel. If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 */
Entry(hi64_debug)
        swapgs                          /* set %gs for cpu data */
        push    $0                      /* error code */
        push    $(T_DEBUG)
        movl    $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)

        testb   $3, ISF64_CS(%rsp)
        jnz     L_enter_lohandler_continue

        /*
         * trap came from kernel mode
         */
        cmpl    $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
        jne     L_enter_lohandler_continue      /* trap not in uber-space */

        cmpl    $(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
        jne     6f
        add     $(ISF64_SIZE),%rsp      /* remove entire intr stack frame */
        jmp     L_mach_scall_continue   /* continue system call entry */
6:
        cmpl    $(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
        jne     5f
        add     $(ISF64_SIZE),%rsp      /* remove entire intr stack frame */
        jmp     L_mdep_scall_continue   /* continue system call entry */
5:
        cmpl    $(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
        jne     4f
        add     $(ISF64_SIZE),%rsp      /* remove entire intr stack frame */
        jmp     L_unix_scall_continue   /* continue system call entry */
4:
        cmpl    $(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
        jne     L_enter_lohandler_continue
        /*
         * Interrupt stack frame has been pushed on the temporary stack.
         * We have to switch to pcb stack and copy eflags.
         */
        add     $32,%rsp                /* remove trapno/trapfn/err/rip/cs */
        push    %rcx                    /* save %rcx - user stack pointer */
        mov     32(%rsp),%rcx           /* top of intr stack -> pcb stack */
        xchg    %rcx,%rsp               /* switch to pcb stack */
        push    $(USER_DS)              /* ss */
        push    (%rcx)                  /* saved %rcx into rsp slot */
        push    8(%rcx)                 /* rflags */
        mov     (%rcx),%rcx             /* restore %rcx */
        push    $(SYSENTER_TF_CS)       /* cs - not SYSENTER_CS for iret path */
        jmp     L_sysenter_continue     /* continue sysenter entry */


Entry(hi64_double_fault)
        swapgs                          /* set %gs for cpu data */
        push    $(T_DOUBLE_FAULT)
        movl    $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)

        cmpl    $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
        jne     L_enter_lohandler_continue      /* trap not in uber-space */

        cmpl    $(EXT(hi64_syscall)), ISF64_RIP(%rsp)
        jne     L_enter_lohandler_continue

        mov     ISF64_RSP(%rsp), %rsp
        jmp     L_syscall_continue


/*
 * General protection or segment-not-present fault.
 * Check for a GP/NP fault in the kernel_return
 * sequence; if there, report it as a GP/NP fault on the user's instruction.
 *
 * rsp->  0:   trap code (NP or GP) and trap function
 *        8:   segment number in error (error code)
 *       16:   rip
 *       24:   cs
 *       32:   rflags
 *       40:   rsp
 *       48:   ss
 *       56:   old registers (trap is from kernel)
 */
Entry(hi64_gen_prot)
        push    $(T_GENERAL_PROTECTION)
        jmp     trap_check_kernel_exit  /* check for kernel exit sequence */

Entry(hi64_segnp)
        push    $(T_SEGMENT_NOT_PRESENT)
                                        /* indicate fault type */
trap_check_kernel_exit:
        movl    $(LO_ALLTRAPS), 4(%rsp)
        testb   $3,24(%rsp)
        jnz     hi64_take_trap
                                        /* trap was from kernel mode, so */
                                        /* check for the kernel exit sequence */
        cmpl    $(KERNEL_UBER_BASE_HI32), 16+4(%rsp)
        jne     hi64_take_trap          /* trap not in uber-space */

        cmpl    $(EXT(ret32_iret)), 16(%rsp)
        je      L_fault_iret
        cmpl    $(EXT(ret32_set_ds)), 16(%rsp)
        je      L_32bit_fault_set_seg
        cmpl    $(EXT(ret32_set_es)), 16(%rsp)
        je      L_32bit_fault_set_seg
        cmpl    $(EXT(ret32_set_fs)), 16(%rsp)
        je      L_32bit_fault_set_seg
        cmpl    $(EXT(ret32_set_gs)), 16(%rsp)
        je      L_32bit_fault_set_seg

        cmpl    $(EXT(ret64_iret)), 16(%rsp)
        je      L_fault_iret

hi64_take_trap:
        jmp     L_enter_lohandler


/*
 * GP/NP fault on IRET: CS or SS is in error.
 * All registers contain the user's values.
 *
 * on SP is
 *       0     trap number/function
 *       8     errcode
 *      16     rip
 *      24     cs
 *      32     rflags
 *      40     rsp     --> new trapno
 *      48     ss      --> new errcode
 *      56     user rip
 *      64     user cs
 *      72     user rflags
 *      80     user rsp
 *      88     user ss
 */
L_fault_iret:
        mov     %rax, 16(%rsp)          /* save rax (we don't need saved rip) */
        pop     %rax                    /* get trap number */
        mov     %rax, 40-8(%rsp)        /* put in user trap number */
        pop     %rax                    /* get error code */
        mov     %rax, 48-8-8(%rsp)      /* put in user errcode */
        pop     %rax                    /* restore rax */
        add     $16,%rsp                /* eat 2 more slots */
                                        /* now treat as fault from user */
        jmp     L_enter_lohandler

/*
 * Fault restoring a segment register. All of the saved state is still
 * on the stack untouched since we haven't yet moved the stack pointer.
 */
L_32bit_fault_set_seg:
        pop     %rax                    /* get trap number/function */
        pop     %rdx                    /* get error code */
        add     $40,%rsp                /* pop stack to saved state */
        mov     %rax,ISC32_TRAPNO(%rsp)
        mov     %rdx,ISC32_ERR(%rsp)
                                        /* now treat as fault from user */
                                        /* except that all the state is */
                                        /* already saved - we just have to */
                                        /* move the trapno and error into */
                                        /* the compatibility frame */
        swapgs
        jmp     L_32bit_enter_after_fault


/*
 * Fatal exception handlers:
 */
Entry(db_task_dbl_fault64)
        push    $(T_DOUBLE_FAULT)
        movl    $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
        jmp     L_enter_lohandler

Entry(db_task_stk_fault64)
Entry(hi64_stack_fault)
        push    $(T_STACK_FAULT)
        movl    $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
        jmp     L_enter_lohandler

Entry(mc64)
        push    $(0)                    /* Error */
        push    $(T_MACHINE_CHECK)
        movl    $(LO_MACHINE_CHECK), ISF64_TRAPFN(%rsp)
        jmp     L_enter_lohandler