]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/idt64.s
xnu-792.18.15.tar.gz
[apple/xnu.git] / osfmk / i386 / idt64.s
1 /*
2 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <i386/asm.h>
29 #include <i386/asm64.h>
30 #include <assym.s>
31 #include <mach_kdb.h>
32 #include <i386/eflags.h>
33 #include <i386/trap.h>
34 #define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
35 #include <mach/i386/syscall_sw.h>
36 #include <i386/postcode.h>
37 #include <i386/proc_reg.h>
38
39 /*
 * Locore handlers.
 *
 * Addresses of the compatibility-mode (32-bit) handler entry points that
 * the 64-bit trampolines in this file dispatch to once machine state has
 * been saved and the kernel address space/segments are established
 * (see L_enter_lohandler2 below, which jumps through one of these).
41  */
42 #define LO_ALLINTRS EXT(lo_allintrs)
43 #define LO_ALLTRAPS EXT(lo_alltraps)
44 #define LO_SYSENTER EXT(lo_sysenter)
45 #define LO_SYSCALL EXT(lo_syscall)
46 #define LO_UNIX_SCALL EXT(lo_unix_scall)
47 #define LO_MACH_SCALL EXT(lo_mach_scall)
48 #define LO_MDEP_SCALL EXT(lo_mdep_scall)
49 #define LO_DIAG_SCALL EXT(lo_diag_scall)
50 #define LO_DOUBLE_FAULT EXT(lo_df64)
51 #define LO_MACHINE_CHECK EXT(lo_mc64)
53 /*
54  * Interrupt descriptor table and code vectors for it.
55  *
56  * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
57  * reformatted ("fixed") before use.
 * All vectors are rebased in uber-space: the handler address is formed
 * from the 32-bit vector plus KERNEL_UBER_BASE_HI32 as its high half.
59  * Special vectors (e.g. double-fault) use a non-0 IST.
 *
 * Fake-descriptor fields, in order: handler low 32 bits, handler high
 * 32 bits, segment selector, IST number (pre-scaled by 16), gate type,
 * and 32 bits of zero padding.  The fix-up pass rearranges these into
 * the hardware 16-byte 64-bit gate format.
60  */
61 #define IDT64_BASE_ENTRY(vec,seg,ist,type) \
62 .data ;\
63 .long vec ;\
64 .long KERNEL_UBER_BASE_HI32 ;\
65 .word seg ;\
66 .byte ist*16 ;\
67 .byte type ;\
68 .long 0 ;\
69 .text
70
/* Gate on an external (EXT) symbol, or on a file-local label, both in KERNEL64_CS. */
71 #define IDT64_ENTRY(vec,ist,type) \
72 IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
73 #define IDT64_ENTRY_LOCAL(vec,ist,type) \
74 IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)
76 /*
77  * Push trap number and address of compatibility mode handler,
78  * then branch to common trampoline. Error already pushed (by hardware).
 *
 * Note: the 64-bit "push $(n)" occupies an 8-byte stack slot but only
 * the low 4 bytes hold the trap number; the following movl stores the
 * 32-bit locore handler address into the upper 4 bytes, packing
 * (trapno, trapfn) into a single stack entry.
79  */
80 #define EXCEP64_ERR(n,name) \
81 IDT64_ENTRY(name,0,K_INTR_GATE) ;\
82 Entry(name) ;\
83 push $(n) ;\
84 movl $(LO_ALLTRAPS), 4(%rsp) ;\
85 jmp L_enter_lohandler
86
87
88 /*
89  * Push error(0), trap number and address of compatibility mode handler,
90  * then branch to common trampoline.
 * Used for vectors where the hardware does not push an error code.
91  */
92 #define EXCEPTION64(n,name) \
93 IDT64_ENTRY(name,0,K_INTR_GATE) ;\
94 Entry(name) ;\
95 push $0 ;\
96 push $(n) ;\
97 movl $(LO_ALLTRAPS), 4(%rsp) ;\
98 jmp L_enter_lohandler
99
100
101 /*
102  * Interrupt from user.
103  * Push error (0), trap number and address of compatibility mode handler,
104  * then branch to common trampoline.
 * Same as EXCEPTION64 but the gate is user-callable (DPL 3).
105  */
106 #define EXCEP64_USR(n,name) \
107 IDT64_ENTRY(name,0,U_INTR_GATE) ;\
108 Entry(name) ;\
109 push $0 ;\
110 push $(n) ;\
111 movl $(LO_ALLTRAPS), 4(%rsp) ;\
112 jmp L_enter_lohandler
113
114
115 /*
116  * Special interrupt code from user.
 * Only lays down the user-callable gate; the handler itself (e.g. the
 * hi64_*_scall entry points) is written out explicitly elsewhere.
117  */
118 #define EXCEP64_SPC_USR(n,name) \
119 IDT64_ENTRY(name,0,U_INTR_GATE)
120
121
122 /*
123  * Special interrupt code.
124  * In 64-bit mode we may use an IST slot instead of task gates.
 * As above, only the gate is laid down here; the named handler is
 * defined explicitly later in this file.
125  */
126 #define EXCEP64_IST(n,name,ist) \
127 IDT64_ENTRY(name,ist,K_INTR_GATE)
128 #define EXCEP64_SPC(n,name) \
129 IDT64_ENTRY(name,0,K_INTR_GATE)
130
131
132 /*
133  * Interrupt.
134  * Push zero err, interrupt vector and address of compatibility mode handler,
135  * then branch to common trampoline.
 * The stub label L_<vector> is file-local, created via token pasting.
136  */
137 #define INTERRUPT64(n) \
138 IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE) ;\
139 .align FALIGN ;\
140 L_ ## n: ;\
141 push $0 ;\
142 push $(n) ;\
143 movl $(LO_ALLINTRS), 4(%rsp) ;\
144 jmp L_enter_lohandler
145
146
147 .data
148 .align 12
149 Entry(master_idt64)
150 Entry(hi64_data_base)
151 .text
152 .code64
153 Entry(hi64_text_base)
154
/* Vectors 0x00 - 0x1f: processor-defined exceptions. */
155 EXCEPTION64(0x00,t64_zero_div)
156 EXCEP64_SPC(0x01,hi64_debug)
157 INTERRUPT64(0x02) /* NMI */
158 EXCEP64_USR(0x03,t64_int3)
159 EXCEP64_USR(0x04,t64_into)
160 EXCEP64_USR(0x05,t64_bounds)
161 EXCEPTION64(0x06,t64_invop)
162 EXCEPTION64(0x07,t64_nofpu)
163 #if MACH_KDB
164 EXCEP64_IST(0x08,db_task_dbl_fault64,1)
165 #else
166 EXCEP64_IST(0x08,hi64_double_fault,1)
167 #endif
168 EXCEPTION64(0x09,a64_fpu_over)
169 EXCEPTION64(0x0a,a64_inv_tss)
170 EXCEP64_SPC(0x0b,hi64_segnp)
171 #if MACH_KDB
172 EXCEP64_IST(0x0c,db_task_stk_fault64,1)
173 #else
174 EXCEP64_SPC(0x0c,hi64_stack_fault)
175 #endif
176 EXCEP64_SPC(0x0d,hi64_gen_prot)
177 EXCEP64_ERR(0x0e,t64_page_fault)
178 EXCEPTION64(0x0f,t64_trap_0f)
179 EXCEPTION64(0x10,t64_fpu_err)
180 EXCEPTION64(0x11,t64_trap_11)
181 EXCEP64_IST(0x12,mc64,1)
182 EXCEPTION64(0x13,t64_sse_err)
183 EXCEPTION64(0x14,t64_trap_14)
184 EXCEPTION64(0x15,t64_trap_15)
185 EXCEPTION64(0x16,t64_trap_16)
186 EXCEPTION64(0x17,t64_trap_17)
187 EXCEPTION64(0x18,t64_trap_18)
188 EXCEPTION64(0x19,t64_trap_19)
189 EXCEPTION64(0x1a,t64_trap_1a)
190 EXCEPTION64(0x1b,t64_trap_1b)
191 EXCEPTION64(0x1c,t64_trap_1c)
192 EXCEPTION64(0x1d,t64_trap_1d)
193 EXCEPTION64(0x1e,t64_trap_1e)
194 EXCEPTION64(0x1f,t64_trap_1f)
195
/* Vectors 0x20 - 0x7f: external interrupt stubs (all share L_enter_lohandler). */
196 INTERRUPT64(0x20)
197 INTERRUPT64(0x21)
198 INTERRUPT64(0x22)
199 INTERRUPT64(0x23)
200 INTERRUPT64(0x24)
201 INTERRUPT64(0x25)
202 INTERRUPT64(0x26)
203 INTERRUPT64(0x27)
204 INTERRUPT64(0x28)
205 INTERRUPT64(0x29)
206 INTERRUPT64(0x2a)
207 INTERRUPT64(0x2b)
208 INTERRUPT64(0x2c)
209 INTERRUPT64(0x2d)
210 INTERRUPT64(0x2e)
211 INTERRUPT64(0x2f)
212
213 INTERRUPT64(0x30)
214 INTERRUPT64(0x31)
215 INTERRUPT64(0x32)
216 INTERRUPT64(0x33)
217 INTERRUPT64(0x34)
218 INTERRUPT64(0x35)
219 INTERRUPT64(0x36)
220 INTERRUPT64(0x37)
221 INTERRUPT64(0x38)
222 INTERRUPT64(0x39)
223 INTERRUPT64(0x3a)
224 INTERRUPT64(0x3b)
225 INTERRUPT64(0x3c)
226 INTERRUPT64(0x3d)
227 INTERRUPT64(0x3e)
228 INTERRUPT64(0x3f)
229
230 INTERRUPT64(0x40)
231 INTERRUPT64(0x41)
232 INTERRUPT64(0x42)
233 INTERRUPT64(0x43)
234 INTERRUPT64(0x44)
235 INTERRUPT64(0x45)
236 INTERRUPT64(0x46)
237 INTERRUPT64(0x47)
238 INTERRUPT64(0x48)
239 INTERRUPT64(0x49)
240 INTERRUPT64(0x4a)
241 INTERRUPT64(0x4b)
242 INTERRUPT64(0x4c)
243 INTERRUPT64(0x4d)
244 INTERRUPT64(0x4e)
245 INTERRUPT64(0x4f)
246
247 INTERRUPT64(0x50)
248 INTERRUPT64(0x51)
249 INTERRUPT64(0x52)
250 INTERRUPT64(0x53)
251 INTERRUPT64(0x54)
252 INTERRUPT64(0x55)
253 INTERRUPT64(0x56)
254 INTERRUPT64(0x57)
255 INTERRUPT64(0x58)
256 INTERRUPT64(0x59)
257 INTERRUPT64(0x5a)
258 INTERRUPT64(0x5b)
259 INTERRUPT64(0x5c)
260 INTERRUPT64(0x5d)
261 INTERRUPT64(0x5e)
262 INTERRUPT64(0x5f)
263
264 INTERRUPT64(0x60)
265 INTERRUPT64(0x61)
266 INTERRUPT64(0x62)
267 INTERRUPT64(0x63)
268 INTERRUPT64(0x64)
269 INTERRUPT64(0x65)
270 INTERRUPT64(0x66)
271 INTERRUPT64(0x67)
272 INTERRUPT64(0x68)
273 INTERRUPT64(0x69)
274 INTERRUPT64(0x6a)
275 INTERRUPT64(0x6b)
276 INTERRUPT64(0x6c)
277 INTERRUPT64(0x6d)
278 INTERRUPT64(0x6e)
279 INTERRUPT64(0x6f)
280
281 INTERRUPT64(0x70)
282 INTERRUPT64(0x71)
283 INTERRUPT64(0x72)
284 INTERRUPT64(0x73)
285 INTERRUPT64(0x74)
286 INTERRUPT64(0x75)
287 INTERRUPT64(0x76)
288 INTERRUPT64(0x77)
289 INTERRUPT64(0x78)
290 INTERRUPT64(0x79)
291 INTERRUPT64(0x7a)
292 INTERRUPT64(0x7b)
293 INTERRUPT64(0x7c)
294 INTERRUPT64(0x7d)
295 INTERRUPT64(0x7e)
296 INTERRUPT64(0x7f)
297
/* Vectors 0x80 - 0x83: user-callable system call gates. */
298 EXCEP64_SPC_USR(0x80,hi64_unix_scall)
299 EXCEP64_SPC_USR(0x81,hi64_mach_scall)
300 EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
301 EXCEP64_SPC_USR(0x83,hi64_diag_scall)
302
/* Vectors 0x84 - 0xfe: more external interrupt stubs. */
303 INTERRUPT64(0x84)
304 INTERRUPT64(0x85)
305 INTERRUPT64(0x86)
306 INTERRUPT64(0x87)
307 INTERRUPT64(0x88)
308 INTERRUPT64(0x89)
309 INTERRUPT64(0x8a)
310 INTERRUPT64(0x8b)
311 INTERRUPT64(0x8c)
312 INTERRUPT64(0x8d)
313 INTERRUPT64(0x8e)
314 INTERRUPT64(0x8f)
315
316 INTERRUPT64(0x90)
317 INTERRUPT64(0x91)
318 INTERRUPT64(0x92)
319 INTERRUPT64(0x93)
320 INTERRUPT64(0x94)
321 INTERRUPT64(0x95)
322 INTERRUPT64(0x96)
323 INTERRUPT64(0x97)
324 INTERRUPT64(0x98)
325 INTERRUPT64(0x99)
326 INTERRUPT64(0x9a)
327 INTERRUPT64(0x9b)
328 INTERRUPT64(0x9c)
329 INTERRUPT64(0x9d)
330 INTERRUPT64(0x9e)
331 INTERRUPT64(0x9f)
332
333 INTERRUPT64(0xa0)
334 INTERRUPT64(0xa1)
335 INTERRUPT64(0xa2)
336 INTERRUPT64(0xa3)
337 INTERRUPT64(0xa4)
338 INTERRUPT64(0xa5)
339 INTERRUPT64(0xa6)
340 INTERRUPT64(0xa7)
341 INTERRUPT64(0xa8)
342 INTERRUPT64(0xa9)
343 INTERRUPT64(0xaa)
344 INTERRUPT64(0xab)
345 INTERRUPT64(0xac)
346 INTERRUPT64(0xad)
347 INTERRUPT64(0xae)
348 INTERRUPT64(0xaf)
349
350 INTERRUPT64(0xb0)
351 INTERRUPT64(0xb1)
352 INTERRUPT64(0xb2)
353 INTERRUPT64(0xb3)
354 INTERRUPT64(0xb4)
355 INTERRUPT64(0xb5)
356 INTERRUPT64(0xb6)
357 INTERRUPT64(0xb7)
358 INTERRUPT64(0xb8)
359 INTERRUPT64(0xb9)
360 INTERRUPT64(0xba)
361 INTERRUPT64(0xbb)
362 INTERRUPT64(0xbc)
363 INTERRUPT64(0xbd)
364 INTERRUPT64(0xbe)
365 INTERRUPT64(0xbf)
366
367 INTERRUPT64(0xc0)
368 INTERRUPT64(0xc1)
369 INTERRUPT64(0xc2)
370 INTERRUPT64(0xc3)
371 INTERRUPT64(0xc4)
372 INTERRUPT64(0xc5)
373 INTERRUPT64(0xc6)
374 INTERRUPT64(0xc7)
375 INTERRUPT64(0xc8)
376 INTERRUPT64(0xc9)
377 INTERRUPT64(0xca)
378 INTERRUPT64(0xcb)
379 INTERRUPT64(0xcc)
380 INTERRUPT64(0xcd)
381 INTERRUPT64(0xce)
382 INTERRUPT64(0xcf)
383
384 INTERRUPT64(0xd0)
385 INTERRUPT64(0xd1)
386 INTERRUPT64(0xd2)
387 INTERRUPT64(0xd3)
388 INTERRUPT64(0xd4)
389 INTERRUPT64(0xd5)
390 INTERRUPT64(0xd6)
391 INTERRUPT64(0xd7)
392 INTERRUPT64(0xd8)
393 INTERRUPT64(0xd9)
394 INTERRUPT64(0xda)
395 INTERRUPT64(0xdb)
396 INTERRUPT64(0xdc)
397 INTERRUPT64(0xdd)
398 INTERRUPT64(0xde)
399 INTERRUPT64(0xdf)
400
401 INTERRUPT64(0xe0)
402 INTERRUPT64(0xe1)
403 INTERRUPT64(0xe2)
404 INTERRUPT64(0xe3)
405 INTERRUPT64(0xe4)
406 INTERRUPT64(0xe5)
407 INTERRUPT64(0xe6)
408 INTERRUPT64(0xe7)
409 INTERRUPT64(0xe8)
410 INTERRUPT64(0xe9)
411 INTERRUPT64(0xea)
412 INTERRUPT64(0xeb)
413 INTERRUPT64(0xec)
414 INTERRUPT64(0xed)
415 INTERRUPT64(0xee)
416 INTERRUPT64(0xef)
417
418 INTERRUPT64(0xf0)
419 INTERRUPT64(0xf1)
420 INTERRUPT64(0xf2)
421 INTERRUPT64(0xf3)
422 INTERRUPT64(0xf4)
423 INTERRUPT64(0xf5)
424 INTERRUPT64(0xf6)
425 INTERRUPT64(0xf7)
426 INTERRUPT64(0xf8)
427 INTERRUPT64(0xf9)
428 INTERRUPT64(0xfa)
429 INTERRUPT64(0xfb)
430 INTERRUPT64(0xfc)
431 INTERRUPT64(0xfd)
432 INTERRUPT64(0xfe)
/* Vector 0xff: routed through the trap path (t64_preempt handler). */
433 EXCEPTION64(0xff,t64_preempt)
434
435
436 .text
437 /*
438  *
439  * Trap/interrupt entry points.
440  *
441  * All traps must create the following 32-bit save area on the PCB "stack"
442  * - this is identical to the legacy mode 32-bit case:
443  *
444  * gs
445  * fs
446  * es
447  * ds
448  * edi
449  * esi
450  * ebp
451  * cr2 (defined only for page fault)
452  * ebx
453  * edx
454  * ecx
455  * eax
456  * trap number
457  * error code
458  * eip
459  * cs
460  * eflags
461  * user esp - if from user
462  * user ss - if from user
463  *
464  * Above this is the trap number and compatibility mode handler address
465  * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
466  *
467  * (trapno, trapfn)
468  * err
469  * rip
470  * cs
471  * rflags
472  * rsp
473  * ss
474  *
475  */
476
477 .code32
478 /*
479  * Control is passed here to return to the compatibility mode user.
480  * At this stage we're in kernel space in compatibility mode
481  * but we need to switch into 64-bit mode in the 4G-based trampoline
482  * space before performing the iret.
 *
 * Steps: restore per-thread debug registers (32- or 64-bit flavor),
 * enter 64-bit mode and uber-space, switch %cr3 to the user's address
 * space if needed, rebase %rsp into uber-space, then dispatch to the
 * 32-bit or 64-bit return path according to the saved state flavor.
483  */
484 Entry(lo64_ret_to_user)
485 movl %gs:CPU_ACTIVE_THREAD,%ecx
486
487 movl ACT_PCB_IDS(%ecx),%eax /* Obtain this thread's debug state */
488 cmpl $0,%eax /* Is there a debug register context? */
489 je 2f /* branch if not */
490 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 64-bit task? */
491 jne 1f
492 movl DS_DR0(%eax), %ecx /* If not, load the 32 bit DRs */
493 movl %ecx, %db0
494 movl DS_DR1(%eax), %ecx
495 movl %ecx, %db1
496 movl DS_DR2(%eax), %ecx
497 movl %ecx, %db2
498 movl DS_DR3(%eax), %ecx
499 movl %ecx, %db3
500 movl DS_DR7(%eax), %ecx
501 movl %ecx, %gs:CPU_DR7 /* stash DR7; applied after mode switch below */
502 movl $0, %gs:CPU_DR7 + 4 /* CPU_DR7 is 64-bit: clear the upper half */
503 jmp 2f
504 1:
505 ENTER_64BIT_MODE() /* Enter long mode */
506 mov DS64_DR0(%eax), %rcx /* Load the full width DRs */
507 mov %rcx, %dr0
508 mov DS64_DR1(%eax), %rcx
509 mov %rcx, %dr1
510 mov DS64_DR2(%eax), %rcx
511 mov %rcx, %dr2
512 mov DS64_DR3(%eax), %rcx
513 mov %rcx, %dr3
514 mov DS64_DR7(%eax), %rcx
515 mov %rcx, %gs:CPU_DR7 /* DR7 applied after the cr3 switch below */
516 jmp 3f /* Enter uberspace */
517 2:
518 ENTER_64BIT_MODE()
519 3:
520 ENTER_UBERSPACE()
521
522 /*
523  * Now switch %cr3, if necessary.
524  */
525 swapgs /* switch back to uber-kernel gs base */
526 mov %gs:CPU_TASK_CR3,%rcx
527 mov %rcx,%gs:CPU_ACTIVE_CR3
528 mov %cr3, %rax
529 cmp %rcx, %rax
530 je 1f /* already running on the task's cr3 */
531 /* flag the copyio engine state as WINDOWS_CLEAN */
532 mov %gs:CPU_ACTIVE_THREAD,%eax
533 movl $(WINDOWS_CLEAN),ACT_COPYIO_STATE(%eax)
534 mov %rcx,%cr3 /* switch to user's address space */
535 1:
536
537 mov %gs:CPU_DR7, %rax /* Is there a debug control register?*/
538 cmp $0, %rax
539 je 1f
540 mov %rax, %dr7 /* Set DR7 */
541 movq $0, %gs:CPU_DR7 /* consume the stashed value */
542 1:
543
544 /*
545  * Adjust stack to use uber-space.
 * %rsp = (KERNEL_UBER_BASE_HI32 << 32) | (low 32 bits of %rsp).
546  */
547 mov $(KERNEL_UBER_BASE_HI32), %rax
548 shl $32, %rsp
549 shrd $32, %rax, %rsp /* relocate into uber-space */
550
551 cmpl $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
552 jne L_64bit_return
553 jmp L_32bit_return
/*
 * Return into kernel space (no debug-register or cr3 handling needed):
 * enter 64-bit mode/uber-space, rebase %rsp, and join the common
 * 32-bit or 64-bit return path by saved-state flavor.
 */
555 Entry(lo64_ret_to_kernel)
556 ENTER_64BIT_MODE()
557 ENTER_UBERSPACE()
558
559 swapgs /* switch back to uber-kernel gs base */
560
561 /*
562  * Adjust stack to use uber-space.
 * %rsp = (KERNEL_UBER_BASE_HI32 << 32) | (low 32 bits of %rsp).
563  */
564 mov $(KERNEL_UBER_BASE_HI32), %rax
565 shl $32, %rsp
566 shrd $32, %rax, %rsp /* relocate into uber-space */
567
568 /* Check for return to 64-bit kernel space (EFI today) */
569 cmpl $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
570 jne L_64bit_return
571 /* fall through for 32-bit return */
572
573 L_32bit_return:
574 /*
575  * Restore registers into the machine state for iret.
 * Copy the 32-bit save area fields (R_*) into the compat-mode
 * hardware iret frame (ISC32_*), then restore GPRs and segments.
576  */
577 movl R_EIP(%rsp), %eax
578 movl %eax, ISC32_RIP(%rsp)
579 movl R_EFLAGS(%rsp), %eax
580 movl %eax, ISC32_RFLAGS(%rsp)
581 movl R_CS(%rsp), %eax
582 movl %eax, ISC32_CS(%rsp)
583 movl R_UESP(%rsp), %eax
584 movl %eax, ISC32_RSP(%rsp)
585 movl R_SS(%rsp), %eax
586 movl %eax, ISC32_SS(%rsp)
587
588 /*
589  * Restore general 32-bit registers
590  */
591 movl R_EAX(%rsp), %eax
592 movl R_EBX(%rsp), %ebx
593 movl R_ECX(%rsp), %ecx
594 movl R_EDX(%rsp), %edx
595 movl R_EBP(%rsp), %ebp
596 movl R_ESI(%rsp), %esi
597 movl R_EDI(%rsp), %edi
598
599 /*
600  * Restore segment registers. We may take an exception here but
601  * we've got enough space left in the save frame area to absorb
602  * a hardware frame plus the trapfn and trapno.
 * The ret32_set_* labels are recognized by trap_check_kernel_exit
 * so a GP/NP fault on these loads is reported against the user.
603  */
604 swapgs
605 EXT(ret32_set_ds):
606 movw R_DS(%rsp), %ds
607 EXT(ret32_set_es):
608 movw R_ES(%rsp), %es
609 EXT(ret32_set_fs):
610 movw R_FS(%rsp), %fs
611 EXT(ret32_set_gs):
612 movw R_GS(%rsp), %gs
613
614 add $(ISC32_OFFSET)+8+8, %rsp /* pop compat frame +
615 trapno/trapfn and error */
616 cmp $(SYSENTER_CS),ISF64_CS-8-8(%rsp)
617 /* test for fast entry/exit (offset back-references the popped frame) */
618 je L_fast_exit
619 EXT(ret32_iret):
620 iretq /* return from interrupt */
621
622 L_fast_exit:
623 pop %rdx /* user return eip */
624 pop %rcx /* pop and toss cs */
625 andl $(~EFL_IF), (%rsp) /* clear interrupts enable, sti below */
626 popf /* flags - carry denotes failure */
627 pop %rcx /* user return esp */
628 .code32
629 sti /* interrupts enabled after sysexit */
630 sysexit /* 32-bit sysexit */
631 .code64
632
633 L_64bit_return:
634 /*
635  * Set the GS Base MSR with the user's gs base.
 * The wrmsr is skipped when returning to kernel (EFI) code:
 * the low 2 bits of the saved CS are the target privilege level.
636  */
637 movl %gs:CPU_UBER_USER_GS_BASE, %eax
638 movl %gs:CPU_UBER_USER_GS_BASE+4, %edx
639 movl $(MSR_IA32_GS_BASE), %ecx
640 swapgs
641 testb $3, R64_CS(%rsp) /* returning to user-space? */
642 jz 1f
643 wrmsr /* set 64-bit base */
644 1:
645
646 /*
647  * Restore general 64-bit registers
648  */
649 mov R64_R15(%rsp), %r15
650 mov R64_R14(%rsp), %r14
651 mov R64_R13(%rsp), %r13
652 mov R64_R12(%rsp), %r12
653 mov R64_R11(%rsp), %r11
654 mov R64_R10(%rsp), %r10
655 mov R64_R9(%rsp), %r9
656 mov R64_R8(%rsp), %r8
657 mov R64_RSI(%rsp), %rsi
658 mov R64_RDI(%rsp), %rdi
659 mov R64_RBP(%rsp), %rbp
660 mov R64_RDX(%rsp), %rdx
661 mov R64_RBX(%rsp), %rbx
662 mov R64_RCX(%rsp), %rcx
663 mov R64_RAX(%rsp), %rax
664
665 add $(ISS64_OFFSET)+8+8, %rsp /* pop saved state frame +
666 trapno/trapfn and error */
667 cmpl $(SYSCALL_CS),ISF64_CS-8-8(%rsp)
668 /* test for fast entry/exit (offset back-references the popped frame) */
669 je L_sysret
670 EXT(ret64_iret):
671 iretq /* return from interrupt */
672
673 L_sysret:
674 /*
675  * Here to load rcx/r11/rsp and perform the sysret back to user-space.
676  * rcx user rip
677  * r11 user rflags
678  * rsp user stack pointer
679  */
680 mov ISF64_RIP-16(%rsp), %rcx
681 mov ISF64_RFLAGS-16(%rsp), %r11
682 mov ISF64_RSP-16(%rsp), %rsp
683 sysretq /* return from system call */
684
685 /*
686  * Common path to enter locore handlers.
 * On entry: hardware interrupt stack frame plus packed
 * (trapno, trapfn) and error already on the stack.
687  */
688 L_enter_lohandler:
689 swapgs /* switch to kernel gs (cpu_data) */
690 L_enter_lohandler_continue:
691 cmpl $(USER64_CS), ISF64_CS(%rsp)
692 je L_64bit_enter /* this is a 64-bit user task */
693 cmpl $(KERNEL64_CS), ISF64_CS(%rsp)
694 je L_64bit_enter /* we're in 64-bit (EFI) code */
695 jmp L_32bit_enter
696
697 /*
698  * System call handlers.
699  * These are entered via a syscall interrupt. The system call number in %rax
700  * is saved to the error code slot in the stack frame. We then branch to the
701  * common state saving code.
 * The L_*_continue labels allow hi64_debug to restart an entry that was
 * single-stepped into (see the debug trap handler below).
702  */
703
704 Entry(hi64_unix_scall)
705 swapgs /* switch to kernel gs (cpu_data) */
706 L_unix_scall_continue:
707 push %rax /* save system call number */
708 push $(UNIX_INT)
709 movl $(LO_UNIX_SCALL), 4(%rsp) /* pack trapfn with trapno */
710 jmp L_32bit_enter_check
711
712
713 Entry(hi64_mach_scall)
714 swapgs /* switch to kernel gs (cpu_data) */
715 L_mach_scall_continue:
716 push %rax /* save system call number */
717 push $(MACH_INT)
718 movl $(LO_MACH_SCALL), 4(%rsp) /* pack trapfn with trapno */
719 jmp L_32bit_enter_check
720
721
722 Entry(hi64_mdep_scall)
723 swapgs /* switch to kernel gs (cpu_data) */
724 L_mdep_scall_continue:
725 push %rax /* save system call number */
726 push $(MACHDEP_INT)
727 movl $(LO_MDEP_SCALL), 4(%rsp) /* pack trapfn with trapno */
728 jmp L_32bit_enter_check
729
730
731 Entry(hi64_diag_scall)
732 swapgs /* switch to kernel gs (cpu_data) */
733 L_diag_scall_continue:
734 push %rax /* save system call number */
735 push $(DIAG_INT)
736 movl $(LO_DIAG_SCALL), 4(%rsp) /* pack trapfn with trapno */
737 jmp L_32bit_enter_check
738
/*
 * 64-bit syscall instruction entry point.
 * syscall leaves the user rip in %rcx and rflags in %r11 and does not
 * switch stacks, so the user %rsp is stashed and an interrupt-style
 * frame is constructed by hand on the PCB stack.
 */
739 Entry(hi64_syscall)
740 swapgs /* Kapow! get per-cpu data area */
741 L_syscall_continue:
742 mov %rsp, %gs:CPU_UBER_TMP /* save user stack */
743 mov %gs:CPU_UBER_ISF, %rsp /* switch stack to pcb */
744
745 /*
746  * Save values in the ISF frame in the PCB
747  * to cons up the saved machine state.
748  */
749 movl $(USER_DS), ISF64_SS(%rsp)
750 movl $(SYSCALL_CS), ISF64_CS(%rsp) /* cs - a pseudo-segment */
751 mov %r11, ISF64_RFLAGS(%rsp) /* rflags */
752 mov %rcx, ISF64_RIP(%rsp) /* rip */
753 mov %gs:CPU_UBER_TMP, %rcx
754 mov %rcx, ISF64_RSP(%rsp) /* user stack */
755 mov %rax, ISF64_ERR(%rsp) /* err/rax - syscall code */
756 movl $(0), ISF64_TRAPNO(%rsp) /* trapno */
757 movl $(LO_SYSCALL), ISF64_TRAPFN(%rsp)
758 jmp L_64bit_enter /* this can only be a 64-bit task */
759
760 /*
761  * sysenter entry point
762  * Requires user code to set up:
763  * edx: user instruction pointer (return address)
764  * ecx: user stack pointer
765  * on which is pushed stub ret addr and saved ebx
766  * Return to user-space is made using sysexit.
767  * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
768  * or requiring ecx to be preserved.
 *
 * sysenter does not push a hardware frame, so the ss/rsp/rflags/cs/rip
 * frame is constructed by hand on the PCB stack below.
769  */
770 Entry(hi64_sysenter)
771 mov (%rsp), %rsp /* switch from temporary stack to pcb */
772 /*
773  * Push values on to the PCB stack
774  * to cons up the saved machine state.
775  */
776 push $(USER_DS) /* ss */
777 push %rcx /* uesp */
778 pushf /* flags */
779 /*
780  * Clear, among others, the Nested Task (NT) flags bit;
781  * This is cleared by INT, but not by sysenter, which only
782  * clears RF, VM and IF.
783  */
784 push $0
785 popf
786 push $(SYSENTER_CS) /* cs */
787 swapgs /* switch to kernel gs (cpu_data) */
788 L_sysenter_continue:
789 push %rdx /* eip */
790 push %rax /* err/eax - syscall code */
791 push $(0) /* trapno slot; trapfn packed in next */
792 movl $(LO_SYSENTER), ISF64_TRAPFN(%rsp)
793 orl $(EFL_IF), ISF64_RFLAGS(%rsp) /* re-enable IF in the saved flags */
794
795 L_32bit_enter_check:
796 /*
797  * Check we're not a confused 64-bit user.
 * 64-bit tasks must not use the 32-bit entry gates; reject them.
798  */
799 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
800 jne L_64bit_entry_reject
801 /* fall through to 32-bit handler: */
802
803 L_32bit_enter:
804 /*
805  * Make space for the compatibility save area.
 * The ISF pushed above now sits at the ISC32_* offsets.
806  */
807 sub $(ISC32_OFFSET), %rsp
808 movl $(SS_32), SS_FLAVOR(%rsp)
809
810 /*
811  * Save segment regs
812  */
813 mov %ds, R_DS(%rsp)
814 mov %es, R_ES(%rsp)
815 mov %fs, R_FS(%rsp)
816 mov %gs, R_GS(%rsp)
817
818 /*
819  * Save general 32-bit registers
820  */
821 mov %eax, R_EAX(%rsp)
822 mov %ebx, R_EBX(%rsp)
823 mov %ecx, R_ECX(%rsp)
824 mov %edx, R_EDX(%rsp)
825 mov %ebp, R_EBP(%rsp)
826 mov %esi, R_ESI(%rsp)
827 mov %edi, R_EDI(%rsp)
828
829 /* Unconditionally save cr2; only meaningful on page faults */
830 mov %cr2, %rax
831 mov %eax, R_CR2(%rsp)
832
833 /*
834  * Copy registers already saved in the machine state
835  * (in the interrupt stack frame) into the compat save area.
836  */
837 mov ISC32_RIP(%rsp), %eax
838 mov %eax, R_EIP(%rsp)
839 mov ISC32_RFLAGS(%rsp), %eax
840 mov %eax, R_EFLAGS(%rsp)
841 mov ISC32_CS(%rsp), %eax
842 mov %eax, R_CS(%rsp)
843 mov ISC32_RSP(%rsp), %eax
844 mov %eax, R_UESP(%rsp)
845 mov ISC32_SS(%rsp), %eax
846 mov %eax, R_SS(%rsp)
/* Entered directly when a fault during exit left the save area intact. */
847 L_32bit_enter_after_fault:
848 mov ISC32_TRAPNO(%rsp), %ebx /* %ebx := trapno for later */
849 mov %ebx, R_TRAPNO(%rsp)
850 mov ISC32_ERR(%rsp), %eax
851 mov %eax, R_ERR(%rsp)
852 mov ISC32_TRAPFN(%rsp), %edx
853
854 /*
855  * Common point to enter lo_handler in compatibility mode:
856  * %ebx trapno
857  * %edx locore handler address
858  */
859 L_enter_lohandler2:
860 /*
861  * Switch address space to kernel
862  * if not shared space and not already mapped.
863  * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
864  */
865 mov %cr3, %rax
866 mov %gs:CPU_TASK_CR3, %rcx
867 cmp %rax, %rcx /* is the task's cr3 loaded? */
868 jne 1f
869 cmpl $(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
870 je 2f /* shared space: no cr3 switch needed */
871 1:
872 mov %gs:CPU_KERNEL_CR3, %rcx
873 cmp %rax, %rcx
874 je 2f /* kernel cr3 already loaded */
875 mov %rcx, %cr3
876 mov %rcx, %gs:CPU_ACTIVE_CR3
877 2:
878 /*
879  * Switch to compatibility mode.
880  * Then establish kernel segments.
881  */
882 swapgs /* Done with uber-kernel gs */
883 ENTER_COMPAT_MODE()
884
885 /*
886  * Now in compatibility mode and running in compatibility space
887  * prepare to enter the locore handler.
888  * %ebx trapno
889  * %edx lo_handler pointer
890  * Note: the stack pointer (now 32-bit) is now directly addressing the
891  * the kernel below 4G and therefore is automagically re-based.
892  */
893 mov $(KERNEL_DS), %eax
894 mov %eax, %ss
895 mov %eax, %ds
896 mov %eax, %es
897 mov %eax, %fs
898 mov $(CPU_DATA_GS), %eax
899 mov %eax, %gs
900
901 movl %gs:CPU_ACTIVE_THREAD,%ecx /* Get the active thread */
902 cmpl $0, ACT_PCB_IDS(%ecx) /* Is there a debug register state? */
903 je 1f
904 movl $0, %ecx /* If so, reset DR7 (the control) */
905 movl %ecx, %dr7
906 1:
907 addl $1,%gs:hwIntCnt(,%ebx,4) // Bump the trap/intr count
908
909 /* Dispatch the designated lo handler */
910 jmp *%edx
911
912 .code64
913 L_64bit_entry_reject:
914 /*
915  * Here for a 64-bit user attempting an invalid kernel entry.
 * Rewrite the packed trapfn/trapno so it is reported as an
 * invalid-opcode trap through the common trap path.
916  */
917 movl $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
918 movl $(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
919 /* Fall through... */
920
921 L_64bit_enter:
922 /*
923  * Here for a 64-bit user task, or special 64-bit kernel code.
924  * Make space for the save area.
925  */
926 sub $(ISS64_OFFSET), %rsp
927 movl $(SS_64), SS_FLAVOR(%rsp)
928
929 /*
930  * Save segment regs
931  */
932 mov %fs, R64_FS(%rsp)
933 mov %gs, R64_GS(%rsp)
934
935 /* Save general-purpose registers */
936 mov %rax, R64_RAX(%rsp)
937 mov %rcx, R64_RCX(%rsp)
938 mov %rbx, R64_RBX(%rsp)
939 mov %rbp, R64_RBP(%rsp)
940 mov %r11, R64_R11(%rsp)
941 mov %r12, R64_R12(%rsp)
942 mov %r13, R64_R13(%rsp)
943 mov %r14, R64_R14(%rsp)
944 mov %r15, R64_R15(%rsp)
945
946 /* cr2 is significant only for page-faults */
947 mov %cr2, %rax
948 mov %rax, R64_CR2(%rsp)
949
950 /* Other registers (which may contain syscall args) */
951 mov %rdi, R64_RDI(%rsp) /* arg0 .. */
952 mov %rsi, R64_RSI(%rsp)
953 mov %rdx, R64_RDX(%rsp)
954 mov %r10, R64_R10(%rsp)
955 mov %r8, R64_R8(%rsp)
956 mov %r9, R64_R9(%rsp) /* .. arg5 */
957
/* Entered directly when a fault during exit left the save area intact. */
958 L_64bit_enter_after_fault:
959 /*
960  * At this point we're almost ready to join the common lo-entry code.
961  */
962 mov R64_TRAPNO(%rsp), %ebx
963 mov R64_TRAPFN(%rsp), %edx
964
965 jmp L_enter_lohandler2
966
966
967 /*
968 * Debug trap. Check for single-stepping across system call into
969 * kernel. If this is the case, taking the debug trap has turned
970 * off single-stepping - save the flags register with the trace
971 * bit set.
972 */
973 Entry(hi64_debug)
974 swapgs /* set %gs for cpu data */
975 push $0 /* error code */
976 push $(T_DEBUG)
977 movl $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
978
979 testb $3, ISF64_CS(%rsp)
980 jnz L_enter_lohandler_continue
981
982 /*
983 * trap came from kernel mode
984 */
985 cmpl $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
986 jne L_enter_lohandler_continue /* trap not in uber-space */
987
988 cmpl $(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
989 jne 6f
990 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
991 jmp L_mach_scall_continue /* continue system call entry */
992 6:
993 cmpl $(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
994 jne 5f
995 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
996 jmp L_mdep_scall_continue /* continue system call entry */
997 5:
998 cmpl $(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
999 jne 4f
1000 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
1001 jmp L_unix_scall_continue /* continue system call entry */
1002 4:
1003 cmpl $(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
1004 jne L_enter_lohandler_continue
1005 /*
1006 * Interrupt stack frame has been pushed on the temporary stack.
1007 * We have to switch to pcb stack and copy eflags.
1008 */
1009 add $32,%rsp /* remove trapno/trapfn/err/rip/cs */
1010 push %rcx /* save %rcx - user stack pointer */
1011 mov 32(%rsp),%rcx /* top of intr stack -> pcb stack */
1012 xchg %rcx,%rsp /* switch to pcb stack */
1013 push $(USER_DS) /* ss */
1014 push (%rcx) /* saved %rcx into rsp slot */
1015 push 8(%rcx) /* rflags */
1016 mov (%rcx),%rcx /* restore %rcx */
1017 push $(SYSENTER_TF_CS) /* cs - not SYSENTER_CS for iret path */
1018 jmp L_sysenter_continue /* continue sysenter entry */
1019
1020
1021 Entry(hi64_double_fault)
1022 swapgs /* set %gs for cpu data */
1023 push $(T_DOUBLE_FAULT)
1024 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
1025
1026 cmpl $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
1027 jne L_enter_lohandler_continue /* trap not in uber-space */
1028
1029 cmpl $(EXT(hi64_syscall)), ISF64_RIP(%rsp)
1030 jne L_enter_lohandler_continue
1031
1032 mov ISF64_RSP(%rsp), %rsp
1033 jmp L_syscall_continue
1034
1035
1036 /*
1037 * General protection or segment-not-present fault.
1038 * Check for a GP/NP fault in the kernel_return
1039 * sequence; if there, report it as a GP/NP fault on the user's instruction.
1040 *
1041 * rsp-> 0: trap code (NP or GP) and trap function
1042 * 8: segment number in error (error code)
1043 * 16 rip
1044 * 24 cs
1045 * 32 rflags
1046 * 40 rsp
1047 * 48 ss
1048 * 56 old registers (trap is from kernel)
1049 */
1050 Entry(hi64_gen_prot)
1051 push $(T_GENERAL_PROTECTION)
1052 jmp trap_check_kernel_exit /* check for kernel exit sequence */
1053
1054 Entry(hi64_stack_fault)
1055 push $(T_STACK_FAULT)
1056 jmp trap_check_kernel_exit /* check for kernel exit sequence */
1057
1058 Entry(hi64_segnp)
1059 push $(T_SEGMENT_NOT_PRESENT)
1060 /* indicate fault type */
1061 trap_check_kernel_exit:
1062 movl $(LO_ALLTRAPS), 4(%rsp)
1063 testb $3,24(%rsp)
1064 jnz hi64_take_trap
1065 /* trap was from kernel mode, so */
1066 /* check for the kernel exit sequence */
1067 cmpl $(KERNEL_UBER_BASE_HI32), 16+4(%rsp)
1068 jne hi64_take_trap /* trap not in uber-space */
1069
1070 cmpl $(EXT(ret32_iret)), 16(%rsp)
1071 je L_fault_iret32
1072 cmpl $(EXT(ret32_set_ds)), 16(%rsp)
1073 je L_32bit_fault_set_seg
1074 cmpl $(EXT(ret32_set_es)), 16(%rsp)
1075 je L_32bit_fault_set_seg
1076 cmpl $(EXT(ret32_set_fs)), 16(%rsp)
1077 je L_32bit_fault_set_seg
1078 cmpl $(EXT(ret32_set_gs)), 16(%rsp)
1079 je L_32bit_fault_set_seg
1080
1081 cmpl $(EXT(ret64_iret)), 16(%rsp)
1082 je L_fault_iret64
1083
1084 hi64_take_trap:
1085 jmp L_enter_lohandler
1086
1087
1088 /*
1089 * GP/NP fault on IRET: CS or SS is in error.
1090 * All registers contain the user's values.
1091 *
1092 * on SP is
1093 * 0 trap number/function
1094 * 8 errcode
1095 * 16 rip
1096 * 24 cs
1097 * 32 rflags
1098 * 40 rsp
1099 * 48 ss --> new trapno/trapfn
1100 * 56 (16-byte padding) --> new errcode
1101 * 64 user rip
1102 * 72 user cs
1103 * 80 user rflags
1104 * 88 user rsp
1105 * 96 user ss
1106 */
1107 L_fault_iret32:
1108 mov %rax, 16(%rsp) /* save rax (we don`t need saved rip) */
1109 mov 0(%rsp), %rax /* get trap number */
1110 mov %rax, 48(%rsp) /* put in user trap number */
1111 mov 8(%rsp), %rax /* get error code */
1112 mov %rax, 56(%rsp) /* put in user errcode */
1113 mov 16(%rsp), %rax /* restore rax */
1114 add $48, %rsp /* reset to original frame */
1115 /* now treat as fault from user */
1116 swapgs
1117 jmp L_32bit_enter
1118
1119 L_fault_iret64:
1120 mov %rax, 16(%rsp) /* save rax (we don`t need saved rip) */
1121 mov 0(%rsp), %rax /* get trap number */
1122 mov %rax, 48(%rsp) /* put in user trap number */
1123 mov 8(%rsp), %rax /* get error code */
1124 mov %rax, 56(%rsp) /* put in user errcode */
1125 mov 16(%rsp), %rax /* restore rax */
1126 add $48, %rsp /* reset to original frame */
1127 /* now treat as fault from user */
1128 swapgs
1129 jmp L_64bit_enter
1130
1131 /*
1132 * Fault restoring a segment register. All of the saved state is still
1133 * on the stack untouched since we didn't move the stack pointer.
1134 */
1135 L_32bit_fault_set_seg:
1136 mov 0(%rsp), %rax /* get trap number/function */
1137 mov 8(%rsp), %rdx /* get error code */
1138 mov 40(%rsp), %rsp /* reload stack prior to fault */
1139 mov %rax,ISC32_TRAPNO(%rsp)
1140 mov %rdx,ISC32_ERR(%rsp)
1141 /* now treat as fault from user */
1142 /* except that all the state is */
1143 /* already saved - we just have to */
1144 /* move the trapno and error into */
1145 /* the compatibility frame */
1146 swapgs
1147 jmp L_32bit_enter_after_fault
1148
1149
1150 /*
1151 * Fatal exception handlers:
1152 */
1153 Entry(db_task_dbl_fault64)
1154 push $(T_DOUBLE_FAULT)
1155 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
1156 jmp L_enter_lohandler
1157
1158 Entry(db_task_stk_fault64)
1159 push $(T_STACK_FAULT)
1160 movl $(LO_DOUBLE_FAULT), ISF64_TRAPFN(%rsp)
1161 jmp L_enter_lohandler
1162
1163 Entry(mc64)
1164 push $(0) /* Error */
1165 push $(T_MACHINE_CHECK)
1166 movl $(LO_MACHINE_CHECK), ISF64_TRAPFN(%rsp)
1167 jmp L_enter_lohandler