]> git.saurik.com Git - apple/xnu.git/blame - osfmk/i386/idt64.s
xnu-2050.48.11.tar.gz
[apple/xnu.git] / osfmk / i386 / idt64.s
CommitLineData
0c530ab8 1/*
6d2010ae 2 * Copyright (c) 2010 Apple Inc. All rights reserved.
0c530ab8 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0c530ab8 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0c530ab8 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
0c530ab8
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0c530ab8 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
0c530ab8
A
27 */
28#include <i386/asm.h>
29#include <i386/asm64.h>
30#include <assym.s>
0c530ab8
A
31#include <i386/eflags.h>
32#include <i386/trap.h>
6d2010ae 33#include <i386/rtclock_asm.h>
0c530ab8
A
34#define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */
35#include <mach/i386/syscall_sw.h>
36#include <i386/postcode.h>
37#include <i386/proc_reg.h>
6d2010ae
A
38#include <mach/exception_types.h>
39
0c530ab8
A
40
41/*
6d2010ae 42 * Low-memory compability-mode handlers.
0c530ab8
A
43 */
44#define LO_ALLINTRS EXT(lo_allintrs)
45#define LO_ALLTRAPS EXT(lo_alltraps)
0c530ab8
A
46#define LO_SYSCALL EXT(lo_syscall)
47#define LO_UNIX_SCALL EXT(lo_unix_scall)
48#define LO_MACH_SCALL EXT(lo_mach_scall)
49#define LO_MDEP_SCALL EXT(lo_mdep_scall)
0c530ab8
A
50#define LO_DOUBLE_FAULT EXT(lo_df64)
51#define LO_MACHINE_CHECK EXT(lo_mc64)
52
53/*
54 * Interrupt descriptor table and code vectors for it.
55 *
56 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
57 * reformatted ("fixed") before use.
58 * All vector are rebased in uber-space.
59 * Special vectors (e.g. double-fault) use a non-0 IST.
60 */
61#define IDT64_BASE_ENTRY(vec,seg,ist,type) \
62 .data ;\
63 .long vec ;\
64 .long KERNEL_UBER_BASE_HI32 ;\
65 .word seg ;\
66 .byte ist*16 ;\
67 .byte type ;\
68 .long 0 ;\
69 .text
70
71#define IDT64_ENTRY(vec,ist,type) \
72 IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
73#define IDT64_ENTRY_LOCAL(vec,ist,type) \
74 IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)
75
76/*
77 * Push trap number and address of compatibility mode handler,
78 * then branch to common trampoline. Error already pushed.
79 */
80#define EXCEP64_ERR(n,name) \
81 IDT64_ENTRY(name,0,K_INTR_GATE) ;\
82Entry(name) ;\
6d2010ae 83 push $(LO_ALLTRAPS) ;\
0c530ab8 84 push $(n) ;\
0c530ab8
A
85 jmp L_enter_lohandler
86
87
88/*
89 * Push error(0), trap number and address of compatibility mode handler,
90 * then branch to common trampoline.
91 */
92#define EXCEPTION64(n,name) \
93 IDT64_ENTRY(name,0,K_INTR_GATE) ;\
94Entry(name) ;\
95 push $0 ;\
6d2010ae 96 push $(LO_ALLTRAPS) ;\
0c530ab8 97 push $(n) ;\
0c530ab8
A
98 jmp L_enter_lohandler
99
100
101/*
102 * Interrupt from user.
103 * Push error (0), trap number and address of compatibility mode handler,
104 * then branch to common trampoline.
105 */
106#define EXCEP64_USR(n,name) \
107 IDT64_ENTRY(name,0,U_INTR_GATE) ;\
108Entry(name) ;\
109 push $0 ;\
6d2010ae 110 push $(LO_ALLTRAPS) ;\
0c530ab8 111 push $(n) ;\
0c530ab8
A
112 jmp L_enter_lohandler
113
114
115/*
116 * Special interrupt code from user.
117 */
118#define EXCEP64_SPC_USR(n,name) \
119 IDT64_ENTRY(name,0,U_INTR_GATE)
120
121
122/*
123 * Special interrupt code.
124 * In 64-bit mode we may use an IST slot instead of task gates.
125 */
126#define EXCEP64_IST(n,name,ist) \
127 IDT64_ENTRY(name,ist,K_INTR_GATE)
128#define EXCEP64_SPC(n,name) \
129 IDT64_ENTRY(name,0,K_INTR_GATE)
130
131
132/*
133 * Interrupt.
134 * Push zero err, interrupt vector and address of compatibility mode handler,
135 * then branch to common trampoline.
136 */
137#define INTERRUPT64(n) \
138 IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE) ;\
139 .align FALIGN ;\
140L_ ## n: ;\
141 push $0 ;\
6d2010ae 142 push $(LO_ALLINTRS) ;\
0c530ab8 143 push $(n) ;\
0c530ab8
A
144 jmp L_enter_lohandler
145
146
147 .data
148 .align 12
149Entry(master_idt64)
150Entry(hi64_data_base)
151 .text
152 .code64
153Entry(hi64_text_base)
154
155EXCEPTION64(0x00,t64_zero_div)
156EXCEP64_SPC(0x01,hi64_debug)
157INTERRUPT64(0x02) /* NMI */
158EXCEP64_USR(0x03,t64_int3)
159EXCEP64_USR(0x04,t64_into)
160EXCEP64_USR(0x05,t64_bounds)
161EXCEPTION64(0x06,t64_invop)
162EXCEPTION64(0x07,t64_nofpu)
0c530ab8 163EXCEP64_IST(0x08,hi64_double_fault,1)
0c530ab8
A
164EXCEPTION64(0x09,a64_fpu_over)
165EXCEPTION64(0x0a,a64_inv_tss)
166EXCEP64_SPC(0x0b,hi64_segnp)
0c530ab8 167EXCEP64_SPC(0x0c,hi64_stack_fault)
0c530ab8 168EXCEP64_SPC(0x0d,hi64_gen_prot)
2d21ac55 169EXCEP64_SPC(0x0e, hi64_page_fault)
0c530ab8
A
170EXCEPTION64(0x0f,t64_trap_0f)
171EXCEPTION64(0x10,t64_fpu_err)
172EXCEPTION64(0x11,t64_trap_11)
173EXCEP64_IST(0x12,mc64,1)
174EXCEPTION64(0x13,t64_sse_err)
175EXCEPTION64(0x14,t64_trap_14)
176EXCEPTION64(0x15,t64_trap_15)
177EXCEPTION64(0x16,t64_trap_16)
178EXCEPTION64(0x17,t64_trap_17)
179EXCEPTION64(0x18,t64_trap_18)
180EXCEPTION64(0x19,t64_trap_19)
181EXCEPTION64(0x1a,t64_trap_1a)
182EXCEPTION64(0x1b,t64_trap_1b)
183EXCEPTION64(0x1c,t64_trap_1c)
184EXCEPTION64(0x1d,t64_trap_1d)
185EXCEPTION64(0x1e,t64_trap_1e)
186EXCEPTION64(0x1f,t64_trap_1f)
187
188INTERRUPT64(0x20)
189INTERRUPT64(0x21)
190INTERRUPT64(0x22)
191INTERRUPT64(0x23)
192INTERRUPT64(0x24)
193INTERRUPT64(0x25)
194INTERRUPT64(0x26)
195INTERRUPT64(0x27)
196INTERRUPT64(0x28)
197INTERRUPT64(0x29)
198INTERRUPT64(0x2a)
199INTERRUPT64(0x2b)
200INTERRUPT64(0x2c)
201INTERRUPT64(0x2d)
202INTERRUPT64(0x2e)
203INTERRUPT64(0x2f)
204
205INTERRUPT64(0x30)
206INTERRUPT64(0x31)
207INTERRUPT64(0x32)
208INTERRUPT64(0x33)
209INTERRUPT64(0x34)
210INTERRUPT64(0x35)
211INTERRUPT64(0x36)
212INTERRUPT64(0x37)
213INTERRUPT64(0x38)
214INTERRUPT64(0x39)
215INTERRUPT64(0x3a)
216INTERRUPT64(0x3b)
217INTERRUPT64(0x3c)
218INTERRUPT64(0x3d)
219INTERRUPT64(0x3e)
220INTERRUPT64(0x3f)
221
222INTERRUPT64(0x40)
223INTERRUPT64(0x41)
224INTERRUPT64(0x42)
225INTERRUPT64(0x43)
226INTERRUPT64(0x44)
227INTERRUPT64(0x45)
228INTERRUPT64(0x46)
229INTERRUPT64(0x47)
230INTERRUPT64(0x48)
231INTERRUPT64(0x49)
232INTERRUPT64(0x4a)
233INTERRUPT64(0x4b)
234INTERRUPT64(0x4c)
235INTERRUPT64(0x4d)
236INTERRUPT64(0x4e)
237INTERRUPT64(0x4f)
238
239INTERRUPT64(0x50)
240INTERRUPT64(0x51)
241INTERRUPT64(0x52)
242INTERRUPT64(0x53)
243INTERRUPT64(0x54)
244INTERRUPT64(0x55)
245INTERRUPT64(0x56)
246INTERRUPT64(0x57)
247INTERRUPT64(0x58)
248INTERRUPT64(0x59)
249INTERRUPT64(0x5a)
250INTERRUPT64(0x5b)
251INTERRUPT64(0x5c)
252INTERRUPT64(0x5d)
253INTERRUPT64(0x5e)
254INTERRUPT64(0x5f)
255
256INTERRUPT64(0x60)
257INTERRUPT64(0x61)
258INTERRUPT64(0x62)
259INTERRUPT64(0x63)
260INTERRUPT64(0x64)
261INTERRUPT64(0x65)
262INTERRUPT64(0x66)
263INTERRUPT64(0x67)
264INTERRUPT64(0x68)
265INTERRUPT64(0x69)
266INTERRUPT64(0x6a)
267INTERRUPT64(0x6b)
268INTERRUPT64(0x6c)
269INTERRUPT64(0x6d)
270INTERRUPT64(0x6e)
271INTERRUPT64(0x6f)
272
273INTERRUPT64(0x70)
274INTERRUPT64(0x71)
275INTERRUPT64(0x72)
276INTERRUPT64(0x73)
277INTERRUPT64(0x74)
278INTERRUPT64(0x75)
279INTERRUPT64(0x76)
280INTERRUPT64(0x77)
281INTERRUPT64(0x78)
282INTERRUPT64(0x79)
283INTERRUPT64(0x7a)
284INTERRUPT64(0x7b)
285INTERRUPT64(0x7c)
286INTERRUPT64(0x7d)
287INTERRUPT64(0x7e)
2d21ac55 288EXCEP64_USR(0x7f, t64_dtrace_ret)
0c530ab8
A
289
290EXCEP64_SPC_USR(0x80,hi64_unix_scall)
291EXCEP64_SPC_USR(0x81,hi64_mach_scall)
292EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
316670eb 293INTERRUPT64(0x83)
0c530ab8
A
294INTERRUPT64(0x84)
295INTERRUPT64(0x85)
296INTERRUPT64(0x86)
297INTERRUPT64(0x87)
298INTERRUPT64(0x88)
299INTERRUPT64(0x89)
300INTERRUPT64(0x8a)
301INTERRUPT64(0x8b)
302INTERRUPT64(0x8c)
303INTERRUPT64(0x8d)
304INTERRUPT64(0x8e)
305INTERRUPT64(0x8f)
306
307INTERRUPT64(0x90)
308INTERRUPT64(0x91)
309INTERRUPT64(0x92)
310INTERRUPT64(0x93)
311INTERRUPT64(0x94)
312INTERRUPT64(0x95)
313INTERRUPT64(0x96)
314INTERRUPT64(0x97)
315INTERRUPT64(0x98)
316INTERRUPT64(0x99)
317INTERRUPT64(0x9a)
318INTERRUPT64(0x9b)
319INTERRUPT64(0x9c)
320INTERRUPT64(0x9d)
321INTERRUPT64(0x9e)
322INTERRUPT64(0x9f)
323
324INTERRUPT64(0xa0)
325INTERRUPT64(0xa1)
326INTERRUPT64(0xa2)
327INTERRUPT64(0xa3)
328INTERRUPT64(0xa4)
329INTERRUPT64(0xa5)
330INTERRUPT64(0xa6)
331INTERRUPT64(0xa7)
332INTERRUPT64(0xa8)
333INTERRUPT64(0xa9)
334INTERRUPT64(0xaa)
335INTERRUPT64(0xab)
336INTERRUPT64(0xac)
337INTERRUPT64(0xad)
338INTERRUPT64(0xae)
339INTERRUPT64(0xaf)
340
341INTERRUPT64(0xb0)
342INTERRUPT64(0xb1)
343INTERRUPT64(0xb2)
344INTERRUPT64(0xb3)
345INTERRUPT64(0xb4)
346INTERRUPT64(0xb5)
347INTERRUPT64(0xb6)
348INTERRUPT64(0xb7)
349INTERRUPT64(0xb8)
350INTERRUPT64(0xb9)
351INTERRUPT64(0xba)
352INTERRUPT64(0xbb)
353INTERRUPT64(0xbc)
354INTERRUPT64(0xbd)
355INTERRUPT64(0xbe)
356INTERRUPT64(0xbf)
357
358INTERRUPT64(0xc0)
359INTERRUPT64(0xc1)
360INTERRUPT64(0xc2)
361INTERRUPT64(0xc3)
362INTERRUPT64(0xc4)
363INTERRUPT64(0xc5)
364INTERRUPT64(0xc6)
365INTERRUPT64(0xc7)
366INTERRUPT64(0xc8)
367INTERRUPT64(0xc9)
368INTERRUPT64(0xca)
369INTERRUPT64(0xcb)
370INTERRUPT64(0xcc)
371INTERRUPT64(0xcd)
372INTERRUPT64(0xce)
373INTERRUPT64(0xcf)
374
375INTERRUPT64(0xd0)
376INTERRUPT64(0xd1)
377INTERRUPT64(0xd2)
378INTERRUPT64(0xd3)
379INTERRUPT64(0xd4)
380INTERRUPT64(0xd5)
381INTERRUPT64(0xd6)
382INTERRUPT64(0xd7)
383INTERRUPT64(0xd8)
384INTERRUPT64(0xd9)
385INTERRUPT64(0xda)
386INTERRUPT64(0xdb)
387INTERRUPT64(0xdc)
388INTERRUPT64(0xdd)
389INTERRUPT64(0xde)
390INTERRUPT64(0xdf)
391
392INTERRUPT64(0xe0)
393INTERRUPT64(0xe1)
394INTERRUPT64(0xe2)
395INTERRUPT64(0xe3)
396INTERRUPT64(0xe4)
397INTERRUPT64(0xe5)
398INTERRUPT64(0xe6)
399INTERRUPT64(0xe7)
400INTERRUPT64(0xe8)
401INTERRUPT64(0xe9)
402INTERRUPT64(0xea)
403INTERRUPT64(0xeb)
404INTERRUPT64(0xec)
405INTERRUPT64(0xed)
406INTERRUPT64(0xee)
407INTERRUPT64(0xef)
408
409INTERRUPT64(0xf0)
410INTERRUPT64(0xf1)
411INTERRUPT64(0xf2)
412INTERRUPT64(0xf3)
413INTERRUPT64(0xf4)
414INTERRUPT64(0xf5)
415INTERRUPT64(0xf6)
416INTERRUPT64(0xf7)
417INTERRUPT64(0xf8)
418INTERRUPT64(0xf9)
419INTERRUPT64(0xfa)
420INTERRUPT64(0xfb)
421INTERRUPT64(0xfc)
422INTERRUPT64(0xfd)
423INTERRUPT64(0xfe)
424EXCEPTION64(0xff,t64_preempt)
425
426
427 .text
428/*
429 *
430 * Trap/interrupt entry points.
431 *
432 * All traps must create the following 32-bit save area on the PCB "stack"
433 * - this is identical to the legacy mode 32-bit case:
434 *
435 * gs
436 * fs
437 * es
438 * ds
439 * edi
440 * esi
441 * ebp
442 * cr2 (defined only for page fault)
443 * ebx
444 * edx
445 * ecx
446 * eax
447 * trap number
448 * error code
449 * eip
450 * cs
451 * eflags
452 * user esp - if from user
453 * user ss - if from user
454 *
455 * Above this is the trap number and compatibility mode handler address
456 * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
457 *
458 * (trapno, trapfn)
459 * err
460 * rip
461 * cs
462 * rflags
463 * rsp
464 * ss
465 *
466 */
467
468 .code32
6d2010ae 469
0c530ab8
A
470/*
471 * Control is passed here to return to the compatibility mode user.
472 * At this stage we're in kernel space in compatibility mode
473 * but we need to switch into 64-bit mode in the 4G-based trampoline
474 * space before performing the iret.
475 */
6d2010ae 476ret_to_user:
0c530ab8
A
477 movl %gs:CPU_ACTIVE_THREAD,%ecx
478
6d2010ae 479 movl TH_PCB_IDS(%ecx),%eax /* Obtain this thread's debug state */
0c530ab8
A
480 cmpl $0,%eax /* Is there a debug register context? */
481 je 2f /* branch if not */
2d21ac55 482 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
0c530ab8 483 jne 1f
2d21ac55 484 movl DS_DR0(%eax), %ecx /* If so, load the 32 bit DRs */
0c530ab8
A
485 movl %ecx, %db0
486 movl DS_DR1(%eax), %ecx
487 movl %ecx, %db1
488 movl DS_DR2(%eax), %ecx
489 movl %ecx, %db2
490 movl DS_DR3(%eax), %ecx
491 movl %ecx, %db3
492 movl DS_DR7(%eax), %ecx
493 movl %ecx, %gs:CPU_DR7
494 movl $0, %gs:CPU_DR7 + 4
495 jmp 2f
4961:
497 ENTER_64BIT_MODE() /* Enter long mode */
498 mov DS64_DR0(%eax), %rcx /* Load the full width DRs*/
499 mov %rcx, %dr0
500 mov DS64_DR1(%eax), %rcx
501 mov %rcx, %dr1
502 mov DS64_DR2(%eax), %rcx
503 mov %rcx, %dr2
504 mov DS64_DR3(%eax), %rcx
505 mov %rcx, %dr3
506 mov DS64_DR7(%eax), %rcx
507 mov %rcx, %gs:CPU_DR7
508 jmp 3f /* Enter uberspace */
5092:
510 ENTER_64BIT_MODE()
5113:
512 ENTER_UBERSPACE()
513
514 /*
515 * Now switch %cr3, if necessary.
516 */
517 swapgs /* switch back to uber-kernel gs base */
518 mov %gs:CPU_TASK_CR3,%rcx
519 mov %rcx,%gs:CPU_ACTIVE_CR3
520 mov %cr3, %rax
521 cmp %rcx, %rax
522 je 1f
523 /* flag the copyio engine state as WINDOWS_CLEAN */
524 mov %gs:CPU_ACTIVE_THREAD,%eax
6d2010ae 525 movl $(WINDOWS_CLEAN),TH_COPYIO_STATE(%eax)
0c530ab8
A
526 mov %rcx,%cr3 /* switch to user's address space */
5271:
528
529 mov %gs:CPU_DR7, %rax /* Is there a debug control register?*/
530 cmp $0, %rax
531 je 1f
532 mov %rax, %dr7 /* Set DR7 */
533 movq $0, %gs:CPU_DR7
5341:
535
536 /*
537 * Adjust stack to use uber-space.
538 */
539 mov $(KERNEL_UBER_BASE_HI32), %rax
540 shl $32, %rsp
541 shrd $32, %rax, %rsp /* relocate into uber-space */
542
543 cmpl $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
544 jne L_64bit_return
545 jmp L_32bit_return
546
6d2010ae 547ret_to_kernel:
0c530ab8
A
548 ENTER_64BIT_MODE()
549 ENTER_UBERSPACE()
550
551 swapgs /* switch back to uber-kernel gs base */
552
553 /*
554 * Adjust stack to use uber-space.
555 */
556 mov $(KERNEL_UBER_BASE_HI32), %rax
557 shl $32, %rsp
558 shrd $32, %rax, %rsp /* relocate into uber-space */
559
560 /* Check for return to 64-bit kernel space (EFI today) */
561 cmpl $(SS_32), SS_FLAVOR(%rsp) /* 32-bit state? */
562 jne L_64bit_return
563 /* fall through for 32-bit return */
564
565L_32bit_return:
566 /*
567 * Restore registers into the machine state for iret.
568 */
b0d623f7 569 movl R32_EIP(%rsp), %eax
0c530ab8 570 movl %eax, ISC32_RIP(%rsp)
b0d623f7 571 movl R32_EFLAGS(%rsp), %eax
0c530ab8 572 movl %eax, ISC32_RFLAGS(%rsp)
b0d623f7 573 movl R32_CS(%rsp), %eax
0c530ab8 574 movl %eax, ISC32_CS(%rsp)
b0d623f7 575 movl R32_UESP(%rsp), %eax
0c530ab8 576 movl %eax, ISC32_RSP(%rsp)
b0d623f7 577 movl R32_SS(%rsp), %eax
0c530ab8
A
578 movl %eax, ISC32_SS(%rsp)
579
580 /*
581 * Restore general 32-bit registers
582 */
b0d623f7
A
583 movl R32_EAX(%rsp), %eax
584 movl R32_EBX(%rsp), %ebx
585 movl R32_ECX(%rsp), %ecx
586 movl R32_EDX(%rsp), %edx
587 movl R32_EBP(%rsp), %ebp
588 movl R32_ESI(%rsp), %esi
589 movl R32_EDI(%rsp), %edi
0c530ab8
A
590
591 /*
592 * Restore segment registers. We make take an exception here but
593 * we've got enough space left in the save frame area to absorb
594 * a hardware frame plus the trapfn and trapno
595 */
596 swapgs
597EXT(ret32_set_ds):
b0d623f7 598 movw R32_DS(%rsp), %ds
0c530ab8 599EXT(ret32_set_es):
b0d623f7 600 movw R32_ES(%rsp), %es
0c530ab8 601EXT(ret32_set_fs):
b0d623f7 602 movw R32_FS(%rsp), %fs
0c530ab8 603EXT(ret32_set_gs):
b0d623f7 604 movw R32_GS(%rsp), %gs
0c530ab8 605
6d2010ae
A
606 add $(ISC32_OFFSET)+8+8+8, %rsp /* pop compat frame +
607 trapno, trapfn and error */
316670eb 608 cmpl $(SYSENTER_CS),ISF64_CS-8-8-8(%rsp)
0c530ab8
A
609 /* test for fast entry/exit */
610 je L_fast_exit
611EXT(ret32_iret):
612 iretq /* return from interrupt */
613
614L_fast_exit:
2d21ac55
A
615 pop %rdx /* user return eip */
616 pop %rcx /* pop and toss cs */
0c530ab8 617 andl $(~EFL_IF), (%rsp) /* clear interrupts enable, sti below */
2d21ac55
A
618 popf /* flags - carry denotes failure */
619 pop %rcx /* user return esp */
0c530ab8
A
620 .code32
621 sti /* interrupts enabled after sysexit */
316670eb 622 .byte 0x0f,0x35 /* 32-bit sysexit */
0c530ab8
A
623 .code64
624
625L_64bit_return:
626 /*
627 * Set the GS Base MSR with the user's gs base.
628 */
629 movl %gs:CPU_UBER_USER_GS_BASE, %eax
630 movl %gs:CPU_UBER_USER_GS_BASE+4, %edx
631 movl $(MSR_IA32_GS_BASE), %ecx
632 swapgs
633 testb $3, R64_CS(%rsp) /* returning to user-space? */
634 jz 1f
635 wrmsr /* set 64-bit base */
6361:
637
638 /*
639 * Restore general 64-bit registers
640 */
641 mov R64_R15(%rsp), %r15
642 mov R64_R14(%rsp), %r14
643 mov R64_R13(%rsp), %r13
644 mov R64_R12(%rsp), %r12
645 mov R64_R11(%rsp), %r11
646 mov R64_R10(%rsp), %r10
647 mov R64_R9(%rsp), %r9
648 mov R64_R8(%rsp), %r8
649 mov R64_RSI(%rsp), %rsi
650 mov R64_RDI(%rsp), %rdi
651 mov R64_RBP(%rsp), %rbp
652 mov R64_RDX(%rsp), %rdx
653 mov R64_RBX(%rsp), %rbx
654 mov R64_RCX(%rsp), %rcx
655 mov R64_RAX(%rsp), %rax
656
6d2010ae
A
657 add $(ISS64_OFFSET)+8+8+8, %rsp /* pop saved state frame +
658 trapno, trapfn and error */
659 cmpl $(SYSCALL_CS),ISF64_CS-8-8-8(%rsp)
0c530ab8
A
660 /* test for fast entry/exit */
661 je L_sysret
662EXT(ret64_iret):
663 iretq /* return from interrupt */
664
665L_sysret:
666 /*
667 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
668 * rcx user rip
669 * r1 user rflags
670 * rsp user stack pointer
671 */
6d2010ae
A
672 mov ISF64_RIP-8-8-8(%rsp), %rcx
673 mov ISF64_RFLAGS-8-8-8(%rsp), %r11
674 mov ISF64_RSP-8-8-8(%rsp), %rsp
2d21ac55 675 sysretq /* return from system call */
0c530ab8
A
676
677/*
678 * Common path to enter locore handlers.
679 */
680L_enter_lohandler:
681 swapgs /* switch to kernel gs (cpu_data) */
682L_enter_lohandler_continue:
683 cmpl $(USER64_CS), ISF64_CS(%rsp)
684 je L_64bit_enter /* this is a 64-bit user task */
685 cmpl $(KERNEL64_CS), ISF64_CS(%rsp)
686 je L_64bit_enter /* we're in 64-bit (EFI) code */
687 jmp L_32bit_enter
688
689/*
690 * System call handlers.
691 * These are entered via a syscall interrupt. The system call number in %rax
692 * is saved to the error code slot in the stack frame. We then branch to the
693 * common state saving code.
694 */
695
696Entry(hi64_unix_scall)
697 swapgs /* switch to kernel gs (cpu_data) */
698L_unix_scall_continue:
699 push %rax /* save system call number */
6d2010ae 700 push $(LO_UNIX_SCALL)
0c530ab8 701 push $(UNIX_INT)
0c530ab8
A
702 jmp L_32bit_enter_check
703
704
705Entry(hi64_mach_scall)
706 swapgs /* switch to kernel gs (cpu_data) */
707L_mach_scall_continue:
708 push %rax /* save system call number */
6d2010ae 709 push $(LO_MACH_SCALL)
0c530ab8 710 push $(MACH_INT)
0c530ab8
A
711 jmp L_32bit_enter_check
712
713
714Entry(hi64_mdep_scall)
715 swapgs /* switch to kernel gs (cpu_data) */
716L_mdep_scall_continue:
717 push %rax /* save system call number */
6d2010ae 718 push $(LO_MDEP_SCALL)
0c530ab8 719 push $(MACHDEP_INT)
0c530ab8
A
720 jmp L_32bit_enter_check
721
722
0c530ab8
A
723Entry(hi64_syscall)
724 swapgs /* Kapow! get per-cpu data area */
725L_syscall_continue:
726 mov %rsp, %gs:CPU_UBER_TMP /* save user stack */
727 mov %gs:CPU_UBER_ISF, %rsp /* switch stack to pcb */
728
729 /*
730 * Save values in the ISF frame in the PCB
731 * to cons up the saved machine state.
732 */
733 movl $(USER_DS), ISF64_SS(%rsp)
734 movl $(SYSCALL_CS), ISF64_CS(%rsp) /* cs - a pseudo-segment */
735 mov %r11, ISF64_RFLAGS(%rsp) /* rflags */
736 mov %rcx, ISF64_RIP(%rsp) /* rip */
737 mov %gs:CPU_UBER_TMP, %rcx
738 mov %rcx, ISF64_RSP(%rsp) /* user stack */
739 mov %rax, ISF64_ERR(%rsp) /* err/rax - syscall code */
b0d623f7 740 movl $(T_SYSCALL), ISF64_TRAPNO(%rsp) /* trapno */
0c530ab8
A
741 movl $(LO_SYSCALL), ISF64_TRAPFN(%rsp)
742 jmp L_64bit_enter /* this can only be a 64-bit task */
2d21ac55
A
743
744
745L_32bit_enter_check:
746 /*
747 * Check we're not a confused 64-bit user.
748 */
749 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
750 jne L_64bit_entry_reject
751 jmp L_32bit_enter
0c530ab8
A
752/*
753 * sysenter entry point
754 * Requires user code to set up:
755 * edx: user instruction pointer (return address)
756 * ecx: user stack pointer
757 * on which is pushed stub ret addr and saved ebx
758 * Return to user-space is made using sysexit.
759 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
760 * or requiring ecx to be preserved.
761 */
762Entry(hi64_sysenter)
763 mov (%rsp), %rsp /* switch from temporary stack to pcb */
764 /*
765 * Push values on to the PCB stack
766 * to cons up the saved machine state.
767 */
768 push $(USER_DS) /* ss */
769 push %rcx /* uesp */
770 pushf /* flags */
771 /*
2d21ac55
A
772 * Clear, among others, the Nested Task (NT) flags bit;
773 * this is zeroed by INT, but not by SYSENTER.
774 */
0c530ab8
A
775 push $0
776 popf
777 push $(SYSENTER_CS) /* cs */
778 swapgs /* switch to kernel gs (cpu_data) */
779L_sysenter_continue:
780 push %rdx /* eip */
781 push %rax /* err/eax - syscall code */
6d2010ae 782 push $0
b0d623f7 783 push $(T_SYSENTER)
0c530ab8 784 orl $(EFL_IF), ISF64_RFLAGS(%rsp)
2d21ac55
A
785 movl $(LO_MACH_SCALL), ISF64_TRAPFN(%rsp)
786 testl %eax, %eax
787 js L_32bit_enter_check
788 movl $(LO_UNIX_SCALL), ISF64_TRAPFN(%rsp)
789 cmpl $(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
790 jne L_64bit_entry_reject
791/* If the caller (typically LibSystem) has recorded the cumulative size of
792 * the arguments in EAX, copy them over from the user stack directly.
793 * We recover from exceptions inline--if the copy loop doesn't complete
794 * due to an exception, we fall back to copyin from compatibility mode.
795 * We can potentially extend this mechanism to mach traps as well (DRK).
796 */
797L_sysenter_copy_args:
798 testl $(I386_SYSCALL_ARG_BYTES_MASK), %eax
799 jz L_32bit_enter
800 xor %r9, %r9
801 mov %gs:CPU_UBER_ARG_STORE, %r8
802 movl %eax, %r9d
803 mov %gs:CPU_UBER_ARG_STORE_VALID, %r12
804 xor %r10, %r10
805 shrl $(I386_SYSCALL_ARG_DWORDS_SHIFT), %r9d
806 andl $(I386_SYSCALL_ARG_DWORDS_MASK), %r9d
807 movl $0, (%r12)
808EXT(hi64_sysenter_user_arg_copy):
8090:
810 movl 4(%rcx, %r10, 4), %r11d
811 movl %r11d, (%r8, %r10, 4)
812 incl %r10d
813 decl %r9d
814 jnz 0b
815 movl $1, (%r12)
816 /* Fall through to 32-bit handler */
0c530ab8
A
817
818L_32bit_enter:
6d2010ae 819 cld
0c530ab8
A
820 /*
821 * Make space for the compatibility save area.
822 */
823 sub $(ISC32_OFFSET), %rsp
824 movl $(SS_32), SS_FLAVOR(%rsp)
825
826 /*
827 * Save segment regs
828 */
b0d623f7
A
829 mov %ds, R32_DS(%rsp)
830 mov %es, R32_ES(%rsp)
831 mov %fs, R32_FS(%rsp)
832 mov %gs, R32_GS(%rsp)
0c530ab8
A
833
834 /*
835 * Save general 32-bit registers
836 */
b0d623f7
A
837 mov %eax, R32_EAX(%rsp)
838 mov %ebx, R32_EBX(%rsp)
839 mov %ecx, R32_ECX(%rsp)
840 mov %edx, R32_EDX(%rsp)
841 mov %ebp, R32_EBP(%rsp)
842 mov %esi, R32_ESI(%rsp)
843 mov %edi, R32_EDI(%rsp)
0c530ab8
A
844
845 /* Unconditionally save cr2; only meaningful on page faults */
846 mov %cr2, %rax
b0d623f7 847 mov %eax, R32_CR2(%rsp)
0c530ab8
A
848
849 /*
850 * Copy registers already saved in the machine state
851 * (in the interrupt stack frame) into the compat save area.
852 */
853 mov ISC32_RIP(%rsp), %eax
b0d623f7 854 mov %eax, R32_EIP(%rsp)
0c530ab8 855 mov ISC32_RFLAGS(%rsp), %eax
b0d623f7 856 mov %eax, R32_EFLAGS(%rsp)
0c530ab8 857 mov ISC32_CS(%rsp), %eax
b0d623f7
A
858 mov %eax, R32_CS(%rsp)
859 testb $3, %al
860 jz 1f
861 xor %ebp, %ebp
8621:
0c530ab8 863 mov ISC32_RSP(%rsp), %eax
b0d623f7 864 mov %eax, R32_UESP(%rsp)
0c530ab8 865 mov ISC32_SS(%rsp), %eax
b0d623f7 866 mov %eax, R32_SS(%rsp)
0c530ab8
A
867L_32bit_enter_after_fault:
868 mov ISC32_TRAPNO(%rsp), %ebx /* %ebx := trapno for later */
b0d623f7 869 mov %ebx, R32_TRAPNO(%rsp)
0c530ab8 870 mov ISC32_ERR(%rsp), %eax
b0d623f7 871 mov %eax, R32_ERR(%rsp)
0c530ab8
A
872 mov ISC32_TRAPFN(%rsp), %edx
873
874/*
875 * Common point to enter lo_handler in compatibilty mode:
876 * %ebx trapno
877 * %edx locore handler address
878 */
879L_enter_lohandler2:
880 /*
881 * Switch address space to kernel
882 * if not shared space and not already mapped.
883 * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
884 */
885 mov %cr3, %rax
886 mov %gs:CPU_TASK_CR3, %rcx
887 cmp %rax, %rcx /* is the task's cr3 loaded? */
888 jne 1f
889 cmpl $(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
890 je 2f
8911:
892 mov %gs:CPU_KERNEL_CR3, %rcx
893 cmp %rax, %rcx
894 je 2f
895 mov %rcx, %cr3
896 mov %rcx, %gs:CPU_ACTIVE_CR3
8972:
6d2010ae
A
898 movl %gs:CPU_ACTIVE_THREAD,%ecx /* Get the active thread */
899 cmpl $0, TH_PCB_IDS(%ecx) /* Is there a debug register state? */
900 jz 21f
901 xor %ecx, %ecx /* If so, reset DR7 (the control) */
902 mov %rcx, %dr7
90321:
0c530ab8
A
904 /*
905 * Switch to compatibility mode.
906 * Then establish kernel segments.
907 */
908 swapgs /* Done with uber-kernel gs */
909 ENTER_COMPAT_MODE()
910
911 /*
912 * Now in compatibility mode and running in compatibility space
913 * prepare to enter the locore handler.
914 * %ebx trapno
915 * %edx lo_handler pointer
916 * Note: the stack pointer (now 32-bit) is now directly addressing the
917 * the kernel below 4G and therefore is automagically re-based.
918 */
919 mov $(KERNEL_DS), %eax
920 mov %eax, %ss
921 mov %eax, %ds
922 mov %eax, %es
923 mov %eax, %fs
924 mov $(CPU_DATA_GS), %eax
925 mov %eax, %gs
926
6d2010ae
A
927 incl %gs:hwIntCnt(,%ebx,4) /* Bump the trap/intr count */
928
0c530ab8
A
929 /* Dispatch the designated lo handler */
930 jmp *%edx
931
932 .code64
933L_64bit_entry_reject:
934 /*
935 * Here for a 64-bit user attempting an invalid kernel entry.
936 */
937 movl $(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
938 movl $(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
939 /* Fall through... */
940
941L_64bit_enter:
942 /*
943 * Here for a 64-bit user task, or special 64-bit kernel code.
944 * Make space for the save area.
945 */
946 sub $(ISS64_OFFSET), %rsp
947 movl $(SS_64), SS_FLAVOR(%rsp)
948
6d2010ae 949 cld
0c530ab8
A
950 /*
951 * Save segment regs
952 */
953 mov %fs, R64_FS(%rsp)
954 mov %gs, R64_GS(%rsp)
955
956 /* Save general-purpose registers */
957 mov %rax, R64_RAX(%rsp)
958 mov %rcx, R64_RCX(%rsp)
959 mov %rbx, R64_RBX(%rsp)
960 mov %rbp, R64_RBP(%rsp)
961 mov %r11, R64_R11(%rsp)
962 mov %r12, R64_R12(%rsp)
963 mov %r13, R64_R13(%rsp)
964 mov %r14, R64_R14(%rsp)
965 mov %r15, R64_R15(%rsp)
966
967 /* cr2 is significant only for page-faults */
968 mov %cr2, %rax
969 mov %rax, R64_CR2(%rsp)
970
971 /* Other registers (which may contain syscall args) */
972 mov %rdi, R64_RDI(%rsp) /* arg0 .. */
973 mov %rsi, R64_RSI(%rsp)
974 mov %rdx, R64_RDX(%rsp)
975 mov %r10, R64_R10(%rsp)
976 mov %r8, R64_R8(%rsp)
977 mov %r9, R64_R9(%rsp) /* .. arg5 */
978
979L_64bit_enter_after_fault:
980 /*
981 * At this point we're almost ready to join the common lo-entry code.
982 */
983 mov R64_TRAPNO(%rsp), %ebx
984 mov R64_TRAPFN(%rsp), %edx
985
b0d623f7
A
986 testb $3, ISF64_CS+ISS64_OFFSET(%rsp)
987 jz 1f
988 xor %rbp, %rbp
9891:
0c530ab8
A
990 jmp L_enter_lohandler2
991
2d21ac55 992Entry(hi64_page_fault)
6d2010ae 993 push $(LO_ALLTRAPS)
2d21ac55 994 push $(T_PAGE_FAULT)
2d21ac55
A
995 cmpl $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
996 jne L_enter_lohandler
997 cmpl $(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
6d2010ae 998 jne hi64_kernel_trap
2d21ac55
A
999 mov ISF64_RSP(%rsp), %rsp
1000 jmp L_32bit_enter
1001
0c530ab8
A
1002/*
1003 * Debug trap. Check for single-stepping across system call into
1004 * kernel. If this is the case, taking the debug trap has turned
1005 * off single-stepping - save the flags register with the trace
1006 * bit set.
1007 */
1008Entry(hi64_debug)
1009 swapgs /* set %gs for cpu data */
1010 push $0 /* error code */
6d2010ae 1011 push $(LO_ALLTRAPS)
0c530ab8 1012 push $(T_DEBUG)
0c530ab8
A
1013
1014 testb $3, ISF64_CS(%rsp)
1015 jnz L_enter_lohandler_continue
1016
1017 /*
1018 * trap came from kernel mode
1019 */
1020 cmpl $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
1021 jne L_enter_lohandler_continue /* trap not in uber-space */
1022
1023 cmpl $(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
1024 jne 6f
1025 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
1026 jmp L_mach_scall_continue /* continue system call entry */
10276:
1028 cmpl $(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
1029 jne 5f
1030 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
1031 jmp L_mdep_scall_continue /* continue system call entry */
10325:
1033 cmpl $(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
1034 jne 4f
1035 add $(ISF64_SIZE),%rsp /* remove entire intr stack frame */
1036 jmp L_unix_scall_continue /* continue system call entry */
10374:
1038 cmpl $(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
1039 jne L_enter_lohandler_continue
1040 /*
1041 * Interrupt stack frame has been pushed on the temporary stack.
1042 * We have to switch to pcb stack and copy eflags.
1043 */
6d2010ae 1044 add $40,%rsp /* remove trapno/trapfn/err/rip/cs */
0c530ab8
A
1045 push %rcx /* save %rcx - user stack pointer */
1046 mov 32(%rsp),%rcx /* top of intr stack -> pcb stack */
1047 xchg %rcx,%rsp /* switch to pcb stack */
1048 push $(USER_DS) /* ss */
1049 push (%rcx) /* saved %rcx into rsp slot */
1050 push 8(%rcx) /* rflags */
1051 mov (%rcx),%rcx /* restore %rcx */
1052 push $(SYSENTER_TF_CS) /* cs - not SYSENTER_CS for iret path */
1053 jmp L_sysenter_continue /* continue sysenter entry */
1054
1055
1056Entry(hi64_double_fault)
1057 swapgs /* set %gs for cpu data */
6d2010ae 1058 push $(LO_DOUBLE_FAULT)
0c530ab8 1059 push $(T_DOUBLE_FAULT)
0c530ab8
A
1060
1061 cmpl $(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
1062 jne L_enter_lohandler_continue /* trap not in uber-space */
1063
1064 cmpl $(EXT(hi64_syscall)), ISF64_RIP(%rsp)
1065 jne L_enter_lohandler_continue
1066
1067 mov ISF64_RSP(%rsp), %rsp
1068 jmp L_syscall_continue
1069
1070
1071/*
1072 * General protection or segment-not-present fault.
1073 * Check for a GP/NP fault in the kernel_return
1074 * sequence; if there, report it as a GP/NP fault on the user's instruction.
1075 *
6d2010ae
A
1076 * rsp-> 0 ISF64_TRAPNO: trap code (NP or GP)
1077 * 8 ISF64_TRAPFN: trap function
1078 * 16 ISF64_ERR: segment number in error (error code)
1079 * 24 ISF64_RIP: rip
1080 * 32 ISF64_CS: cs
1081 * 40 ISF64_RFLAGS: rflags
1082 * 48 ISF64_RSP: rsp
1083 * 56 ISF64_SS: ss
1084 * 64 old registers (trap is from kernel)
0c530ab8
A
1085 */
1086Entry(hi64_gen_prot)
6d2010ae 1087 push $(LO_ALLTRAPS)
0c530ab8
A
1088 push $(T_GENERAL_PROTECTION)
1089 jmp trap_check_kernel_exit /* check for kernel exit sequence */
1090
1091Entry(hi64_stack_fault)
6d2010ae 1092 push $(LO_ALLTRAPS)
0c530ab8
A
1093 push $(T_STACK_FAULT)
1094 jmp trap_check_kernel_exit /* check for kernel exit sequence */
1095
/*
 * Segment-not-present fault: push trap function and trap number, then
 * fall through into the shared kernel-exit-sequence check below.
 */
Entry(hi64_segnp)
	push	$(LO_ALLTRAPS)		/* trap function slot (ISF64_TRAPFN) */
	push	$(T_SEGMENT_NOT_PRESENT)
					/* indicate fault type */
trap_check_kernel_exit:
	testb	$3,ISF64_CS(%rsp)	/* CPL != 0? */
	jnz	L_enter_lohandler	/* fault was from user mode */
					/* trap was from kernel mode, so */
					/* check for the kernel exit sequence */
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue /* trap not in uber-space */

	/* Compare the faulting RIP (low 32 bits) against each known
	 * return-to-user exit point in the trampolines: */
	cmpl	$(EXT(ret32_iret)), ISF64_RIP(%rsp)
	je	L_fault_iret32
	cmpl	$(EXT(ret32_set_ds)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_es)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_fs)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_gs)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg

	cmpl	$(EXT(ret64_iret)), ISF64_RIP(%rsp)
	je	L_fault_iret64

	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	cmove	ISF64_RSP(%rsp), %rsp	/* if so, pop back to pre-fault stack */
	je	L_32bit_enter

hi64_kernel_trap:
	/*
	 * Here after taking an unexpected trap from kernel mode - perhaps
	 * while running in the trampolines hereabouts.
	 * Make sure we're not on the PCB stack, if so move to the kernel stack.
	 * This is likely a fatal condition.
	 * But first, try to be sure we have the kernel gs base active...
	 */
	cmpq	$0, %gs:CPU_THIS	/* test gs_base */
	js	1f			/* -ve kernel addr, no swap */
	swapgs				/* +ve user addr, swap */
1:
	movq	%rax, %gs:CPU_UBER_TMP	/* save %rax */
	movq	%gs:CPU_UBER_ISF, %rax	/* PCB stack addr */
	subq	%rsp, %rax
	cmpq	$(PAGE_SIZE), %rax	/* current stack in PCB? */
	movq	%gs:CPU_UBER_TMP, %rax	/* restore %rax */
	ja	L_enter_lohandler_continue	/* stack not in PCB */

	/*
	 * Here if %rsp is in the PCB
	 * Copy the interrupt stack frame from PCB stack to kernel stack
	 * (pushed in reverse so the kernel-stack copy has the same layout).
	 */
	movq	%gs:CPU_KERNEL_STACK, %rax	/* note: %rax restored below */
	xchgq	%rax, %rsp
	pushq	ISF64_SS(%rax)
	pushq	ISF64_RSP(%rax)
	pushq	ISF64_RFLAGS(%rax)
	pushq	ISF64_CS(%rax)
	pushq	ISF64_RIP(%rax)
	pushq	ISF64_ERR(%rax)
	pushq	ISF64_TRAPFN(%rax)
	pushq	ISF64_TRAPNO(%rax)
	movq	%gs:CPU_UBER_TMP, %rax	/* restore %rax */
	jmp	L_enter_lohandler_continue
1161
0c530ab8 1162
0c530ab8
A
/*
 * GP/NP fault on IRET: CS or SS is in error.
 * All registers contain the user's values.
 *
 * on SP is
 *	  0	ISF64_TRAPNO:	trap code (NP or GP)
 *	  8	ISF64_TRAPFN:	trap function
 *	 16	ISF64_ERR:	segment number in error (error code)
 *	 24	ISF64_RIP:	rip
 *	 32	ISF64_CS:	cs
 *	 40	ISF64_RFLAGS:	rflags
 *	 48	ISF64_RSP:	rsp
 *	 56	ISF64_SS:	ss  --> new trapno/trapfn
 *	 64	pad	    --> new errcode
 *	 72	user rip
 *	 80	user cs
 *	 88	user rflags
 *	 96	user rsp
 *	104	user ss	(16-byte aligned)
 */
L_fault_iret32:
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don`t need saved rip) */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	%rax, ISF64_SS(%rsp)	/* put in user trap number */
	mov	ISF64_ERR(%rsp), %rax
	mov	%rax, 8+ISF64_SS(%rsp)	/* put in user errcode */
	mov	ISF64_RIP(%rsp), %rax	/* restore rax */
	add	$(ISF64_SS), %rsp	/* reset to original frame */
					/* now treat as fault from user */
	swapgs				/* back to user gs base */
	jmp	L_32bit_enter
1194
/*
 * Same recovery as L_fault_iret32, but the interrupted user context is
 * 64-bit, so re-enter via the 64-bit path.
 */
L_fault_iret64:
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don`t need saved rip) */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	%rax, ISF64_SS(%rsp)	/* put in user trap number */
	mov	ISF64_ERR(%rsp), %rax
	mov	%rax, 8+ISF64_SS(%rsp)	/* put in user errcode */
	mov	ISF64_RIP(%rsp), %rax	/* restore rax */
	add	$(ISF64_SS), %rsp	/* reset to original frame */
					/* now treat as fault from user */
	swapgs				/* back to user gs base */
	jmp	L_64bit_enter
1206
/*
 * Fault restoring a segment register. All of the saved state is still
 * on the stack untouched since we didn't move the stack pointer.
 */
L_32bit_fault_set_seg:
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	ISF64_ERR(%rsp), %rdx
	mov	ISF64_RSP(%rsp), %rsp	/* reload stack prior to fault */
	mov	%rax,ISC32_TRAPNO(%rsp)
	mov	%rdx,ISC32_ERR(%rsp)
					/* now treat as fault from user */
					/* except that all the state is */
					/* already saved - we just have to */
					/* move the trapno and error into */
					/* the compatibility frame */
	swapgs				/* back to user gs base */
	jmp	L_32bit_enter_after_fault
1224
1225
/*
 * Fatal exception handlers:
 */
Entry(db_task_dbl_fault64)
	push	$(LO_DOUBLE_FAULT)	/* trap function slot */
	push	$(T_DOUBLE_FAULT)	/* trap number slot */
	jmp	L_enter_lohandler
1233
/*
 * Fatal stack-fault task gate.
 * NOTE(review): pushes LO_DOUBLE_FAULT as the trap function while the trap
 * number is T_STACK_FAULT — appears deliberate (both fatal handlers share
 * the double-fault low handler); confirm against upstream before changing.
 */
Entry(db_task_stk_fault64)
	push	$(LO_DOUBLE_FAULT)	/* trap function slot */
	push	$(T_STACK_FAULT)	/* trap number slot */
	jmp	L_enter_lohandler
1238
/*
 * Machine check: no hardware error code, so push a zero to keep the
 * ISF64 frame layout uniform, then the trap function and trap number.
 */
Entry(mc64)
	push	$(0)			/* Error */
	push	$(LO_MACHINE_CHECK)	/* trap function slot */
	push	$(T_MACHINE_CHECK)	/* trap number slot */
	jmp	L_enter_lohandler
6d2010ae
A
1244
1245
1246 .code32
1247
1248/*
1249 * All task 'exceptions' enter lo_alltraps:
1250 * esp -> x86_saved_state_t
1251 *
1252 * The rest of the state is set up as:
1253 * cr3 -> kernel directory
1254 * esp -> low based stack
1255 * gs -> CPU_DATA_GS
1256 * cs -> KERNEL32_CS
1257 * ss/ds/es -> KERNEL_DS
1258 *
1259 * interrupts disabled
1260 * direction flag cleared
1261 */
1262Entry(lo_alltraps)
1263 movl R32_CS(%esp),%eax /* assume 32-bit state */
1264 cmpl $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
1265 jne 1f
1266 movl R64_CS(%esp),%eax /* 64-bit user mode */
12671:
1268 testb $3,%al
1269 jz trap_from_kernel
1270 /* user mode trap */
1271 TIME_TRAP_UENTRY
1272
1273 movl %gs:CPU_ACTIVE_THREAD,%ecx
1274 movl TH_TASK(%ecx),%ebx
1275
1276 /* Check for active vtimers in the current task */
1277 TASK_VTIMER_CHECK(%ebx, %ecx)
1278
1279 movl %gs:CPU_KERNEL_STACK,%ebx
1280 xchgl %ebx,%esp /* switch to kernel stack */
1281
1282 CCALL1(user_trap, %ebx) /* call user trap routine */
1283 /* user_trap() unmasks interrupts */
1284 cli /* hold off intrs - critical section */
1285 xorl %ecx,%ecx /* don't check if we're in the PFZ */
1286
/*
 * Return from trap or system call, checking for ASTs.
 * On lowbase PCB stack with intrs disabled.
 * %ecx != 0 means: check whether the user EIP is inside the commpage
 * preemption-free zone (PFZ) before servicing an AST.
 */
Entry(return_from_trap)
	movl	%gs:CPU_ACTIVE_THREAD, %esp
	movl	TH_PCB_ISS(%esp),%esp	/* switch back to PCB stack */
	movl	%gs:CPU_PENDING_AST, %eax
	testl	%eax, %eax
	je	return_to_user		/* branch if no AST */
LEXT(return_from_trap_with_ast)
	movl	%gs:CPU_KERNEL_STACK, %ebx
	xchgl	%ebx, %esp		/* switch to kernel stack */

	testl	%ecx, %ecx		/* see if we need to check for an EIP in the PFZ */
	je	2f			/* no, go handle the AST */
	cmpl	$(SS_64), SS_FLAVOR(%ebx)	/* are we a 64-bit task? */
	je	1f
					/* no... 32-bit user mode */
	movl	R32_EIP(%ebx), %eax
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* clear frame pointer */
	CCALL1(commpage_is_in_pfz32, %eax)
	popl	%ebx			/* retrieve pointer to PCB stack */
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R32_EBX(%ebx)	/* let the PFZ know we've pended an AST */
	xchgl	%ebx, %esp		/* switch back to PCB stack */
	jmp	return_to_user
1:					/* 64-bit user mode */
	movl	R64_RIP(%ebx), %ecx	/* low/high halves of the 64-bit RIP */
	movl	R64_RIP+4(%ebx), %eax
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* clear frame pointer */
	CCALL2(commpage_is_in_pfz64, %ecx, %eax)
	popl	%ebx			/* retrieve pointer to PCB stack */
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R64_RBX(%ebx)	/* let the PFZ know we've pended an AST */
	xchgl	%ebx, %esp		/* switch back to PCB stack */
	jmp	return_to_user
2:
	sti				/* interrupts always enabled on return to user mode */
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* Clear framepointer */
	CCALL1(i386_astintr, $0)	/* take the AST */
	cli

	popl	%esp			/* switch back to PCB stack (w/exc link) */

	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)	/* and check again (rare) */
1339
1340
1341
/*
 * Trap from kernel mode. No need to switch stacks.
 * Interrupts must be off here - we will set them to state at time of trap
 * as soon as it's safe for us to do so and not recurse doing preemption
 */
trap_from_kernel:
	movl	%esp, %eax		/* saved state addr */
	pushl	R32_EIP(%esp)		/* Simulate a CALL from fault point */
	pushl	%ebp			/* Extend framepointer chain */
	movl	%esp, %ebp
	CCALL1WITHSP(kernel_trap, %eax)	/* Call kernel trap handler */
	popl	%ebp
	addl	$4, %esp		/* discard simulated return address */
	cli

	movl	%gs:CPU_PENDING_AST,%eax	/* get pending asts */
	testl	$ AST_URGENT,%eax	/* any urgent preemption? */
	je	ret_to_kernel		/* no, nothing to do */
	cmpl	$ T_PREEMPT,R32_TRAPNO(%esp)
	je	ret_to_kernel		/* T_PREEMPT handled in kernel_trap() */
	testl	$ EFL_IF,R32_EFLAGS(%esp)	/* interrupts disabled? */
	je	ret_to_kernel
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel
	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	and	EXT(kernel_stack_mask),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	CCALL1(i386_astintr, $1)	/* take the AST */
	/* NOTE(review): no jmp follows here in this file; if i386_astintr
	 * returns, control falls into the text below — confirm against
	 * upstream that this is the intended path. */
1374
1375
/*
 * All interrupts on all tasks enter here with:
 *	esp->	 -> x86_saved_state_t
 *
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movl	%gs:CPU_INT_STACK_TOP,%ecx
	cmpl	%esp,%ecx
	jb	1f
	leal	-INTSTACK_SIZE(%ecx),%edx
	cmpl	%esp,%edx
	jb	int_from_intstack
1:
	xchgl	%ecx,%esp		/* switch to interrupt stack */

	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */

	subl	$8, %esp		/* for 16-byte stack alignment */
	pushl	%ecx			/* save pointer to old stack */
	movl	%ecx,%gs:CPU_INT_STATE	/* save intr state */

	TIME_INT_ENTRY			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	TH_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%gs:CPU_INT_STATE, %eax
	CCALL1(interrupt, %eax)		/* call generic interrupt routine */

	cli				/* just in case we returned with intrs enabled */
	xorl	%eax,%eax
	movl	%eax,%gs:CPU_INT_STATE	/* clear intr state pointer */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%eax
	movl	TH_PCB_FPS(%eax),%eax	/* get pcb's ifps */
	testl	%eax, %eax		/* Is there a context */
	je	1f			/* Branch if not */
	cmpl	$0, FP_VALID(%eax)	/* Check fp_valid */
	jne	1f			/* Branch if valid */
	clts				/* Clear TS */
	jmp	2f
1:
	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
2:
	popl	%esp			/* switch back to old stack */

	/* Load interrupted code segment into %eax */
	movl	R32_CS(%esp),%eax	/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
	jne	3f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
3:
	testb	$3,%al			/* user mode, */
	jnz	ast_from_interrupt_user	/* go handle potential ASTs */
	/*
	 * we only want to handle preemption requests if
	 * the interrupt fell in the kernel context
	 * and preemption isn't disabled
	 */
	movl	%gs:CPU_PENDING_AST,%eax
	testl	$ AST_URGENT,%eax	/* any urgent requests? */
	je	ret_to_kernel		/* no, nothing to do */

	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel		/* yes, skip it */

	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	and	EXT(kernel_stack_mask),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	/*
	 * Take an AST from kernel space. We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
	CCALL1(i386_astintr, $1)

	jmp	ret_to_kernel
1484
/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL
	incl	%gs:CPU_NESTED_ISTACK	/* track nesting depth */

	movl	%esp, %edx		/* x86_saved_state */
	CCALL1(interrupt, %edx)

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL
	decl	%gs:CPU_NESTED_ISTACK

	jmp	ret_to_kernel
1501
/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* pending ASTs? */
	je	ret_to_user		/* no, nothing to do */

	TIME_TRAP_UENTRY

	movl	$1, %ecx		/* check if we're in the PFZ */
	jmp	EXT(return_from_trap_with_ast)	/* return */
1514
1515
/*
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *	esp	 -> x86_saved_state32_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_unix_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */
	incl	TH_SYSCALLS_UNIX(%ecx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	sti				/* re-enable interrupts for the C handler */

	CCALL1(unix_syscall, %edi)
	/*
	 * always returns through thread_exception_return
	 */
1549
1550
/*
 * 32-bit Mach system call entry: same setup as lo_unix_scall but
 * dispatches to mach_call_munger and bumps the Mach call counter.
 */
Entry(lo_mach_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */
	incl	TH_SYSCALLS_MACH(%ecx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	sti				/* re-enable interrupts for the C handler */

	CCALL1(mach_call_munger, %edi)
	/*
	 * always returns through thread_exception_return
	 */
1569
1570
/*
 * 32-bit machine-dependent system call entry: same setup as lo_unix_scall
 * but dispatches to machdep_syscall (no per-thread call counter here).
 */
Entry(lo_mdep_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	sti				/* re-enable interrupts for the C handler */

	CCALL1(machdep_syscall, %edi)
	/*
	 * always returns through thread_exception_return
	 */
1588
/*
 * Common exit: record the user-exit timestamp, then return to user mode.
 */
return_to_user:
	TIME_TRAP_UEXIT
	jmp	ret_to_user
1593
/*
 * 64bit Tasks
 * System call entries via syscall only:
 *
 *	esp	 -> x86_saved_state64_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_syscall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	/*
	 * We can be here either for a mach, unix machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
	movl	R64_RAX(%edi), %eax	/* syscall number/class */
	movl	%eax, %edx
	andl	$(SYSCALL_CLASS_MASK), %edx	/* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_mach_scall)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_unix_scall)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_mdep_scall)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_diag_scall)

	sti

	/* Syscall class unknown */
	CCALL5(i386_exception, $(EXC_SYSCALL), %eax, $0, $1, $0)
	/* no return */
1642
1643
/*
 * 64-bit BSD syscall: reached from lo_syscall with %ecx = current thread
 * and %edi = saved state; count the call and dispatch.
 */
Entry(lo64_unix_scall)
	incl	TH_SYSCALLS_UNIX(%ecx)	/* increment call count */
	sti

	CCALL1(unix_syscall64, %edi)
	/*
	 * always returns through thread_exception_return
	 */
1652
1653
/*
 * 64-bit Mach syscall: reached from lo_syscall with %ecx = current thread
 * and %edi = saved state; count the call and dispatch.
 */
Entry(lo64_mach_scall)
	incl	TH_SYSCALLS_MACH(%ecx)	/* increment call count */
	sti

	CCALL1(mach_call_munger64, %edi)
	/*
	 * always returns through thread_exception_return
	 */
1662
1663
1664
/*
 * 64-bit machine-dependent syscall: reached from lo_syscall with
 * %edi = saved state; no per-thread counter for this class.
 */
Entry(lo64_mdep_scall)
	sti

	CCALL1(machdep_syscall64, %edi)
	/*
	 * always returns through thread_exception_return
	 */
1672
1673
/*
 * 64-bit diagnostics syscall: diagCall64's return value selects either a
 * direct return to user mode (nonzero) or an EXC_SYSCALL exception (zero).
 */
Entry(lo64_diag_scall)
	CCALL1(diagCall64, %edi)	// Call diagnostics

	cli				// Disable interruptions just in case
	cmpl	$0,%eax			// What kind of return is this?
	je	1f
	movl	%edi, %esp		// Get back the original stack
	jmp	return_to_user		// Normal return, do not check asts...
1:
	CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
		// pass what would be the diag syscall
		// error return - cause an exception
	/* no return */
1687
1688
1689
/*
 * Compatibility mode's last gasp...
 */
Entry(lo_df64)
	movl	%esp, %eax		/* pass saved state to panic handler */
	CCALL1(panic_double_fault64, %eax)
	hlt				/* should not return; halt if it does */
1697
/*
 * Fatal machine-check landing point: panic with the saved state.
 */
Entry(lo_mc64)
	movl	%esp, %eax		/* pass saved state to panic handler */
	CCALL1(panic_machine_check64, %eax)
	hlt				/* should not return; halt if it does */