/*
 * Copyright (c) 2010 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <i386/asm.h>
#include <i386/asm64.h>
#include <assym.s>
#include <mach_kdb.h>
#include <i386/eflags.h>
#include <i386/trap.h>
#include <i386/rtclock_asm.h>
#define	_ARCH_I386_ASM_HELP_H_	/* Prevent inclusion of user header */
#include <mach/i386/syscall_sw.h>
#include <i386/postcode.h>
#include <i386/proc_reg.h>
#include <mach/exception_types.h>


/*
 * Low-memory compatibility-mode handlers.
 */
#define	LO_ALLINTRS		EXT(lo_allintrs)
#define	LO_ALLTRAPS		EXT(lo_alltraps)
#define	LO_SYSCALL		EXT(lo_syscall)
#define	LO_UNIX_SCALL		EXT(lo_unix_scall)
#define	LO_MACH_SCALL		EXT(lo_mach_scall)
#define	LO_MDEP_SCALL		EXT(lo_mdep_scall)
#define	LO_DIAG_SCALL		EXT(lo_diag_scall)
#define	LO_DOUBLE_FAULT		EXT(lo_df64)
#define	LO_MACHINE_CHECK	EXT(lo_mc64)

/*
 * Interrupt descriptor table and code vectors for it.
 *
 * The IDT64_BASE_ENTRY macro lays down a fake descriptor that must be
 * reformatted ("fixed") before use.
 * All vectors are rebased in uber-space.
 * Special vectors (e.g. double-fault) use a non-0 IST.
 */
#define	IDT64_BASE_ENTRY(vec,seg,ist,type)	 \
	.data				;\
	.long	vec			;\
	.long	KERNEL_UBER_BASE_HI32	;\
	.word	seg			;\
	.byte	ist*16			;\
	.byte	type			;\
	.long	0			;\
	.text

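/*
 * For reference, the boot-time fixup rearranges each fake descriptor
 * above into the architectural 16-byte long-mode interrupt gate
 * (per the Intel SDM):
 *
 *	bytes  0-1	handler offset 15:0
 *	bytes  2-3	segment selector
 *	byte   4	IST index (bits 2:0)
 *	byte   5	type/attributes (P, DPL, gate type)
 *	bytes  6-7	handler offset 31:16
 *	bytes  8-11	handler offset 63:32 (KERNEL_UBER_BASE_HI32 here)
 *	bytes 12-15	reserved
 */
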
#define	IDT64_ENTRY(vec,ist,type) \
	IDT64_BASE_ENTRY(EXT(vec),KERNEL64_CS,ist,type)
#define	IDT64_ENTRY_LOCAL(vec,ist,type) \
	IDT64_BASE_ENTRY(vec,KERNEL64_CS,ist,type)

/*
 * Push trap number and address of compatibility mode handler,
 * then branch to common trampoline. Error already pushed.
 */
#define	EXCEP64_ERR(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)		;\
Entry(name)					;\
	push	$(LO_ALLTRAPS)			;\
	push	$(n)				;\
	jmp	L_enter_lohandler


/*
 * Push error(0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	EXCEPTION64(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)		;\
Entry(name)					;\
	push	$0				;\
	push	$(LO_ALLTRAPS)			;\
	push	$(n)				;\
	jmp	L_enter_lohandler


/*
 * Interrupt from user.
 * Push error (0), trap number and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	EXCEP64_USR(n,name)			 \
	IDT64_ENTRY(name,0,U_INTR_GATE)		;\
Entry(name)					;\
	push	$0				;\
	push	$(LO_ALLTRAPS)			;\
	push	$(n)				;\
	jmp	L_enter_lohandler


/*
 * Special interrupt code from user.
 */
#define	EXCEP64_SPC_USR(n,name)			 \
	IDT64_ENTRY(name,0,U_INTR_GATE)


/*
 * Special interrupt code.
 * In 64-bit mode we may use an IST slot instead of task gates.
 */
#define	EXCEP64_IST(n,name,ist)			 \
	IDT64_ENTRY(name,ist,K_INTR_GATE)
#define	EXCEP64_SPC(n,name)			 \
	IDT64_ENTRY(name,0,K_INTR_GATE)


/*
 * Interrupt.
 * Push zero err, interrupt vector and address of compatibility mode handler,
 * then branch to common trampoline.
 */
#define	INTERRUPT64(n)				 \
	IDT64_ENTRY_LOCAL(L_ ## n,0,K_INTR_GATE) ;\
	.align	FALIGN				;\
L_ ## n:					;\
	push	$0				;\
	push	$(LO_ALLINTRS)			;\
	push	$(n)				;\
	jmp	L_enter_lohandler


	.data
	.align 12
Entry(master_idt64)
Entry(hi64_data_base)
	.text
	.code64
Entry(hi64_text_base)

EXCEPTION64(0x00,t64_zero_div)
EXCEP64_SPC(0x01,hi64_debug)
INTERRUPT64(0x02)			/* NMI */
EXCEP64_USR(0x03,t64_int3)
EXCEP64_USR(0x04,t64_into)
EXCEP64_USR(0x05,t64_bounds)
EXCEPTION64(0x06,t64_invop)
EXCEPTION64(0x07,t64_nofpu)
#if	MACH_KDB
EXCEP64_IST(0x08,db_task_dbl_fault64,1)
#else
EXCEP64_IST(0x08,hi64_double_fault,1)
#endif
EXCEPTION64(0x09,a64_fpu_over)
EXCEPTION64(0x0a,a64_inv_tss)
EXCEP64_SPC(0x0b,hi64_segnp)
#if	MACH_KDB
EXCEP64_IST(0x0c,db_task_stk_fault64,1)
#else
EXCEP64_SPC(0x0c,hi64_stack_fault)
#endif
EXCEP64_SPC(0x0d,hi64_gen_prot)
EXCEP64_SPC(0x0e,hi64_page_fault)
EXCEPTION64(0x0f,t64_trap_0f)
EXCEPTION64(0x10,t64_fpu_err)
EXCEPTION64(0x11,t64_trap_11)
EXCEP64_IST(0x12,mc64,1)
EXCEPTION64(0x13,t64_sse_err)
EXCEPTION64(0x14,t64_trap_14)
EXCEPTION64(0x15,t64_trap_15)
EXCEPTION64(0x16,t64_trap_16)
EXCEPTION64(0x17,t64_trap_17)
EXCEPTION64(0x18,t64_trap_18)
EXCEPTION64(0x19,t64_trap_19)
EXCEPTION64(0x1a,t64_trap_1a)
EXCEPTION64(0x1b,t64_trap_1b)
EXCEPTION64(0x1c,t64_trap_1c)
EXCEPTION64(0x1d,t64_trap_1d)
EXCEPTION64(0x1e,t64_trap_1e)
EXCEPTION64(0x1f,t64_trap_1f)

INTERRUPT64(0x20)
INTERRUPT64(0x21)
INTERRUPT64(0x22)
INTERRUPT64(0x23)
INTERRUPT64(0x24)
INTERRUPT64(0x25)
INTERRUPT64(0x26)
INTERRUPT64(0x27)
INTERRUPT64(0x28)
INTERRUPT64(0x29)
INTERRUPT64(0x2a)
INTERRUPT64(0x2b)
INTERRUPT64(0x2c)
INTERRUPT64(0x2d)
INTERRUPT64(0x2e)
INTERRUPT64(0x2f)

INTERRUPT64(0x30)
INTERRUPT64(0x31)
INTERRUPT64(0x32)
INTERRUPT64(0x33)
INTERRUPT64(0x34)
INTERRUPT64(0x35)
INTERRUPT64(0x36)
INTERRUPT64(0x37)
INTERRUPT64(0x38)
INTERRUPT64(0x39)
INTERRUPT64(0x3a)
INTERRUPT64(0x3b)
INTERRUPT64(0x3c)
INTERRUPT64(0x3d)
INTERRUPT64(0x3e)
INTERRUPT64(0x3f)

INTERRUPT64(0x40)
INTERRUPT64(0x41)
INTERRUPT64(0x42)
INTERRUPT64(0x43)
INTERRUPT64(0x44)
INTERRUPT64(0x45)
INTERRUPT64(0x46)
INTERRUPT64(0x47)
INTERRUPT64(0x48)
INTERRUPT64(0x49)
INTERRUPT64(0x4a)
INTERRUPT64(0x4b)
INTERRUPT64(0x4c)
INTERRUPT64(0x4d)
INTERRUPT64(0x4e)
INTERRUPT64(0x4f)

INTERRUPT64(0x50)
INTERRUPT64(0x51)
INTERRUPT64(0x52)
INTERRUPT64(0x53)
INTERRUPT64(0x54)
INTERRUPT64(0x55)
INTERRUPT64(0x56)
INTERRUPT64(0x57)
INTERRUPT64(0x58)
INTERRUPT64(0x59)
INTERRUPT64(0x5a)
INTERRUPT64(0x5b)
INTERRUPT64(0x5c)
INTERRUPT64(0x5d)
INTERRUPT64(0x5e)
INTERRUPT64(0x5f)

INTERRUPT64(0x60)
INTERRUPT64(0x61)
INTERRUPT64(0x62)
INTERRUPT64(0x63)
INTERRUPT64(0x64)
INTERRUPT64(0x65)
INTERRUPT64(0x66)
INTERRUPT64(0x67)
INTERRUPT64(0x68)
INTERRUPT64(0x69)
INTERRUPT64(0x6a)
INTERRUPT64(0x6b)
INTERRUPT64(0x6c)
INTERRUPT64(0x6d)
INTERRUPT64(0x6e)
INTERRUPT64(0x6f)

INTERRUPT64(0x70)
INTERRUPT64(0x71)
INTERRUPT64(0x72)
INTERRUPT64(0x73)
INTERRUPT64(0x74)
INTERRUPT64(0x75)
INTERRUPT64(0x76)
INTERRUPT64(0x77)
INTERRUPT64(0x78)
INTERRUPT64(0x79)
INTERRUPT64(0x7a)
INTERRUPT64(0x7b)
INTERRUPT64(0x7c)
INTERRUPT64(0x7d)
INTERRUPT64(0x7e)
EXCEP64_USR(0x7f,t64_dtrace_ret)

EXCEP64_SPC_USR(0x80,hi64_unix_scall)
EXCEP64_SPC_USR(0x81,hi64_mach_scall)
EXCEP64_SPC_USR(0x82,hi64_mdep_scall)
EXCEP64_SPC_USR(0x83,hi64_diag_scall)

INTERRUPT64(0x84)
INTERRUPT64(0x85)
INTERRUPT64(0x86)
INTERRUPT64(0x87)
INTERRUPT64(0x88)
INTERRUPT64(0x89)
INTERRUPT64(0x8a)
INTERRUPT64(0x8b)
INTERRUPT64(0x8c)
INTERRUPT64(0x8d)
INTERRUPT64(0x8e)
INTERRUPT64(0x8f)

INTERRUPT64(0x90)
INTERRUPT64(0x91)
INTERRUPT64(0x92)
INTERRUPT64(0x93)
INTERRUPT64(0x94)
INTERRUPT64(0x95)
INTERRUPT64(0x96)
INTERRUPT64(0x97)
INTERRUPT64(0x98)
INTERRUPT64(0x99)
INTERRUPT64(0x9a)
INTERRUPT64(0x9b)
INTERRUPT64(0x9c)
INTERRUPT64(0x9d)
INTERRUPT64(0x9e)
INTERRUPT64(0x9f)

INTERRUPT64(0xa0)
INTERRUPT64(0xa1)
INTERRUPT64(0xa2)
INTERRUPT64(0xa3)
INTERRUPT64(0xa4)
INTERRUPT64(0xa5)
INTERRUPT64(0xa6)
INTERRUPT64(0xa7)
INTERRUPT64(0xa8)
INTERRUPT64(0xa9)
INTERRUPT64(0xaa)
INTERRUPT64(0xab)
INTERRUPT64(0xac)
INTERRUPT64(0xad)
INTERRUPT64(0xae)
INTERRUPT64(0xaf)

INTERRUPT64(0xb0)
INTERRUPT64(0xb1)
INTERRUPT64(0xb2)
INTERRUPT64(0xb3)
INTERRUPT64(0xb4)
INTERRUPT64(0xb5)
INTERRUPT64(0xb6)
INTERRUPT64(0xb7)
INTERRUPT64(0xb8)
INTERRUPT64(0xb9)
INTERRUPT64(0xba)
INTERRUPT64(0xbb)
INTERRUPT64(0xbc)
INTERRUPT64(0xbd)
INTERRUPT64(0xbe)
INTERRUPT64(0xbf)

INTERRUPT64(0xc0)
INTERRUPT64(0xc1)
INTERRUPT64(0xc2)
INTERRUPT64(0xc3)
INTERRUPT64(0xc4)
INTERRUPT64(0xc5)
INTERRUPT64(0xc6)
INTERRUPT64(0xc7)
INTERRUPT64(0xc8)
INTERRUPT64(0xc9)
INTERRUPT64(0xca)
INTERRUPT64(0xcb)
INTERRUPT64(0xcc)
INTERRUPT64(0xcd)
INTERRUPT64(0xce)
INTERRUPT64(0xcf)

INTERRUPT64(0xd0)
INTERRUPT64(0xd1)
INTERRUPT64(0xd2)
INTERRUPT64(0xd3)
INTERRUPT64(0xd4)
INTERRUPT64(0xd5)
INTERRUPT64(0xd6)
INTERRUPT64(0xd7)
INTERRUPT64(0xd8)
INTERRUPT64(0xd9)
INTERRUPT64(0xda)
INTERRUPT64(0xdb)
INTERRUPT64(0xdc)
INTERRUPT64(0xdd)
INTERRUPT64(0xde)
INTERRUPT64(0xdf)

INTERRUPT64(0xe0)
INTERRUPT64(0xe1)
INTERRUPT64(0xe2)
INTERRUPT64(0xe3)
INTERRUPT64(0xe4)
INTERRUPT64(0xe5)
INTERRUPT64(0xe6)
INTERRUPT64(0xe7)
INTERRUPT64(0xe8)
INTERRUPT64(0xe9)
INTERRUPT64(0xea)
INTERRUPT64(0xeb)
INTERRUPT64(0xec)
INTERRUPT64(0xed)
INTERRUPT64(0xee)
INTERRUPT64(0xef)

INTERRUPT64(0xf0)
INTERRUPT64(0xf1)
INTERRUPT64(0xf2)
INTERRUPT64(0xf3)
INTERRUPT64(0xf4)
INTERRUPT64(0xf5)
INTERRUPT64(0xf6)
INTERRUPT64(0xf7)
INTERRUPT64(0xf8)
INTERRUPT64(0xf9)
INTERRUPT64(0xfa)
INTERRUPT64(0xfb)
INTERRUPT64(0xfc)
INTERRUPT64(0xfd)
INTERRUPT64(0xfe)
EXCEPTION64(0xff,t64_preempt)


	.text
/*
 *
 * Trap/interrupt entry points.
 *
 * All traps must create the following 32-bit save area on the PCB "stack"
 * - this is identical to the legacy mode 32-bit case:
 *
 *	gs
 *	fs
 *	es
 *	ds
 *	edi
 *	esi
 *	ebp
 *	cr2 (defined only for page fault)
 *	ebx
 *	edx
 *	ecx
 *	eax
 *	trap number
 *	error code
 *	eip
 *	cs
 *	eflags
 *	user esp - if from user
 *	user ss - if from user
 *
 * Above this is the trap number and compatibility mode handler address
 * (packed into an 8-byte stack entry) and the 64-bit interrupt stack frame:
 *
 *	(trapno, trapfn)
 *	err
 *	rip
 *	cs
 *	rflags
 *	rsp
 *	ss
 *
 */
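
/*
 * A minimal C view of the 32-bit save area above - a sketch only,
 * assuming the x86_saved_state32_t layout of the 10.7-era
 * mach/i386/thread_status.h (the R32_* displacements generated into
 * assym.s index this structure):
 *
 *	struct x86_saved_state32 {
 *		uint32_t	gs, fs, es, ds;
 *		uint32_t	edi, esi, ebp;
 *		uint32_t	cr2;		// only valid on page fault
 *		uint32_t	ebx, edx, ecx, eax;
 *		uint16_t	trapno, cpu;	// assumed 16-bit packing
 *		uint32_t	err, eip, cs, efl;
 *		uint32_t	uesp, ss;
 *	};
 */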

	.code32

/*
 * Control is passed here to return to the compatibility mode user.
 * At this stage we're in kernel space in compatibility mode
 * but we need to switch into 64-bit mode in the 4G-based trampoline
 * space before performing the iret.
 */
ret_to_user:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx

	movl	TH_PCB_IDS(%ecx),%eax	/* Obtain this thread's debug state */
	cmpl	$0,%eax			/* Is there a debug register context? */
	je	2f			/* branch if not */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP /* Are we a 32-bit task? */
	jne	1f
	movl	DS_DR0(%eax), %ecx	/* If so, load the 32 bit DRs */
	movl	%ecx, %db0
	movl	DS_DR1(%eax), %ecx
	movl	%ecx, %db1
	movl	DS_DR2(%eax), %ecx
	movl	%ecx, %db2
	movl	DS_DR3(%eax), %ecx
	movl	%ecx, %db3
	movl	DS_DR7(%eax), %ecx
	movl	%ecx, %gs:CPU_DR7
	movl	$0, %gs:CPU_DR7 + 4
	jmp	2f
1:
	ENTER_64BIT_MODE()		/* Enter long mode */
	mov	DS64_DR0(%eax), %rcx	/* Load the full width DRs */
	mov	%rcx, %dr0
	mov	DS64_DR1(%eax), %rcx
	mov	%rcx, %dr1
	mov	DS64_DR2(%eax), %rcx
	mov	%rcx, %dr2
	mov	DS64_DR3(%eax), %rcx
	mov	%rcx, %dr3
	mov	DS64_DR7(%eax), %rcx
	mov	%rcx, %gs:CPU_DR7
	jmp	3f			/* Enter uberspace */
2:
	ENTER_64BIT_MODE()
3:
	ENTER_UBERSPACE()

	/*
	 * Now switch %cr3, if necessary.
	 */
	swapgs				/* switch back to uber-kernel gs base */
	mov	%gs:CPU_TASK_CR3,%rcx
	mov	%rcx,%gs:CPU_ACTIVE_CR3
	mov	%cr3, %rax
	cmp	%rcx, %rax
	je	1f
	/* flag the copyio engine state as WINDOWS_CLEAN */
	mov	%gs:CPU_ACTIVE_THREAD,%eax
	movl	$(WINDOWS_CLEAN),TH_COPYIO_STATE(%eax)
	mov	%rcx,%cr3		/* switch to user's address space */
1:

	mov	%gs:CPU_DR7, %rax	/* Is there a debug control register? */
	cmp	$0, %rax
	je	1f
	mov	%rax, %dr7		/* Set DR7 */
	movq	$0, %gs:CPU_DR7
1:

	/*
	 * Adjust stack to use uber-space.
	 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp		/* relocate into uber-space */
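	/*
	 * A worked example of the shl/shrd pair above: shl $32 moves the
	 * low half of %rsp into the high half, then shrd shifts 32 bits
	 * back in from %rax, leaving the high half equal to
	 * KERNEL_UBER_BASE_HI32 and the low half unchanged. E.g. for a
	 * hypothetical %rsp of 0x0000000012345000:
	 *	after shl  $32:       %rsp = 0x1234500000000000
	 *	after shrd $32, %rax: %rsp = (KERNEL_UBER_BASE_HI32 << 32)
	 *	                             | 0x12345000
	 */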

	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	jne	L_64bit_return
	jmp	L_32bit_return

ret_to_kernel:
	ENTER_64BIT_MODE()
	ENTER_UBERSPACE()

	swapgs				/* switch back to uber-kernel gs base */

	/*
	 * Adjust stack to use uber-space.
	 */
	mov	$(KERNEL_UBER_BASE_HI32), %rax
	shl	$32, %rsp
	shrd	$32, %rax, %rsp		/* relocate into uber-space */

	/* Check for return to 64-bit kernel space (EFI today) */
	cmpl	$(SS_32), SS_FLAVOR(%rsp)	/* 32-bit state? */
	jne	L_64bit_return
	/* fall through for 32-bit return */

L_32bit_return:
	/*
	 * Restore registers into the machine state for iret.
	 */
	movl	R32_EIP(%rsp), %eax
	movl	%eax, ISC32_RIP(%rsp)
	movl	R32_EFLAGS(%rsp), %eax
	movl	%eax, ISC32_RFLAGS(%rsp)
	movl	R32_CS(%rsp), %eax
	movl	%eax, ISC32_CS(%rsp)
	movl	R32_UESP(%rsp), %eax
	movl	%eax, ISC32_RSP(%rsp)
	movl	R32_SS(%rsp), %eax
	movl	%eax, ISC32_SS(%rsp)

	/*
	 * Restore general 32-bit registers
	 */
	movl	R32_EAX(%rsp), %eax
	movl	R32_EBX(%rsp), %ebx
	movl	R32_ECX(%rsp), %ecx
	movl	R32_EDX(%rsp), %edx
	movl	R32_EBP(%rsp), %ebp
	movl	R32_ESI(%rsp), %esi
	movl	R32_EDI(%rsp), %edi

	/*
	 * Restore segment registers. We may take an exception here but
	 * we've got enough space left in the save frame area to absorb
	 * a hardware frame plus the trapfn and trapno.
	 */
	swapgs
EXT(ret32_set_ds):
	movw	R32_DS(%rsp), %ds
EXT(ret32_set_es):
	movw	R32_ES(%rsp), %es
EXT(ret32_set_fs):
	movw	R32_FS(%rsp), %fs
EXT(ret32_set_gs):
	movw	R32_GS(%rsp), %gs

	add	$(ISC32_OFFSET)+8+8+8, %rsp	/* pop compat frame +
						   trapno, trapfn and error */
	cmp	$(SYSENTER_CS),ISF64_CS-8-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_fast_exit
EXT(ret32_iret):
	iretq				/* return from interrupt */

L_fast_exit:
	pop	%rdx			/* user return eip */
	pop	%rcx			/* pop and toss cs */
	andl	$(~EFL_IF), (%rsp)	/* clear interrupts enable, sti below */
	popf				/* flags - carry denotes failure */
	pop	%rcx			/* user return esp */
	.code32
	sti				/* interrupts enabled after sysexit */
	sysexit				/* 32-bit sysexit */
	.code64

L_64bit_return:
	/*
	 * Set the GS Base MSR with the user's gs base.
	 */
	movl	%gs:CPU_UBER_USER_GS_BASE, %eax
	movl	%gs:CPU_UBER_USER_GS_BASE+4, %edx
	movl	$(MSR_IA32_GS_BASE), %ecx
	swapgs
	testb	$3, R64_CS(%rsp)	/* returning to user-space? */
	jz	1f
	wrmsr				/* set 64-bit base */
1:

	/*
	 * Restore general 64-bit registers
	 */
	mov	R64_R15(%rsp), %r15
	mov	R64_R14(%rsp), %r14
	mov	R64_R13(%rsp), %r13
	mov	R64_R12(%rsp), %r12
	mov	R64_R11(%rsp), %r11
	mov	R64_R10(%rsp), %r10
	mov	R64_R9(%rsp), %r9
	mov	R64_R8(%rsp), %r8
	mov	R64_RSI(%rsp), %rsi
	mov	R64_RDI(%rsp), %rdi
	mov	R64_RBP(%rsp), %rbp
	mov	R64_RDX(%rsp), %rdx
	mov	R64_RBX(%rsp), %rbx
	mov	R64_RCX(%rsp), %rcx
	mov	R64_RAX(%rsp), %rax

	add	$(ISS64_OFFSET)+8+8+8, %rsp	/* pop saved state frame +
						   trapno, trapfn and error */
	cmpl	$(SYSCALL_CS),ISF64_CS-8-8-8(%rsp)
					/* test for fast entry/exit */
	je	L_sysret
EXT(ret64_iret):
	iretq				/* return from interrupt */

L_sysret:
	/*
	 * Here to load rcx/r11/rsp and perform the sysret back to user-space.
	 *	rcx	user rip
	 *	r11	user rflags
	 *	rsp	user stack pointer
	 */
	mov	ISF64_RIP-8-8-8(%rsp), %rcx
	mov	ISF64_RFLAGS-8-8-8(%rsp), %r11
	mov	ISF64_RSP-8-8-8(%rsp), %rsp
	sysretq				/* return from system call */


/*
 * Common path to enter locore handlers.
 */
L_enter_lohandler:
	swapgs				/* switch to kernel gs (cpu_data) */
L_enter_lohandler_continue:
	cmpl	$(USER64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter		/* this is a 64-bit user task */
	cmpl	$(KERNEL64_CS), ISF64_CS(%rsp)
	je	L_64bit_enter		/* we're in 64-bit (EFI) code */
	jmp	L_32bit_enter

/*
 * System call handlers.
 * These are entered via a syscall interrupt. The system call number in %rax
 * is saved to the error code slot in the stack frame. We then branch to the
 * common state saving code.
 */
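
/*
 * For illustration, a 32-bit process reaches hi64_unix_scall by issuing
 * the BSD system-call software interrupt with the call number in %eax;
 * a hypothetical minimal stub (not the actual libc sequence):
 *
 *	movl	$4, %eax		# SYS_write
 *	int	$0x80			# vector 0x80 -> hi64_unix_scall
 */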

Entry(hi64_unix_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_unix_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_UNIX_SCALL)
	push	$(UNIX_INT)
	jmp	L_32bit_enter_check


Entry(hi64_mach_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_mach_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_MACH_SCALL)
	push	$(MACH_INT)
	jmp	L_32bit_enter_check


Entry(hi64_mdep_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_mdep_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_MDEP_SCALL)
	push	$(MACHDEP_INT)
	jmp	L_32bit_enter_check


Entry(hi64_diag_scall)
	swapgs				/* switch to kernel gs (cpu_data) */
L_diag_scall_continue:
	push	%rax			/* save system call number */
	push	$(LO_DIAG_SCALL)
	push	$(DIAG_INT)
	jmp	L_32bit_enter_check

Entry(hi64_syscall)
	swapgs				/* Kapow! get per-cpu data area */
L_syscall_continue:
	mov	%rsp, %gs:CPU_UBER_TMP	/* save user stack */
	mov	%gs:CPU_UBER_ISF, %rsp	/* switch stack to pcb */

	/*
	 * Save values in the ISF frame in the PCB
	 * to cons up the saved machine state.
	 */
	movl	$(USER_DS), ISF64_SS(%rsp)
	movl	$(SYSCALL_CS), ISF64_CS(%rsp)	/* cs - a pseudo-segment */
	mov	%r11, ISF64_RFLAGS(%rsp)	/* rflags */
	mov	%rcx, ISF64_RIP(%rsp)		/* rip */
	mov	%gs:CPU_UBER_TMP, %rcx
	mov	%rcx, ISF64_RSP(%rsp)		/* user stack */
	mov	%rax, ISF64_ERR(%rsp)		/* err/rax - syscall code */
	movl	$(T_SYSCALL), ISF64_TRAPNO(%rsp)	/* trapno */
	movl	$(LO_SYSCALL), ISF64_TRAPFN(%rsp)
	jmp	L_64bit_enter		/* this can only be a 64-bit task */


L_32bit_enter_check:
	/*
	 * Check we're not a confused 64-bit user.
	 */
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
	jmp	L_32bit_enter

/*
 * sysenter entry point
 * Requires user code to set up:
 *	edx: user instruction pointer (return address)
 *	ecx: user stack pointer
 *		on which is pushed stub ret addr and saved ebx
 * Return to user-space is made using sysexit.
 * Note: sysenter/sysexit cannot be used for calls returning a value in edx,
 *	 or requiring ecx to be preserved.
 */
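/*
 * A hypothetical minimal user-mode sequence satisfying the requirements
 * above, for illustration only (the real stub lives in user space):
 *
 *	movl	%esp, %ecx		# ecx := user stack pointer
 *	movl	$1f, %edx		# edx := return address for sysexit
 *	sysenter			# enters hi64_sysenter
 * 1:	ret
 */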
Entry(hi64_sysenter)
	mov	(%rsp), %rsp		/* switch from temporary stack to pcb */
	/*
	 * Push values on to the PCB stack
	 * to cons up the saved machine state.
	 */
	push	$(USER_DS)		/* ss */
	push	%rcx			/* uesp */
	pushf				/* flags */
	/*
	 * Clear, among others, the Nested Task (NT) flag bit;
	 * this is zeroed by INT, but not by SYSENTER.
	 */
	push	$0
	popf
	push	$(SYSENTER_CS)		/* cs */
	swapgs				/* switch to kernel gs (cpu_data) */
L_sysenter_continue:
	push	%rdx			/* eip */
	push	%rax			/* err/eax - syscall code */
	push	$0
	push	$(T_SYSENTER)
	orl	$(EFL_IF), ISF64_RFLAGS(%rsp)
	movl	$(LO_MACH_SCALL), ISF64_TRAPFN(%rsp)
	testl	%eax, %eax
	js	L_32bit_enter_check
	movl	$(LO_UNIX_SCALL), ISF64_TRAPFN(%rsp)
	cmpl	$(TASK_MAP_32BIT), %gs:CPU_TASK_MAP
	jne	L_64bit_entry_reject
/* If the caller (typically LibSystem) has recorded the cumulative size of
 * the arguments in EAX, copy them over from the user stack directly.
 * We recover from exceptions inline--if the copy loop doesn't complete
 * due to an exception, we fall back to copyin from compatibility mode.
 * We can potentially extend this mechanism to mach traps as well (DRK).
 */
L_sysenter_copy_args:
	testl	$(I386_SYSCALL_ARG_BYTES_MASK), %eax
	jz	L_32bit_enter
	xor	%r9, %r9
	mov	%gs:CPU_UBER_ARG_STORE, %r8	/* %r8 := per-cpu arg store */
	movl	%eax, %r9d
	mov	%gs:CPU_UBER_ARG_STORE_VALID, %r12 /* %r12 := valid-flag addr */
	xor	%r10, %r10			/* %r10 := dword index */
	shrl	$(I386_SYSCALL_ARG_DWORDS_SHIFT), %r9d
	andl	$(I386_SYSCALL_ARG_DWORDS_MASK), %r9d /* %r9d := dword count */
	movl	$0, (%r12)			/* mark arg store invalid */
EXT(hi64_sysenter_user_arg_copy):
0:
	movl	4(%rcx, %r10, 4), %r11d		/* copy args, skipping the */
	movl	%r11d, (%r8, %r10, 4)		/* return addr at (%rcx) */
	incl	%r10d
	decl	%r9d
	jnz	0b
	movl	$1, (%r12)			/* mark arg store valid */
	/* Fall through to 32-bit handler */

L_32bit_enter:
	cld
	/*
	 * Make space for the compatibility save area.
	 */
	sub	$(ISC32_OFFSET), %rsp
	movl	$(SS_32), SS_FLAVOR(%rsp)

	/*
	 * Save segment regs
	 */
	mov	%ds, R32_DS(%rsp)
	mov	%es, R32_ES(%rsp)
	mov	%fs, R32_FS(%rsp)
	mov	%gs, R32_GS(%rsp)

	/*
	 * Save general 32-bit registers
	 */
	mov	%eax, R32_EAX(%rsp)
	mov	%ebx, R32_EBX(%rsp)
	mov	%ecx, R32_ECX(%rsp)
	mov	%edx, R32_EDX(%rsp)
	mov	%ebp, R32_EBP(%rsp)
	mov	%esi, R32_ESI(%rsp)
	mov	%edi, R32_EDI(%rsp)

	/* Unconditionally save cr2; only meaningful on page faults */
	mov	%cr2, %rax
	mov	%eax, R32_CR2(%rsp)

	/*
	 * Copy registers already saved in the machine state
	 * (in the interrupt stack frame) into the compat save area.
	 */
	mov	ISC32_RIP(%rsp), %eax
	mov	%eax, R32_EIP(%rsp)
	mov	ISC32_RFLAGS(%rsp), %eax
	mov	%eax, R32_EFLAGS(%rsp)
	mov	ISC32_CS(%rsp), %eax
	mov	%eax, R32_CS(%rsp)
	testb	$3, %al
	jz	1f
	xor	%ebp, %ebp
1:
	mov	ISC32_RSP(%rsp), %eax
	mov	%eax, R32_UESP(%rsp)
	mov	ISC32_SS(%rsp), %eax
	mov	%eax, R32_SS(%rsp)
L_32bit_enter_after_fault:
	mov	ISC32_TRAPNO(%rsp), %ebx	/* %ebx := trapno for later */
	mov	%ebx, R32_TRAPNO(%rsp)
	mov	ISC32_ERR(%rsp), %eax
	mov	%eax, R32_ERR(%rsp)
	mov	ISC32_TRAPFN(%rsp), %edx

/*
 * Common point to enter lo_handler in compatibility mode:
 *	%ebx	trapno
 *	%edx	locore handler address
 */
L_enter_lohandler2:
	/*
	 * Switch address space to kernel
	 * if not shared space and not already mapped.
	 * Note: cpu_task_map is valid only if cpu_task_cr3 is loaded in cr3.
	 */
	mov	%cr3, %rax
	mov	%gs:CPU_TASK_CR3, %rcx
	cmp	%rax, %rcx		/* is the task's cr3 loaded? */
	jne	1f
	cmpl	$(TASK_MAP_64BIT_SHARED), %gs:CPU_TASK_MAP
	je	2f
1:
	mov	%gs:CPU_KERNEL_CR3, %rcx
	cmp	%rax, %rcx
	je	2f
	mov	%rcx, %cr3
	mov	%rcx, %gs:CPU_ACTIVE_CR3
2:
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* Get the active thread */
	cmpl	$0, TH_PCB_IDS(%ecx)	/* Is there a debug register state? */
	jz	21f
	xor	%ecx, %ecx		/* If so, reset DR7 (the control) */
	mov	%rcx, %dr7
21:
	/*
	 * Switch to compatibility mode.
	 * Then establish kernel segments.
	 */
	swapgs				/* Done with uber-kernel gs */
	ENTER_COMPAT_MODE()

	/*
	 * Now in compatibility mode and running in compatibility space
	 * prepare to enter the locore handler.
	 *	%ebx	trapno
	 *	%edx	lo_handler pointer
	 * Note: the stack pointer (now 32-bit) is directly addressing the
	 * kernel below 4G and therefore is automagically re-based.
	 */
	mov	$(KERNEL_DS), %eax
	mov	%eax, %ss
	mov	%eax, %ds
	mov	%eax, %es
	mov	%eax, %fs
	mov	$(CPU_DATA_GS), %eax
	mov	%eax, %gs

	incl	%gs:hwIntCnt(,%ebx,4)	/* Bump the trap/intr count */

	/* Dispatch the designated lo handler */
	jmp	*%edx

	.code64
L_64bit_entry_reject:
	/*
	 * Here for a 64-bit user attempting an invalid kernel entry.
	 */
	movl	$(LO_ALLTRAPS), ISF64_TRAPFN(%rsp)
	movl	$(T_INVALID_OPCODE), ISF64_TRAPNO(%rsp)
	/* Fall through... */

L_64bit_enter:
	/*
	 * Here for a 64-bit user task, or special 64-bit kernel code.
	 * Make space for the save area.
	 */
	sub	$(ISS64_OFFSET), %rsp
	movl	$(SS_64), SS_FLAVOR(%rsp)

	cld
	/*
	 * Save segment regs
	 */
	mov	%fs, R64_FS(%rsp)
	mov	%gs, R64_GS(%rsp)

	/* Save general-purpose registers */
	mov	%rax, R64_RAX(%rsp)
	mov	%rcx, R64_RCX(%rsp)
	mov	%rbx, R64_RBX(%rsp)
	mov	%rbp, R64_RBP(%rsp)
	mov	%r11, R64_R11(%rsp)
	mov	%r12, R64_R12(%rsp)
	mov	%r13, R64_R13(%rsp)
	mov	%r14, R64_R14(%rsp)
	mov	%r15, R64_R15(%rsp)

	/* cr2 is significant only for page-faults */
	mov	%cr2, %rax
	mov	%rax, R64_CR2(%rsp)

	/* Other registers (which may contain syscall args) */
	mov	%rdi, R64_RDI(%rsp)	/* arg0 .. */
	mov	%rsi, R64_RSI(%rsp)
	mov	%rdx, R64_RDX(%rsp)
	mov	%r10, R64_R10(%rsp)
	mov	%r8, R64_R8(%rsp)
	mov	%r9, R64_R9(%rsp)	/* .. arg5 */

L_64bit_enter_after_fault:
	/*
	 * At this point we're almost ready to join the common lo-entry code.
	 */
	mov	R64_TRAPNO(%rsp), %ebx
	mov	R64_TRAPFN(%rsp), %edx

	testb	$3, ISF64_CS+ISS64_OFFSET(%rsp)
	jz	1f
	xor	%rbp, %rbp
1:
	jmp	L_enter_lohandler2

Entry(hi64_page_fault)
	push	$(LO_ALLTRAPS)
	push	$(T_PAGE_FAULT)
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler
	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	jne	hi64_kernel_trap
	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_32bit_enter

/*
 * Debug trap.  Check for single-stepping across system call into
 * kernel.  If this is the case, taking the debug trap has turned
 * off single-stepping - save the flags register with the trace
 * bit set.
 */
Entry(hi64_debug)
	swapgs				/* set %gs for cpu data */
	push	$0			/* error code */
	push	$(LO_ALLTRAPS)
	push	$(T_DEBUG)

	testb	$3, ISF64_CS(%rsp)
	jnz	L_enter_lohandler_continue

	/*
	 * trap came from kernel mode
	 */
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_mach_scall)), ISF64_RIP(%rsp)
	jne	6f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_mach_scall_continue	/* continue system call entry */
6:
	cmpl	$(EXT(hi64_mdep_scall)), ISF64_RIP(%rsp)
	jne	5f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_mdep_scall_continue	/* continue system call entry */
5:
	cmpl	$(EXT(hi64_unix_scall)), ISF64_RIP(%rsp)
	jne	4f
	add	$(ISF64_SIZE),%rsp	/* remove entire intr stack frame */
	jmp	L_unix_scall_continue	/* continue system call entry */
4:
	cmpl	$(EXT(hi64_sysenter)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue
	/*
	 * Interrupt stack frame has been pushed on the temporary stack.
	 * We have to switch to pcb stack and copy eflags.
	 */
	add	$40,%rsp		/* remove trapno/trapfn/err/rip/cs */
	push	%rcx			/* save %rcx - user stack pointer */
	mov	32(%rsp),%rcx		/* top of intr stack -> pcb stack */
	xchg	%rcx,%rsp		/* switch to pcb stack */
	push	$(USER_DS)		/* ss */
	push	(%rcx)			/* saved %rcx into rsp slot */
	push	8(%rcx)			/* rflags */
	mov	(%rcx),%rcx		/* restore %rcx */
	push	$(SYSENTER_TF_CS)	/* cs - not SYSENTER_CS for iret path */
	jmp	L_sysenter_continue	/* continue sysenter entry */

Entry(hi64_double_fault)
	swapgs				/* set %gs for cpu data */
	push	$(LO_DOUBLE_FAULT)
	push	$(T_DOUBLE_FAULT)

	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(hi64_syscall)), ISF64_RIP(%rsp)
	jne	L_enter_lohandler_continue

	mov	ISF64_RSP(%rsp), %rsp
	jmp	L_syscall_continue


/*
 * General protection or segment-not-present fault.
 * Check for a GP/NP fault in the kernel_return
 * sequence; if there, report it as a GP/NP fault on the user's instruction.
 *
 * rsp->	 0 ISF64_TRAPNO:	trap code (NP or GP)
 *		 8 ISF64_TRAPFN:	trap function
 *		16 ISF64_ERR:		segment number in error (error code)
 *		24 ISF64_RIP:		rip
 *		32 ISF64_CS:		cs
 *		40 ISF64_RFLAGS:	rflags
 *		48 ISF64_RSP:		rsp
 *		56 ISF64_SS:		ss
 *		64 old registers (trap is from kernel)
 */
Entry(hi64_gen_prot)
	push	$(LO_ALLTRAPS)
	push	$(T_GENERAL_PROTECTION)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(hi64_stack_fault)
	push	$(LO_ALLTRAPS)
	push	$(T_STACK_FAULT)
	jmp	trap_check_kernel_exit	/* check for kernel exit sequence */

Entry(hi64_segnp)
	push	$(LO_ALLTRAPS)
	push	$(T_SEGMENT_NOT_PRESENT)
					/* indicate fault type */
trap_check_kernel_exit:
	testb	$3,ISF64_CS(%rsp)
	jnz	L_enter_lohandler
					/* trap was from kernel mode, so */
					/* check for the kernel exit sequence */
	cmpl	$(KERNEL_UBER_BASE_HI32), ISF64_RIP+4(%rsp)
	jne	L_enter_lohandler_continue	/* trap not in uber-space */

	cmpl	$(EXT(ret32_iret)), ISF64_RIP(%rsp)
	je	L_fault_iret32
	cmpl	$(EXT(ret32_set_ds)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_es)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_fs)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg
	cmpl	$(EXT(ret32_set_gs)), ISF64_RIP(%rsp)
	je	L_32bit_fault_set_seg

	cmpl	$(EXT(ret64_iret)), ISF64_RIP(%rsp)
	je	L_fault_iret64

	cmpl	$(EXT(hi64_sysenter_user_arg_copy)), ISF64_RIP(%rsp)
	cmove	ISF64_RSP(%rsp), %rsp
	je	L_32bit_enter

hi64_kernel_trap:
	/*
	 * Here after taking an unexpected trap from kernel mode - perhaps
	 * while running in the trampolines hereabouts.
	 * Make sure we're not on the PCB stack, if so move to the kernel stack.
	 * This is likely a fatal condition.
	 * But first, try to be sure we have the kernel gs base active...
	 */
	cmpq	$0, %gs:CPU_THIS	/* test gs_base */
	js	1f			/* -ve kernel addr, no swap */
	swapgs				/* +ve user addr, swap */
1:
	movq	%rax, %gs:CPU_UBER_TMP	/* save %rax */
	movq	%gs:CPU_UBER_ISF, %rax	/* PCB stack addr */
	subq	%rsp, %rax
	cmpq	$(PAGE_SIZE), %rax	/* current stack in PCB? */
	movq	%gs:CPU_UBER_TMP, %rax	/* restore %rax */
	ja	L_enter_lohandler_continue	/* stack not in PCB */

	/*
	 * Here if %rsp is in the PCB
	 * Copy the interrupt stack frame from PCB stack to kernel stack
	 */
	movq	%gs:CPU_KERNEL_STACK, %rax	/* note: %rax restored below */
	xchgq	%rax, %rsp
	pushq	ISF64_SS(%rax)
	pushq	ISF64_RSP(%rax)
	pushq	ISF64_RFLAGS(%rax)
	pushq	ISF64_CS(%rax)
	pushq	ISF64_RIP(%rax)
	pushq	ISF64_ERR(%rax)
	pushq	ISF64_TRAPFN(%rax)
	pushq	ISF64_TRAPNO(%rax)
	movq	%gs:CPU_UBER_TMP, %rax	/* restore %rax */
	jmp	L_enter_lohandler_continue


/*
 * GP/NP fault on IRET: CS or SS is in error.
 * All registers contain the user's values.
 *
 * on SP is
 *	  0	ISF64_TRAPNO:	trap code (NP or GP)
 *	  8	ISF64_TRAPFN:	trap function
 *	 16	ISF64_ERR:	segment number in error (error code)
 *	 24	ISF64_RIP:	rip
 *	 32	ISF64_CS:	cs
 *	 40	ISF64_RFLAGS:	rflags
 *	 48	ISF64_RSP:	rsp
 *	 56	ISF64_SS:	ss	--> new trapno/trapfn
 *	 64	pad		--> new errcode
 *	 72	user rip
 *	 80	user cs
 *	 88	user rflags
 *	 96	user rsp
 *	104	user ss	(16-byte aligned)
 */
L_fault_iret32:
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don't need saved rip) */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	%rax, ISF64_SS(%rsp)	/* put in user trap number */
	mov	ISF64_ERR(%rsp), %rax
	mov	%rax, 8+ISF64_SS(%rsp)	/* put in user errcode */
	mov	ISF64_RIP(%rsp), %rax	/* restore rax */
	add	$(ISF64_SS), %rsp	/* reset to original frame */
					/* now treat as fault from user */
	swapgs
	jmp	L_32bit_enter

L_fault_iret64:
	mov	%rax, ISF64_RIP(%rsp)	/* save rax (we don't need saved rip) */
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	%rax, ISF64_SS(%rsp)	/* put in user trap number */
	mov	ISF64_ERR(%rsp), %rax
	mov	%rax, 8+ISF64_SS(%rsp)	/* put in user errcode */
	mov	ISF64_RIP(%rsp), %rax	/* restore rax */
	add	$(ISF64_SS), %rsp	/* reset to original frame */
					/* now treat as fault from user */
	swapgs
	jmp	L_64bit_enter

/*
 * Fault restoring a segment register.  All of the saved state is still
 * on the stack untouched since we didn't move the stack pointer.
 */
L_32bit_fault_set_seg:
	mov	ISF64_TRAPNO(%rsp), %rax
	mov	ISF64_ERR(%rsp), %rdx
	mov	ISF64_RSP(%rsp), %rsp	/* reload stack prior to fault */
	mov	%rax,ISC32_TRAPNO(%rsp)
	mov	%rdx,ISC32_ERR(%rsp)
					/* now treat as fault from user */
					/* except that all the state is */
					/* already saved - we just have to */
					/* move the trapno and error into */
					/* the compatibility frame */
	swapgs
	jmp	L_32bit_enter_after_fault


/*
 * Fatal exception handlers:
 */
Entry(db_task_dbl_fault64)
	push	$(LO_DOUBLE_FAULT)
	push	$(T_DOUBLE_FAULT)
	jmp	L_enter_lohandler

Entry(db_task_stk_fault64)
	push	$(LO_DOUBLE_FAULT)
	push	$(T_STACK_FAULT)
	jmp	L_enter_lohandler

Entry(mc64)
	push	$(0)			/* Error */
	push	$(LO_MACHINE_CHECK)
	push	$(T_MACHINE_CHECK)
	jmp	L_enter_lohandler


	.code32

/*
 * All task 'exceptions' enter lo_alltraps:
 *	esp	 -> x86_saved_state_t
 *
 * The rest of the state is set up as:
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_alltraps)
	movl	R32_CS(%esp),%eax	/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
	jne	1f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
1:
	testb	$3,%al
	jz	trap_from_kernel
					/* user mode trap */
	TIME_TRAP_UENTRY

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	TH_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	movl	%gs:CPU_KERNEL_STACK,%ebx
	xchgl	%ebx,%esp		/* switch to kernel stack */

	CCALL1(user_trap, %ebx)		/* call user trap routine */
					/* user_trap() unmasks interrupts */
	cli				/* hold off intrs - critical section */
	xorl	%ecx,%ecx		/* don't check if we're in the PFZ */

/*
 * Return from trap or system call, checking for ASTs.
 * On lowbase PCB stack with intrs disabled
 */
Entry(return_from_trap)
	movl	%gs:CPU_ACTIVE_THREAD, %esp
	movl	TH_PCB_ISS(%esp),%esp	/* switch back to PCB stack */
	movl	%gs:CPU_PENDING_AST, %eax
	testl	%eax, %eax
	je	return_to_user		/* branch if no AST */
LEXT(return_from_trap_with_ast)
	movl	%gs:CPU_KERNEL_STACK, %ebx
	xchgl	%ebx, %esp		/* switch to kernel stack */

	testl	%ecx, %ecx		/* see if we need to check for an EIP in the PFZ */
	je	2f			/* no, go handle the AST */
	cmpl	$(SS_64), SS_FLAVOR(%ebx)	/* are we a 64-bit task? */
	je	1f
					/* no... 32-bit user mode */
	movl	R32_EIP(%ebx), %eax
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* clear frame pointer */
	CCALL1(commpage_is_in_pfz32, %eax)
	popl	%ebx			/* retrieve pointer to PCB stack */
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R32_EBX(%ebx)	/* let the PFZ know we've pended an AST */
	xchgl	%ebx, %esp		/* switch back to PCB stack */
	jmp	return_to_user
1:					/* 64-bit user mode */
	movl	R64_RIP(%ebx), %ecx
	movl	R64_RIP+4(%ebx), %eax
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* clear frame pointer */
	CCALL2(commpage_is_in_pfz64, %ecx, %eax)
	popl	%ebx			/* retrieve pointer to PCB stack */
	testl	%eax, %eax
	je	2f			/* not in the PFZ... go service AST */
	movl	%eax, R64_RBX(%ebx)	/* let the PFZ know we've pended an AST */
	xchgl	%ebx, %esp		/* switch back to PCB stack */
	jmp	return_to_user
2:
	sti				/* interrupts always enabled on return to user mode */
	pushl	%ebx			/* save PCB stack */
	xorl	%ebp, %ebp		/* Clear framepointer */
	CCALL1(i386_astintr, $0)	/* take the AST */
	cli

	popl	%esp			/* switch back to PCB stack (w/exc link) */

	xorl	%ecx, %ecx		/* don't check if we're in the PFZ */
	jmp	EXT(return_from_trap)	/* and check again (rare) */


/*
 * Trap from kernel mode.  No need to switch stacks.
 * Interrupts must be off here - we will set them to the state at the time
 * of trap as soon as it's safe for us to do so and not recurse doing
 * preemption.
 */
trap_from_kernel:
	movl	%esp, %eax		/* saved state addr */
	pushl	R32_EIP(%esp)		/* Simulate a CALL from fault point */
	pushl	%ebp			/* Extend framepointer chain */
	movl	%esp, %ebp
	CCALL1WITHSP(kernel_trap, %eax)	/* Call kernel trap handler */
	popl	%ebp
	addl	$4, %esp
	cli

	movl	%gs:CPU_PENDING_AST,%eax	/* get pending asts */
	testl	$ AST_URGENT,%eax	/* any urgent preemption? */
	je	ret_to_kernel		/* no, nothing to do */
	cmpl	$ T_PREEMPT,R32_TRAPNO(%esp)
	je	ret_to_kernel		/* T_PREEMPT handled in kernel_trap() */
	testl	$ EFL_IF,R32_EFLAGS(%esp)	/* interrupts disabled? */
	je	ret_to_kernel
	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel
	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	and	EXT(kernel_stack_mask),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	CCALL1(i386_astintr, $1)	/* take the AST */


/*
 * All interrupts on all tasks enter here with:
 *	esp	 -> x86_saved_state_t
 *
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */
Entry(lo_allintrs)
	/*
	 * test whether already on interrupt stack
	 */
	movl	%gs:CPU_INT_STACK_TOP,%ecx
	cmpl	%esp,%ecx
	jb	1f
	leal	-INTSTACK_SIZE(%ecx),%edx
	cmpl	%esp,%edx
	jb	int_from_intstack
1:
	xchgl	%ecx,%esp		/* switch to interrupt stack */

	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */

	subl	$8, %esp		/* for 16-byte stack alignment */
	pushl	%ecx			/* save pointer to old stack */
	movl	%ecx,%gs:CPU_INT_STATE	/* save intr state */

	TIME_INT_ENTRY			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx
	movl	TH_TASK(%ecx),%ebx

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL

	movl	%gs:CPU_INT_STATE, %eax
	CCALL1(interrupt, %eax)		/* call generic interrupt routine */

	cli				/* just in case we returned with intrs enabled */
	xorl	%eax,%eax
	movl	%eax,%gs:CPU_INT_STATE	/* clear intr state pointer */

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL

	TIME_INT_EXIT			/* do timing */

	movl	%gs:CPU_ACTIVE_THREAD,%eax
	movl	TH_PCB_FPS(%eax),%eax	/* get pcb's ifps */
	testl	%eax, %eax		/* Is there a context */
	je	1f			/* Branch if not */
	cmpl	$0, FP_VALID(%eax)	/* Check fp_valid */
	jne	1f			/* Branch if valid */
	clts				/* Clear TS */
	jmp	2f
1:
	movl	%cr0,%eax		/* get cr0 */
	orl	$(CR0_TS),%eax		/* or in TS bit */
	movl	%eax,%cr0		/* set cr0 */
2:
	popl	%esp			/* switch back to old stack */

	/* Load interrupted code segment into %eax */
	movl	R32_CS(%esp),%eax	/* assume 32-bit state */
	cmpl	$(SS_64),SS_FLAVOR(%esp)/* 64-bit? */
	jne	3f
	movl	R64_CS(%esp),%eax	/* 64-bit user mode */
3:
	testb	$3,%al			/* user mode, */
	jnz	ast_from_interrupt_user	/* go handle potential ASTs */
	/*
	 * we only want to handle preemption requests if
	 * the interrupt fell in the kernel context
	 * and preemption isn't disabled
	 */
	movl	%gs:CPU_PENDING_AST,%eax
	testl	$ AST_URGENT,%eax	/* any urgent requests? */
	je	ret_to_kernel		/* no, nothing to do */

	cmpl	$0,%gs:CPU_PREEMPTION_LEVEL	/* preemption disabled? */
	jne	ret_to_kernel		/* yes, skip it */

	movl	%gs:CPU_KERNEL_STACK,%eax
	movl	%esp,%ecx
	xorl	%eax,%ecx
	and	EXT(kernel_stack_mask),%ecx
	testl	%ecx,%ecx		/* are we on the kernel stack? */
	jne	ret_to_kernel		/* no, skip it */

	/*
	 * Take an AST from kernel space.  We don't need (and don't want)
	 * to do as much as the case where the interrupt came from user
	 * space.
	 */
	CCALL1(i386_astintr, $1)

	jmp	ret_to_kernel


/*
 * nested int - simple path, can't preempt etc on way out
 */
int_from_intstack:
	incl	%gs:CPU_PREEMPTION_LEVEL
	incl	%gs:CPU_INTERRUPT_LEVEL
	incl	%gs:CPU_NESTED_ISTACK

	movl	%esp, %edx		/* x86_saved_state */
	CCALL1(interrupt, %edx)

	decl	%gs:CPU_INTERRUPT_LEVEL
	decl	%gs:CPU_PREEMPTION_LEVEL
	decl	%gs:CPU_NESTED_ISTACK

	jmp	ret_to_kernel

/*
 * Take an AST from an interrupted user
 */
ast_from_interrupt_user:
	movl	%gs:CPU_PENDING_AST,%eax
	testl	%eax,%eax		/* pending ASTs? */
	je	ret_to_user		/* no, nothing to do */

	TIME_TRAP_UENTRY

	movl	$1, %ecx		/* check if we're in the PFZ */
	jmp	EXT(return_from_trap_with_ast)	/* return */


/*
 * 32bit Tasks
 * System call entries via INTR_GATE or sysenter:
 *
 *	esp	 -> x86_saved_state32_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_unix_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */
	incl	TH_SYSCALLS_UNIX(%ecx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	sti

	CCALL1(unix_syscall, %edi)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_mach_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */
	incl	TH_SYSCALLS_MACH(%ecx)	/* increment call count */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	sti

	CCALL1(mach_call_munger, %edi)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_mdep_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	sti

	CCALL1(machdep_syscall, %edi)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo_diag_scall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */
	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	pushl	%edi			/* push pcb stack for later */

	CCALL1(diagCall, %edi)		// Call diagnostics

	cli				// Disable interruptions just in case
	cmpl	$0,%eax			// What kind of return is this?
	je	1f			// - branch if bad (zero)
	popl	%esp			// Get back the original stack
	jmp	return_to_user		// Normal return, do not check asts...
1:
	CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
					// pass what would be the diag syscall
					// error return - cause an exception
	/* no return */


return_to_user:
	TIME_TRAP_UEXIT
	jmp	ret_to_user


/*
 * 64bit Tasks
 * System call entries via syscall only:
 *
 *	esp	 -> x86_saved_state64_t
 *	cr3	 -> kernel directory
 *	esp	 -> low based stack
 *	gs	 -> CPU_DATA_GS
 *	cs	 -> KERNEL32_CS
 *	ss/ds/es -> KERNEL_DS
 *
 *	interrupts disabled
 *	direction flag cleared
 */

Entry(lo_syscall)
	TIME_TRAP_UENTRY

	movl	%gs:CPU_KERNEL_STACK,%edi
	xchgl	%edi,%esp		/* switch to kernel stack */

	movl	%gs:CPU_ACTIVE_THREAD,%ecx	/* get current thread */
	movl	TH_TASK(%ecx),%ebx	/* point to current task */

	/* Check for active vtimers in the current task */
	TASK_VTIMER_CHECK(%ebx, %ecx)

	/*
	 * We can be here either for a mach, unix, machdep or diag syscall,
	 * as indicated by the syscall class:
	 */
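	/*
	 * The class is carried in the high byte of the 64-bit call number;
	 * e.g. the Unix write(2) syscall (number 4) is issued as
	 * (SYSCALL_CLASS_UNIX << SYSCALL_CLASS_SHIFT) | 4 = 0x2000004
	 * (encoding per mach/i386/syscall_sw.h).
	 */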
	movl	R64_RAX(%edi), %eax	/* syscall number/class */
	movl	%eax, %edx
	andl	$(SYSCALL_CLASS_MASK), %edx	/* syscall class */
	cmpl	$(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_mach_scall)
	cmpl	$(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_unix_scall)
	cmpl	$(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_mdep_scall)
	cmpl	$(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %edx
	je	EXT(lo64_diag_scall)

	sti

	/* Syscall class unknown */
	CCALL5(i386_exception, $(EXC_SYSCALL), %eax, $0, $1, $0)
	/* no return */


Entry(lo64_unix_scall)
	incl	TH_SYSCALLS_UNIX(%ecx)	/* increment call count */
	sti

	CCALL1(unix_syscall64, %edi)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo64_mach_scall)
	incl	TH_SYSCALLS_MACH(%ecx)	/* increment call count */
	sti

	CCALL1(mach_call_munger64, %edi)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo64_mdep_scall)
	sti

	CCALL1(machdep_syscall64, %edi)
	/*
	 * always returns through thread_exception_return
	 */


Entry(lo64_diag_scall)
	CCALL1(diagCall64, %edi)	// Call diagnostics

	cli				// Disable interruptions just in case
	cmpl	$0,%eax			// What kind of return is this?
	je	1f
	movl	%edi, %esp		// Get back the original stack
	jmp	return_to_user		// Normal return, do not check asts...
1:
	CCALL5(i386_exception, $EXC_SYSCALL, $0x6000, $0, $1, $0)
					// pass what would be the diag syscall
					// error return - cause an exception
	/* no return */


/*
 * Compatibility mode's last gasp...
 */
Entry(lo_df64)
	movl	%esp, %eax
	CCALL1(panic_double_fault64, %eax)
	hlt

Entry(lo_mc64)
	movl	%esp, %eax
	CCALL1(panic_machine_check64, %eax)
	hlt