clac /* Clear EFLAGS.AC if SMAP is present/enabled */
1:
/*
- * On entering the kernel, we don't need to switch cr3
+ * On entering the kernel, we typically don't switch CR3
* because the kernel shares the user's address space.
- * But we mark the kernel's cr3 as "active".
- * If, however, the invalid cr3 flag is set, we have to flush tlbs
- * since the kernel's mapping was changed while we were in userspace.
+ * But we mark the kernel's cr3 as "active" for TLB coherency evaluation.
+ * If, however, the CPU's TLB-invalid flag is set, we have to invalidate the TLB,
+ * since the kernel pagetables were changed while we were in userspace.
*
- * But: if global no_shared_cr3 is TRUE we do switch to the kernel's cr3
+ * For threads with a mapped pagezero (some WINE games) on non-SMAP platforms,
+ * we switch to the kernel's address space on entry. Also,
+ * if the global no_shared_cr3 is TRUE we do switch to the kernel's cr3
* so that illicit accesses to userspace can be trapped.
*/
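The entry-path policy above reads more easily in C. The sketch below is illustrative only and not part of this change: the struct and helper names (cpu_data_sketch, load_cr3_with_pcid, flush_tlb_global, flush_tlb_local) are invented here, the fields merely mirror the %gs-relative symbols used in the assembly, and the flag layout (the "local" flag in the low half of the invalid word, the "global" flag at bit 16) is an assumption consistent with the shr $32 / testl $(1<<16) test that follows.

#include <stdbool.h>
#include <stdint.h>

/* Field names mirror the %gs-relative symbols used in the assembly below. */
struct cpu_data_sketch {
	uint64_t cpu_kernel_cr3;         /* CPU_KERNEL_CR3 */
	uint16_t cpu_kernel_pcid;        /* CPU_KERNEL_PCID */
	uint32_t cpu_pagezero_mapped;    /* CPU_PAGEZERO_MAPPED */
	uint16_t cpu_tlb_invalid_local;  /* low half of CPU_TLB_INVALID */
	uint16_t cpu_tlb_invalid_global; /* high half, tested as (1<<16) */
};

extern int no_shared_cr3;                              /* boot-arg controlled */
void load_cr3_with_pcid(uint64_t cr3, uint16_t pcid);  /* invented helper */
void flush_tlb_global(void);                           /* CR4.PGE toggle */
void flush_tlb_local(void);                            /* CR3 reload */

static void kernel_entry_cr3_policy(struct cpu_data_sketch *cdp, bool from_user)
{
	/* "test $3, %esi": privilege bits of the saved selector, user vs. kernel */
	if (from_user && (cdp->cpu_pagezero_mapped || no_shared_cr3)) {
		/* label 11: enter on the kernel's pagetables */
		load_cr3_with_pcid(cdp->cpu_kernel_cr3, cdp->cpu_kernel_pcid);
		return;                  /* "jmp 4f": skip the flush test */
	}
	/* label 2: handle a TLB invalidation deferred while in user mode */
	if (cdp->cpu_tlb_invalid_global) {
		cdp->cpu_tlb_invalid_local = 0;   /* movl $0, %gs:CPU_TLB_INVALID */
		cdp->cpu_tlb_invalid_global = 0;  /* zeroes both halves */
		flush_tlb_global();
	} else if (cdp->cpu_tlb_invalid_local) {
		cdp->cpu_tlb_invalid_local = 0;   /* movb $0, %gs:CPU_TLB_INVALID_LOCAL */
		flush_tlb_local();
	}
}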
mov %gs:CPU_KERNEL_CR3, %rcx
test $3, %esi /* user/kernel? */
jz 2f /* skip cr3 reload from kernel */
xor %rbp, %rbp
+ cmpl $0, %gs:CPU_PAGEZERO_MAPPED
+ jnz 11f
cmpl $0, EXT(no_shared_cr3)(%rip)
je 2f
+11:
+ xor %eax, %eax
+ movw %gs:CPU_KERNEL_PCID, %ax
+ or %rax, %rcx
mov %rcx, %cr3 /* load kernel cr3 */
jmp 4f /* and skip tlb flush test */
2:
shr $32, %rcx
testl %ecx, %ecx
jz 4f
- movl $0, %gs:CPU_TLB_INVALID
testl $(1<<16), %ecx /* Global? */
jz 3f
+ movl $0, %gs:CPU_TLB_INVALID
mov %cr4, %rcx /* RMWW CR4, for lack of an alternative */
and $(~CR4_PGE), %rcx
mov %rcx, %cr4
or $(CR4_PGE), %rcx
mov %rcx, %cr4
jmp 4f
3:
+ movb $0, %gs:CPU_TLB_INVALID_LOCAL
mov %cr3, %rcx
mov %rcx, %cr3
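The two flush mechanisms above differ in scope: the "RMWW CR4" sequence (read CR4, write it with PGE cleared, write it again with PGE restored) evicts even global (G-bit) translations, while rewriting CR3 with its current value at 3: drops only non-global entries and, with PCIDs enabled, only those tagged with the current PCID. A minimal C sketch of both, assuming the usual CR4_PGE bit position and GCC-style inline assembly; ring-0 only and illustrative, not the kernel's actual helpers:

#include <stdint.h>

#define CR4_PGE (1ULL << 7)              /* page-global-enable bit of CR4 */

/* Global flush: write CR4 with PGE cleared, then again with it restored. */
static inline void flush_tlb_global(void)
{
	uint64_t cr4;
	__asm__ volatile ("mov %%cr4, %0" : "=r" (cr4));
	__asm__ volatile ("mov %0, %%cr4" :: "r" (cr4 & ~CR4_PGE) : "memory");
	__asm__ volatile ("mov %0, %%cr4" :: "r" (cr4 | CR4_PGE) : "memory");
}

/* Local flush: rewriting CR3 with its current value drops non-global entries. */
static inline void flush_tlb_local(void)
{
	uint64_t cr3;
	__asm__ volatile ("mov %%cr3, %0" : "=r" (cr3));
	__asm__ volatile ("mov %0, %%cr3" :: "r" (cr3) : "memory");
}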
4:
mov %rcx, %gs:CPU_DR7
2:
/*
- * On exiting the kernel there's no need to switch cr3 since we're
+ * On exiting the kernel there's typically no need to switch cr3 since we're
* already running in the user's address space which includes the
- * kernel. Nevertheless, we now mark the task's cr3 as active.
- * But, if no_shared_cr3 is set, we do need to switch cr3 at this point.
+ * kernel. We now mark the task's cr3 as active, for TLB coherency.
+ * If the target address space has a pagezero mapping present, or
+ * if no_shared_cr3 is set, we do need to switch cr3 at this point.
*/
mov %gs:CPU_TASK_CR3, %rcx
mov %rcx, %gs:CPU_ACTIVE_CR3
+ cmpl $0, %gs:CPU_PAGEZERO_MAPPED
+ jnz L_cr3_switch_island
movl EXT(no_shared_cr3)(%rip), %eax
test %eax, %eax /* -no_shared_cr3 */
- jz 3f
- mov %rcx, %cr3
-3:
+ jnz L_cr3_switch_island
+
+L_cr3_switch_return:
mov %gs:CPU_DR7, %rax /* Is there a debug control register? */
cmp $0, %rax
je 4f
EXT(ret32_iret):
iretq /* return from interrupt */
+
L_fast_exit:
pop %rdx /* user return eip */
pop %rcx /* pop and toss cs */
sti /* interrupts enabled after sysexit */
sysexitl /* 32-bit sysexit */
+L_cr3_switch_island:
+ xor %eax, %eax
+ movw %gs:CPU_ACTIVE_PCID, %ax
+ or %rax, %rcx
+ mov %rcx, %cr3
+ jmp L_cr3_switch_return
+
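L_cr3_switch_island, like label 11: on the entry path, folds the hardware PCID into the CR3 image with a single OR; that works because the stored CR3 value is page-aligned, so bits 11:0 are free to carry the PCID once CR4.PCIDE is enabled. A minimal arithmetic sketch (the function name is invented for illustration):

#include <stdint.h>

/*
 * Compose a CR3 image from a page-aligned top-level pagetable address and a
 * 12-bit PCID. The islands can simply OR the two ("or %rax, %rcx") because
 * the stored CR3 already has its low 12 bits clear. Illustrative only.
 */
static inline uint64_t cr3_compose(uint64_t pml4_phys, uint16_t pcid)
{
	return (pml4_phys & ~0xFFFULL) | (pcid & 0xFFFULL);
}

/*
 * e.g. cr3_compose(CPU_TASK_CR3, CPU_ACTIVE_PCID) is what the exit island
 * loads; with bit 63 clear, the CR3 load also flushes the non-global TLB
 * entries tagged with the new PCID.
 */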
ret_to_kernel:
#if DEBUG_IDT64
cmpl $(SS_64), SS_FLAVOR(%r15) /* 64-bit state? */
Entry(idt64_double_fault)
PUSH_FUNCTION(HNDL_DOUBLE_FAULT)
pushq $(T_DOUBLE_FAULT)
+ jmp L_dispatch_kernel
- push %rax
- leaq EXT(idt64_syscall)(%rip), %rax
- cmp %rax, ISF64_RIP+8(%rsp)
- pop %rax
- jne L_dispatch_kernel
-
- mov ISF64_RSP(%rsp), %rsp
- jmp L_syscall_continue
-
/*
* For GP/NP/SS faults, we use the IST1 stack.
cli /* hold off intrs - critical section */
xorl %ecx, %ecx /* don't check if we're in the PFZ */
-#define CLI cli
-#define STI sti
Entry(return_from_trap)
movq %gs:CPU_ACTIVE_THREAD,%r15 /* Get current thread */
movl %eax, R64_RBX(%r15) /* let the PFZ know we've pended an AST */
jmp EXT(return_to_user)
2:
- STI /* interrupts always enabled on return to user mode */
+ sti /* interrupts always enabled on return to user mode */
xor %edi, %edi /* zero %rdi */
xorq %rbp, %rbp /* clear framepointer */
CCALL(i386_astintr) /* take the AST */
- CLI
+ cli
mov %rsp, %r15 /* AST changes stack, saved state */
xorl %ecx, %ecx /* don't check if we're in the PFZ */
jmp EXT(return_from_trap) /* and check again (rare) */
CCALL1(interrupt, %r15) /* call generic interrupt routine */
- cli /* just in case we returned with intrs enabled */
-
.globl EXT(return_to_iret)
LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */
Entry(hndl_diag_scall64)
CCALL1(diagCall64, %r15) // Call diagnostics
- cli // Disable interruptions just in case
test %eax, %eax // What kind of return is this?
je 1f // - branch if bad (zero)
jmp EXT(return_to_user) // Normal return, do not check asts...