- movl %gs:CPU_PENDING_AST,%eax /* get pending asts */
- testl $ AST_URGENT,%eax /* any urgent preemption? */
- je ret_to_kernel /* no, nothing to do */
- cmpl $ T_PREEMPT,R_TRAPNO(%esp)
- je ret_to_kernel /* T_PREEMPT handled in kernel_trap() */
- testl $ EFL_IF,R_EFLAGS(%esp) /* interrupts disabled? */
- je ret_to_kernel
- cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
- jne ret_to_kernel
- movl %gs:CPU_KERNEL_STACK,%eax
- movl %esp,%ecx
- xorl %eax,%ecx
- andl $(-KERNEL_STACK_SIZE),%ecx
- testl %ecx,%ecx /* are we on the kernel stack? */
- jne ret_to_kernel /* no, skip it */
-
- CCALL1(i386_astintr, $1) /* take the AST */
-
-ret_to_kernel:
- cmpl $0, %gs:CPU_IS64BIT
- je EXT(lo_ret_to_kernel)
- jmp EXT(lo64_ret_to_kernel)
-
-
-
-/*******************************************************************************************************
- *
- * All interrupts on all tasks enter here with:
- * esp -> x86_saved_state_t
- *
- * cr3 -> kernel directory
- * esp -> low based stack
- * gs -> CPU_DATA_GS
- * cs -> KERNEL_CS
- * ss/ds/es -> KERNEL_DS
- *
- * interrupts disabled
- * direction flag cleared
- */
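-/*
- * For orientation: a hedged C sketch of the saved-state record that
- * esp points at here. The real definition lives in
- * mach/i386/thread_status.h; field names below are approximate, and
- * SS_FLAVOR, R_CS, R64_CS etc. used later are genassym-generated
- * offsets into it.
- *
- *     typedef struct {
- *         uint32_t flavor;              // x86_SAVED_STATE32 or _64
- *         uint32_t count;
- *         union {
- *             x86_saved_state32_t ss_32;
- *             x86_saved_state64_t ss_64;
- *         } uss;
- *     } x86_saved_state_t;
- */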
-Entry(lo_allintrs)
- /*
- * test whether already on interrupt stack
- */
- movl %gs:CPU_INT_STACK_TOP,%ecx
- cmpl %esp,%ecx
- jb 1f
- leal -INTSTACK_SIZE(%ecx),%edx
- cmpl %esp,%edx
- jb int_from_intstack
-1:
- xchgl %ecx,%esp /* switch to interrupt stack */
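-/*
- * The check-and-switch above, as a C sketch (INTSTACK_SIZE and the
- * per-cpu fields are the ones referenced in the assembly):
- *
- *     char *top = cpu->int_stack_top;
- *     if (esp <= top && esp > top - INTSTACK_SIZE)
- *         goto int_from_intstack;    // nested: already on interrupt stack
- *     swap(esp, ecx);                // switch stacks; %ecx keeps old esp
- */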
-
- movl %cr0,%eax /* get cr0 */
- orl $(CR0_TS),%eax /* or in TS bit */
- movl %eax,%cr0 /* set cr0 */
-
- subl $8, %esp /* for 16-byte stack alignment */
- pushl %ecx /* save pointer to old stack */
- movl %ecx,%gs:CPU_INT_STATE /* save intr state */
-
- TIME_INT_ENTRY /* do timing */
-
- movl %gs:CPU_ACTIVE_THREAD,%ecx
- movl ACT_TASK(%ecx),%ebx
-
- /* Check for active vtimers in the current task */
- cmpl $0,TASK_VTIMERS(%ebx)
- jz 1f
-
- /* Set a pending AST */
- orl $(AST_BSD),%gs:CPU_PENDING_AST
-
- /* Set a thread AST (atomic) */
- lock
- orl $(AST_BSD),ACT_AST(%ecx)
-
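-/*
- * This vtimer pattern repeats at every entry point below; in C it is
- * roughly:
- *
- *     if (task->vtimers) {
- *         cpu->pending_ast |= AST_BSD;       // CPU-local, no lock needed
- *         atomic_or(&thread->ast, AST_BSD);  // the lock-prefixed orl
- *     }
- */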
-1:
- incl %gs:CPU_PREEMPTION_LEVEL
- incl %gs:CPU_INTERRUPT_LEVEL
-
- movl %gs:CPU_INT_STATE, %eax
- CCALL1(PE_incoming_interrupt, %eax) /* call generic interrupt routine */
-
- cli /* just in case we returned with intrs enabled */
- xorl %eax,%eax
- movl %eax,%gs:CPU_INT_STATE /* clear intr state pointer */
-
- decl %gs:CPU_INTERRUPT_LEVEL
- decl %gs:CPU_PREEMPTION_LEVEL
-
- TIME_INT_EXIT /* do timing */
-
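-/*
- * The block below arbitrates CR0.TS for lazy FPU save/restore; as a C
- * sketch (fp_valid == 0 meaning the live state is still in the FPU;
- * the struct name is assumed):
- *
- *     struct x86_fpsave_state *ifps = thread->pcb->ifps;
- *     if (ifps != NULL && !ifps->fp_valid)
- *         clts();                        // let FP run without a trap
- *     else
- *         set_cr0(get_cr0() | CR0_TS);   // next FP use takes a DNA trap
- */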
- movl %gs:CPU_ACTIVE_THREAD,%eax
- movl ACT_PCB(%eax),%eax /* get act's PCB */
- movl PCB_FPS(%eax),%eax /* get pcb's ims.ifps */
- cmpl $0,%eax /* Is there a context */
- je 1f /* Branch if not */
- movl FP_VALID(%eax),%eax /* Load fp_valid */
- cmpl $0,%eax /* Check if valid */
- jne 1f /* Branch if valid */
- clts /* Clear TS */
- jmp 2f
-1:
- movl %cr0,%eax /* get cr0 */
- orl $(CR0_TS),%eax /* or in TS bit */
- movl %eax,%cr0 /* set cr0 */
-2:
- popl %esp /* switch back to old stack */
-
- /* Load interrupted code segment into %eax */
- movl R_CS(%esp),%eax /* assume 32-bit state */
- cmpl $(SS_64),SS_FLAVOR(%esp) /* 64-bit? */
- jne 3f
- movl R64_CS(%esp),%eax /* 64-bit user mode */
-3:
- testb $3,%al /* user mode, */
- jnz ast_from_interrupt_user /* go handle potential ASTs */
- /*
- * we only want to handle preemption requests if
- * the interrupt fell in the kernel context
- * and preemption isn't disabled
- */
- movl %gs:CPU_PENDING_AST,%eax
- testl $ AST_URGENT,%eax /* any urgent requests? */
- je ret_to_kernel /* no, nothing to do */
-
- cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */
- jne ret_to_kernel /* yes, skip it */
-
- movl %gs:CPU_KERNEL_STACK,%eax
- movl %esp,%ecx
- xorl %eax,%ecx
- andl $(-KERNEL_STACK_SIZE),%ecx
- testl %ecx,%ecx /* are we on the kernel stack? */
- jne ret_to_kernel /* no, skip it */
-
- /*
- * Take an AST from kernel space. We don't need (and don't want)
- * to do as much as the case where the interrupt came from user
- * space.
- */
- CCALL1(i386_astintr, $1)
-
- jmp ret_to_kernel
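-/*
- * The whole kernel-preemption gate above, condensed to C:
- *
- *     if ((cpu->pending_ast & AST_URGENT) &&
- *         cpu->preemption_level == 0 &&
- *         ((esp ^ cpu->kernel_stack) & -KERNEL_STACK_SIZE) == 0)
- *             i386_astintr(TRUE);    // preempt from kernel context
- *
- * The xor/and test is zero exactly when esp lies in the same
- * KERNEL_STACK_SIZE-aligned block as the thread's kernel stack.
- */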
-
-
-/*
- * nested int - simple path, can't preempt etc on way out
- */
-int_from_intstack:
- incl %gs:CPU_PREEMPTION_LEVEL
- incl %gs:CPU_INTERRUPT_LEVEL
-
- movl %esp, %edx /* x86_saved_state */
- CCALL1(PE_incoming_interrupt, %edx)
-
- decl %gs:CPU_INTERRUPT_LEVEL
- decl %gs:CPU_PREEMPTION_LEVEL
-
- jmp ret_to_kernel
-
-/*
- * Take an AST from an interrupted user
- */
-ast_from_interrupt_user:
- movl %gs:CPU_PENDING_AST,%eax
- testl %eax,%eax /* pending ASTs? */
- je EXT(ret_to_user) /* no, nothing to do */
-
- TIME_TRAP_UENTRY
-
- jmp EXT(return_from_trap) /* return */
-
-
-/*******************************************************************************************************
- *
- * 32bit Tasks
- * System call entries via INTR_GATE or sysenter:
- *
- * esp -> x86_saved_state32_t
- * cr3 -> kernel directory
- * esp -> low based stack
- * gs -> CPU_DATA_GS
- * cs -> KERNEL_CS
- * ss/ds/es -> KERNEL_DS
- *
- * interrupts disabled
- * direction flag cleared
- */
-
-Entry(lo_sysenter)
- /*
- * We can be here either for a mach syscall or a unix syscall,
- * as indicated by the sign of the code:
- */
- movl R_EAX(%esp),%eax
- testl %eax,%eax
- js EXT(lo_mach_scall) /* < 0 => mach */
- /* >= 0 => unix */
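-/*
- * Mach trap numbers are negative and BSD syscall numbers are
- * non-negative, so one sign test dispatches both; in C:
- *
- *     if ((int)regs->eax < 0)
- *         goto lo_mach_scall;    // Mach trap
- *     else
- *         goto lo_unix_scall;    // BSD (unix) syscall
- */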
-
-Entry(lo_unix_scall)
- TIME_TRAP_UENTRY
-
- movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
- movl ACT_TASK(%ecx),%ebx /* point to current task */
- addl $1,TASK_SYSCALLS_UNIX(%ebx) /* increment call count */
-
- /* Check for active vtimers in the current task */
- cmpl $0,TASK_VTIMERS(%ebx)
- jz 1f
-
- /* Set a pending AST */
- orl $(AST_BSD),%gs:CPU_PENDING_AST
-
- /* Set a thread AST (atomic) */
- lock
- orl $(AST_BSD),ACT_AST(%ecx)
-
-1:
- movl %gs:CPU_KERNEL_STACK,%ebx
- xchgl %ebx,%esp /* switch to kernel stack */
-
- sti
-
- CCALL1(unix_syscall, %ebx)
- /*
- * always returns through thread_exception_return
- */
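-/*
- * Note the xchgl idiom shared by all these entries: one instruction
- * both installs the kernel stack in %esp and leaves the pointer to
- * the user's saved state in %ebx, which CCALL1 then passes as the
- * single argument, i.e. effectively:
- *
- *     unix_syscall((x86_saved_state_t *)saved_state);  // never returns
- */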
-
-
-Entry(lo_mach_scall)
- TIME_TRAP_UENTRY
-
- movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
- movl ACT_TASK(%ecx),%ebx /* point to current task */
- addl $1,TASK_SYSCALLS_MACH(%ebx) /* increment call count */
-
- /* Check for active vtimers in the current task */
- cmpl $0,TASK_VTIMERS(%ebx)
- jz 1f
-
- /* Set a pending AST */
- orl $(AST_BSD),%gs:CPU_PENDING_AST
-
- /* Set a thread AST (atomic) */
- lock
- orl $(AST_BSD),ACT_AST(%ecx)
-
-1:
- movl %gs:CPU_KERNEL_STACK,%ebx
- xchgl %ebx,%esp /* switch to kernel stack */
-
- sti
-
- CCALL1(mach_call_munger, %ebx)
- /*
- * always returns through thread_exception_return
- */
-
-
-Entry(lo_mdep_scall)
- TIME_TRAP_UENTRY
-
- movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
- movl ACT_TASK(%ecx),%ebx /* point to current task */
-
- /* Check for active vtimers in the current task */
- cmpl $0,TASK_VTIMERS(%ebx)
- jz 1f
-
- /* Set a pending AST */
- orl $(AST_BSD),%gs:CPU_PENDING_AST
-
- /* Set a thread AST (atomic) */
- lock
- orl $(AST_BSD),ACT_AST(%ecx)
-
-1:
- movl %gs:CPU_KERNEL_STACK,%ebx
- xchgl %ebx,%esp /* switch to kernel stack */
-
- sti
-
- CCALL1(machdep_syscall, %ebx)
- /*
- * always returns through thread_exception_return
- */
-
-
-Entry(lo_diag_scall)
- TIME_TRAP_UENTRY
-
- movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
- movl ACT_TASK(%ecx),%ebx /* point to current task */
-
- /* Check for active vtimers in the current task */
- cmpl $0,TASK_VTIMERS(%ebx)
- jz 1f
-
- /* Set a pending AST */
- orl $(AST_BSD),%gs:CPU_PENDING_AST
-
- /* Set a thread AST (atomic) */
- lock
- orl $(AST_BSD),ACT_AST(%ecx)
-
-1:
- movl %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack
- xchgl %ebx,%esp // Switch to it, saving the previous
-
- CCALL1(diagCall, %ebx) // Call diagnostics
-
- cmpl $0,%eax // What kind of return is this?
- je 2f
- cli // Disable interruptions just in case they were enabled
- popl %esp // Get back the original stack
- jmp EXT(return_to_user) // Normal return, do not check asts...
-2:
- CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
- // pass what would be the diag syscall
- // error return - cause an exception
- /* no return */
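-//
-// diagCall's return value picks the exit path; as a C sketch:
-//
-//     if (diagCall(saved_state) != 0) {    // handled: fast return,
-//         cli();                           // skipping AST checks
-//         return_to_user();
-//     } else {                             // error: raise an
-//         i386_exception(EXC_SYSCALL, 0x6000, 1);  // EXC_SYSCALL exception
-//     }
-//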
-
-
-
-/*******************************************************************************************************
- *
- * 64bit Tasks
- * System call entries via syscall only:
- *
- * esp -> x86_saved_state64_t
- * cr3 -> kernel directory
- * esp -> low based stack
- * gs -> CPU_DATA_GS
- * cs -> KERNEL_CS
- * ss/ds/es -> KERNEL_DS
- *
- * interrupts disabled
- * direction flag cleared
- */
-
-Entry(lo_syscall)
- /*
- * We can be here either for a mach, unix, machdep or diag syscall,
- * as indicated by the syscall class:
- */
- movl R64_RAX(%esp), %eax /* syscall number/class */
- movl %eax, %ebx
- andl $(SYSCALL_CLASS_MASK), %ebx /* syscall class */
- cmpl $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
- je EXT(lo64_mach_scall)
- cmpl $(SYSCALL_CLASS_UNIX<<SYSCALL_CLASS_SHIFT), %ebx
- je EXT(lo64_unix_scall)
- cmpl $(SYSCALL_CLASS_MDEP<<SYSCALL_CLASS_SHIFT), %ebx
- je EXT(lo64_mdep_scall)
- cmpl $(SYSCALL_CLASS_DIAG<<SYSCALL_CLASS_SHIFT), %ebx
- je EXT(lo64_diag_scall)
-
- movl %gs:CPU_KERNEL_STACK,%ebx
- xchgl %ebx,%esp /* switch to kernel stack */
-
- sti
-
- /* Syscall class unknown */
- CCALL3(i386_exception, $(EXC_SYSCALL), %eax, $1)
- /* no return */
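-/*
- * The class is carried in the upper bits of the 32-bit syscall number
- * (SYSCALL_CLASS_SHIFT/MASK come from mach/i386/syscall_sw.h); the
- * dispatch above is, in C:
- *
- *     switch (regs->rax & SYSCALL_CLASS_MASK) {
- *     case SYSCALL_CLASS_MACH << SYSCALL_CLASS_SHIFT: goto lo64_mach_scall;
- *     case SYSCALL_CLASS_UNIX << SYSCALL_CLASS_SHIFT: goto lo64_unix_scall;
- *     case SYSCALL_CLASS_MDEP << SYSCALL_CLASS_SHIFT: goto lo64_mdep_scall;
- *     case SYSCALL_CLASS_DIAG << SYSCALL_CLASS_SHIFT: goto lo64_diag_scall;
- *     default: i386_exception(EXC_SYSCALL, regs->rax, 1);  // no return
- *     }
- */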
-
-
-Entry(lo64_unix_scall)
- TIME_TRAP_UENTRY
-
- movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
- movl ACT_TASK(%ecx),%ebx /* point to current task */
- addl $1,TASK_SYSCALLS_UNIX(%ebx) /* increment call count */
-
- /* Check for active vtimers in the current task */
- cmpl $0,TASK_VTIMERS(%ebx)
- jz 1f
-
- /* Set a pending AST */
- orl $(AST_BSD),%gs:CPU_PENDING_AST
-
- /* Set a thread AST (atomic) */
- lock
- orl $(AST_BSD),ACT_AST(%ecx)
-
-1:
- movl %gs:CPU_KERNEL_STACK,%ebx
- xchgl %ebx,%esp /* switch to kernel stack */
-
- sti
-
- CCALL1(unix_syscall64, %ebx)
- /*
- * always returns through thread_exception_return
- */
-
-
-Entry(lo64_mach_scall)
- TIME_TRAP_UENTRY
-
- movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
- movl ACT_TASK(%ecx),%ebx /* point to current task */
- addl $1,TASK_SYSCALLS_MACH(%ebx) /* increment call count */
-
- /* Check for active vtimers in the current task */
- cmpl $0,TASK_VTIMERS(%ebx)
- jz 1f
-
- /* Set a pending AST */
- orl $(AST_BSD),%gs:CPU_PENDING_AST
-
- lock
- orl $(AST_BSD),ACT_AST(%ecx)
-
-1:
- movl %gs:CPU_KERNEL_STACK,%ebx
- xchgl %ebx,%esp /* switch to kernel stack */
-
- sti
-
- CCALL1(mach_call_munger64, %ebx)
- /*
- * always returns through thread_exception_return
- */
-
-
-
-Entry(lo64_mdep_scall)
- TIME_TRAP_UENTRY
-
- movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
- movl ACT_TASK(%ecx),%ebx /* point to current task */
-
- /* Check for active vtimers in the current task */
- cmpl $0,TASK_VTIMERS(%ebx)
- jz 1f
-
- /* Set a pending AST */
- orl $(AST_BSD),%gs:CPU_PENDING_AST
-
- /* Set a thread AST (atomic) */
- lock
- orl $(AST_BSD),ACT_AST(%ecx)
-
-1:
- movl %gs:CPU_KERNEL_STACK,%ebx
- xchgl %ebx,%esp /* switch to kernel stack */
-
- sti
-
- CCALL1(machdep_syscall64, %ebx)
- /*
- * always returns through thread_exception_return
- */
-
-
-Entry(lo64_diag_scall)
- TIME_TRAP_UENTRY
-
- movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */
- movl ACT_TASK(%ecx),%ebx /* point to current task */
-
- /* Check for active vtimers in the current task */
- cmpl $0,TASK_VTIMERS(%ebx)
- jz 1f
-
- /* Set a pending AST */
- orl $(AST_BSD),%gs:CPU_PENDING_AST