X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/4452a7af2eac33dbad800bcc91f2399d62c18f53..99c3a10404e5d1ef94397ab4df5a8b74711fc4d3:/osfmk/i386/locore.s diff --git a/osfmk/i386/locore.s b/osfmk/i386/locore.s index 166c843f5..911439764 100644 --- a/osfmk/i386/locore.s +++ b/osfmk/i386/locore.s @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2009 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -56,10 +56,7 @@ #include #include -#include -#include #include -#include #include #include @@ -69,12 +66,8 @@ #include #include #include -#include -#define _ARCH_I386_ASM_HELP_H_ /* Prevent inclusion of user header */ -#include - -#include +#include /* * PTmap is recursive pagemap at top of virtual address space. @@ -85,15 +78,6 @@ .set _PTD,_PTmap + (PTDPTDI * NBPG) .set _PTDpde,_PTD + (PTDPTDI * PDESIZE) -/* - * APTmap, APTD is the alternate recursive pagemap. - * It's used when modifying another process's page tables. - */ - .globl _APTmap,_APTD,_APTDpde - .set _APTmap,(APTDPTDI << PDESHIFT) - .set _APTD,_APTmap + (APTDPTDI * NBPG) - .set _APTDpde,_PTD + (APTDPTDI * PDESIZE) - #if __MACHO__ /* Under Mach-O, etext is a variable which contains * the last text address @@ -106,50 +90,6 @@ #define ETEXT_ADDR $ EXT(etext) #endif -#define CX(addr,reg) addr(,reg,4) - -/* - * The following macros make calls into C code. - * They dynamically align the stack to 16 bytes. - * Arguments are moved (not pushed) onto the correctly aligned stack. - * NOTE: EDI is destroyed in the process, and hence cannot - * be directly used as a parameter. Users of this macro must - * independently preserve EDI (a non-volatile) if the routine is - * intended to be called from C, for instance. - */ - -#define CCALL(fn) \ - movl %esp, %edi ;\ - andl $0xFFFFFFF0, %esp ;\ - call EXT(fn) ;\ - movl %edi, %esp - -#define CCALL1(fn, arg1) \ - movl %esp, %edi ;\ - subl $4, %esp ;\ - andl $0xFFFFFFF0, %esp ;\ - movl arg1, 0(%esp) ;\ - call EXT(fn) ;\ - movl %edi, %esp - -#define CCALL2(fn, arg1, arg2) \ - movl %esp, %edi ;\ - subl $8, %esp ;\ - andl $0xFFFFFFF0, %esp ;\ - movl arg2, 4(%esp) ;\ - movl arg1, 0(%esp) ;\ - call EXT(fn) ;\ - movl %edi, %esp - -#define CCALL3(fn, arg1, arg2, arg3) \ - movl %esp, %edi ;\ - subl $12, %esp ;\ - andl $0xFFFFFFF0, %esp ;\ - movl arg3, 8(%esp) ;\ - movl arg2, 4(%esp) ;\ - movl arg1, 0(%esp) ;\ - call EXT(fn) ;\ - movl %edi, %esp .text locore_start: @@ -182,7 +122,16 @@ LEXT(recover_table) ;\ .align 2 ;\ .globl EXT(recover_table_end) ;\ LEXT(recover_table_end) ;\ - .text + .long 0 /* workaround see comment below */ ;\ + .text ; + +/* TODO FIXME + * the .long 0 is to work around a linker bug (insert radar# here) + * basically recover_table_end has zero size and bumps up right against saved_esp in acpi_wakeup.s + * recover_table_end is in __RECOVER,__vectors and saved_esp is in __SLEEP,__data, but they're right next to each + * other and so the linker combines them and incorrectly relocates everything referencing recover_table_end to point + * into the SLEEP section + */ /* * Allocate recovery and table. @@ -190,739 +139,53 @@ LEXT(recover_table_end) ;\ RECOVERY_SECTION RECOVER_TABLE_START -/* - * Timing routines. 
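
The CCALL* macros deleted above are worth a note: trap and interrupt entry
reaches C code with arbitrary stack alignment, while SSE-using compiler
output expects 16-byte alignment at call boundaries, so the macros realign
dynamically and *move* arguments onto the aligned frame instead of pushing
them. A minimal C model of the pointer arithmetic (a sketch; the names are
mine, not kernel code):

#include <assert.h>
#include <stdint.h>

/*
 * Models CCALL2's prologue: the macros save %esp in %edi, reserve room
 * for two 4-byte arguments, round down to a 16-byte boundary, then store
 * the arguments at 0(%esp) and 4(%esp).  Storing rather than pushing
 * matters: a push after the andl would move %esp again and destroy the
 * alignment just established.
 */
static uintptr_t
ccall2_aligned_sp(uintptr_t entry_sp)
{
	uintptr_t sp = entry_sp - 8;	/* subl $8, %esp */
	sp &= ~(uintptr_t)0xF;		/* andl $0xFFFFFFF0, %esp */
	assert((sp & 0xF) == 0);	/* callee sees 16-byte alignment */
	return sp;			/* args stored, not pushed */
}

This is also why the macros' comment flags %edi as destroyed: it holds the
caller's %esp across the call so it can be restored afterwards.
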
- */ -Entry(timer_update) - movl 4(%esp),%ecx - movl 8(%esp),%eax - movl 12(%esp),%edx - movl %eax,TIMER_HIGHCHK(%ecx) - movl %edx,TIMER_LOW(%ecx) - movl %eax,TIMER_HIGH(%ecx) - ret - -Entry(timer_grab) - movl 4(%esp),%ecx -0: movl TIMER_HIGH(%ecx),%edx - movl TIMER_LOW(%ecx),%eax - cmpl TIMER_HIGHCHK(%ecx),%edx - jne 0b - ret - -#if STAT_TIME - -#define TIME_TRAP_UENTRY -#define TIME_TRAP_UEXIT -#define TIME_INT_ENTRY -#define TIME_INT_EXIT - -#else -/* - * Nanosecond timing. - */ - -/* - * Low 32-bits of nanotime returned in %eax. - * Computed from tsc based on the scale factor - * and an implicit 32 bit shift. - * - * Uses %esi, %edi, %ebx, %ecx and %edx. - */ -#define RNT_INFO _rtc_nanotime_info -#define NANOTIME32 \ -0: movl RNT_INFO+RNT_TSC_BASE,%esi ;\ - movl RNT_INFO+RNT_TSC_BASE+4,%edi ;\ - rdtsc ;\ - subl %esi,%eax /* tsc - tsc_base */ ;\ - sbbl %edi,%edx ;\ - movl RNT_INFO+RNT_SCALE,%ecx ;\ - movl %edx,%ebx /* delta * scale */ ;\ - mull %ecx ;\ - movl %ebx,%eax ;\ - movl %edx,%ebx ;\ - mull %ecx ;\ - addl %ebx,%eax ;\ - addl RNT_INFO+RNT_NS_BASE,%eax /* add ns_base */ ;\ - cmpl RNT_INFO+RNT_TSC_BASE,%esi ;\ - jne 0b ;\ - cmpl RNT_INFO+RNT_TSC_BASE+4,%edi ;\ - jne 0b - -/* - * Add 32-bit ns delta in register dreg to timer pointed to by register treg. - */ -#define TIMER_UPDATE(treg,dreg) \ - addl TIMER_LOW(treg),dreg /* add delta low bits */ ;\ - adcl $0,TIMER_HIGHCHK(treg) /* add carry check bits */ ;\ - movl dreg,TIMER_LOW(treg) /* store updated low bit */ ;\ - movl TIMER_HIGHCHK(treg),dreg /* copy high check bits */ ;\ - movl dreg,TIMER_HIGH(treg) /* to high bita */ - -/* - * Add time delta to old timer and start new. - */ -#define TIMER_EVENT(old,new) \ - NANOTIME32 /* eax low bits nanosecs */ ;\ - movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\ - movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\ - movl %eax,%edx /* save timestamp in %edx */ ;\ - subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\ - TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\ - addl $(new##_TIMER-old##_TIMER),%ecx /* point to new timer */ ;\ - movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */ ;\ - movl %gs:CPU_PROCESSOR,%edx /* get current processor */ ;\ - movl %ecx,CURRENT_TIMER(%edx) /* set current timer */ - - -/* - * Update time on user trap entry. - * Uses %eax,%ecx,%edx,%esi. - */ -#define TIME_TRAP_UENTRY TIMER_EVENT(USER,SYSTEM) - -/* - * update time on user trap exit. - * Uses %eax,%ecx,%edx,%esi. - */ -#define TIME_TRAP_UEXIT TIMER_EVENT(SYSTEM,USER) - -/* - * update time on interrupt entry. - * Uses %eax,%ecx,%edx,%esi. - */ -#define TIME_INT_ENTRY \ - NANOTIME32 /* eax low bits nanosecs */ ;\ - movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\ - movl CURRENT_TIMER(%ecx),%ecx /* get current timer */ ;\ - movl %eax,%edx /* save timestamp in %edx */ ;\ - subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\ - TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\ - movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\ - addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\ - movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */ - -/* - * update time on interrupt exit. - * Uses %eax, %ecx, %edx, %esi. 
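
The removed NANOTIME32 macro is dense enough to deserve a restatement. It
computes ns_base plus bits 32..63 of the 96-bit product
(tsc - tsc_base) * scale, i.e. a 32.32 fixed-point multiply with an
implicit right shift of 32, and the trailing re-check of tsc_base retries
if rtc_nanotime_info was updated mid-read (the same lock-free pattern
timer_grab() above uses with TIMER_HIGHCHK). A C sketch, with rdtsc64()
and the struct assumed as stand-ins for the RNT_* offsets:

#include <stdint.h>

struct rtc_nanotime_sketch {
	volatile uint64_t tsc_base;	/* RNT_TSC_BASE */
	volatile uint32_t scale;	/* RNT_SCALE    */
	volatile uint32_t ns_base;	/* RNT_NS_BASE  */
};

extern uint64_t rdtsc64(void);		/* assumed TSC-reading helper */

static uint32_t
nanotime32(const struct rtc_nanotime_sketch *rnt)
{
	uint64_t base, delta;
	uint32_t ns;

	do {
		base  = rnt->tsc_base;
		delta = rdtsc64() - base;
		/*
		 * The two mull/add steps in the macro compute
		 * (delta * scale) >> 32, truncated to 32 bits.
		 */
		ns = (uint32_t)((delta * rnt->scale) >> 32) + rnt->ns_base;
	} while (base != rnt->tsc_base);	/* retry if updated */

	return ns;
}
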
- */ -#define TIME_INT_EXIT \ - NANOTIME32 /* eax low bits nanosecs */ ;\ - movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ ;\ - addl $(SYSTEM_TIMER),%ecx /* point to sys timer */ ;\ - movl %eax,%edx /* save timestamp in %edx */ ;\ - subl TIMER_TSTAMP(%ecx),%eax /* compute elapsed time */ ;\ - TIMER_UPDATE(%ecx,%eax) /* update timer struct */ ;\ - movl %gs:CPU_PROCESSOR,%ecx /* get current processor */ ;\ - movl CURRENT_TIMER(%ecx),%ecx /* interrupted timer */ ;\ - movl %edx,TIMER_TSTAMP(%ecx) /* set timestamp */ - -#endif /* STAT_TIME */ - -#undef PDEBUG - -#ifdef PDEBUG - -/* - * Traditional, not ANSI. - */ -#define CAH(label) \ - .data ;\ - .globl label/**/count ;\ -label/**/count: ;\ - .long 0 ;\ - .globl label/**/limit ;\ -label/**/limit: ;\ - .long 0 ;\ - .text ;\ - addl $1,%ss:label/**/count ;\ - cmpl $0,label/**/limit ;\ - jz label/**/exit ;\ - pushl %eax ;\ -label/**/loop: ;\ - movl %ss:label/**/count,%eax ;\ - cmpl %eax,%ss:label/**/limit ;\ - je label/**/loop ;\ - popl %eax ;\ -label/**/exit: - -#else /* PDEBUG */ - -#define CAH(label) - -#endif /* PDEBUG */ - -#if MACH_KDB -/* - * Last-ditch debug code to handle faults that might result - * from entering kernel (from collocated server) on an invalid - * stack. On collocated entry, there's no hardware-initiated - * stack switch, so a valid stack must be in place when an - * exception occurs, or we may double-fault. - * - * In case of a double-fault, our only recourse is to switch - * hardware "tasks", so that we avoid using the current stack. - * - * The idea here is just to get the processor into the debugger, - * post-haste. No attempt is made to fix up whatever error got - * us here, so presumably continuing from the debugger will - * simply land us here again -- at best. - */ -#if 0 -/* - * Note that the per-fault entry points are not currently - * functional. The only way to make them work would be to - * set up separate TSS's for each fault type, which doesn't - * currently seem worthwhile. (The offset part of a task - * gate is always ignored.) So all faults that task switch - * currently resume at db_task_start. - */ -/* - * Double fault (Murphy's point) - error code (0) on stack - */ -Entry(db_task_dbl_fault) - popl %eax - movl $(T_DOUBLE_FAULT),%ebx - jmp db_task_start -/* - * Segment not present - error code on stack - */ -Entry(db_task_seg_np) - popl %eax - movl $(T_SEGMENT_NOT_PRESENT),%ebx - jmp db_task_start -/* - * Stack fault - error code on (current) stack - */ -Entry(db_task_stk_fault) - popl %eax - movl $(T_STACK_FAULT),%ebx - jmp db_task_start -/* - * General protection fault - error code on stack - */ -Entry(db_task_gen_prot) - popl %eax - movl $(T_GENERAL_PROTECTION),%ebx - jmp db_task_start -#endif /* 0 */ -/* - * The entry point where execution resumes after last-ditch debugger task - * switch. - */ -Entry(db_task_start) - movl %esp,%edx - subl $(ISS32_SIZE),%edx - movl %edx,%esp /* allocate i386_saved_state on stack */ - movl %eax,R_ERR(%esp) - movl %ebx,R_TRAPNO(%esp) - pushl %edx - CPU_NUMBER(%edx) - movl CX(EXT(master_dbtss),%edx),%edx - movl TSS_LINK(%edx),%eax - pushl %eax /* pass along selector of previous TSS */ - call EXT(db_tss_to_frame) - popl %eax /* get rid of TSS selector */ - call EXT(db_trap_from_asm) - addl $0x4,%esp - /* - * And now...? - */ - iret /* ha, ha, ha... */ -#endif /* MACH_KDB */ /* * Called as a function, makes the current thread * return from the kernel as if from an exception. 
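
The rewritten entry points below fold the two labels together:
thread_bootstrap_return now runs a DTrace hook for newly created threads
and falls through into thread_exception_return, which clears %ecx to say
"not returning from the PFZ" before taking the common return_from_trap
path. The control flow, as a C outline (a sketch only; the declarations
are assumptions, not the kernel's headers):

extern void dtrace_thread_bootstrap(void);	/* new-thread probe hook */
extern void thread_exception_return(void) __attribute__((noreturn));

void
thread_bootstrap_return(void)
{
#if CONFIG_DTRACE
	dtrace_thread_bootstrap();	/* probe for a freshly created thread */
#endif
	thread_exception_return();	/* fall-through in the asm below */
}
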
+ * We will consult with DTrace if this is a + * newly created thread and we need to fire a probe. */ .globl EXT(thread_exception_return) .globl EXT(thread_bootstrap_return) -LEXT(thread_exception_return) LEXT(thread_bootstrap_return) - cli - movl %gs:CPU_KERNEL_STACK,%ecx - movl (%ecx),%esp /* switch back to PCB stack */ - jmp EXT(return_from_trap) - -Entry(call_continuation) - movl S_ARG0,%eax /* get continuation */ - movl S_ARG1,%edx /* continuation param */ - movl S_ARG2,%ecx /* wait result */ - movl %gs:CPU_KERNEL_STACK,%esp /* pop the stack */ - xorl %ebp,%ebp /* zero frame pointer */ - subl $8,%esp /* align the stack */ - pushl %ecx - pushl %edx - call *%eax /* call continuation */ - addl $16,%esp - movl %gs:CPU_ACTIVE_THREAD,%eax - pushl %eax - call EXT(thread_terminate) - - - -/******************************************************************************************************* - * - * All 64 bit task 'exceptions' enter lo_alltraps: - * esp -> x86_saved_state_t - * - * The rest of the state is set up as: - * cr3 -> kernel directory - * esp -> low based stack - * gs -> CPU_DATA_GS - * cs -> KERNEL_CS - * ss/ds/es -> KERNEL_DS - * - * interrupts disabled - * direction flag cleared - */ -Entry(lo_alltraps) - movl R_CS(%esp),%eax /* assume 32-bit state */ - cmpl $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */ - jne 1f - movl R64_CS(%esp),%eax /* 64-bit user mode */ -1: - testb $3,%eax - jz trap_from_kernel - /* user mode trap */ - TIME_TRAP_UENTRY - - movl %gs:CPU_KERNEL_STACK,%ebx - xchgl %ebx,%esp /* switch to kernel stack */ - sti - - CCALL1(user_trap, %ebx) /* call user trap routine */ - cli /* hold off intrs - critical section */ - popl %esp /* switch back to PCB stack */ - -/* - * Return from trap or system call, checking for ASTs. - * On lowbase PCB stack with intrs disabled - */ -LEXT(return_from_trap) - movl %gs:CPU_PENDING_AST,%eax - testl %eax,%eax - je EXT(return_to_user) /* branch if no AST */ - - movl %gs:CPU_KERNEL_STACK,%ebx - xchgl %ebx,%esp /* switch to kernel stack */ - sti /* interrupts always enabled on return to user mode */ - - pushl %ebx /* save PCB stack */ - CCALL1(i386_astintr, $0) /* take the AST */ - cli - popl %esp /* switch back to PCB stack (w/exc link) */ - jmp EXT(return_from_trap) /* and check again (rare) */ - -LEXT(return_to_user) - TIME_TRAP_UEXIT - -LEXT(ret_to_user) - cmpl $0, %gs:CPU_IS64BIT - je EXT(lo_ret_to_user) - jmp EXT(lo64_ret_to_user) - +#if CONFIG_DTRACE + call EXT(dtrace_thread_bootstrap) +#endif - -/* - * Trap from kernel mode. No need to switch stacks. - * Interrupts must be off here - we will set them to state at time of trap - * as soon as it's safe for us to do so and not recurse doing preemption - */ -trap_from_kernel: - movl %esp, %eax /* saved state addr */ - CCALL1(kernel_trap, %eax) /* to kernel trap routine */ +LEXT(thread_exception_return) cli + xorl %ecx,%ecx /* don't check if in the PFZ */ + cmpl $0, %gs:CPU_IS64BIT + je EXT(return_from_trap32) + jmp EXT(return_from_trap) - movl %gs:CPU_PENDING_AST,%eax /* get pending asts */ - testl $ AST_URGENT,%eax /* any urgent preemption? */ - je ret_to_kernel /* no, nothing to do */ - cmpl $ T_PREEMPT,R_TRAPNO(%esp) - je ret_to_kernel /* T_PREEMPT handled in kernel_trap() */ - testl $ EFL_IF,R_EFLAGS(%esp) /* interrupts disabled? */ - je ret_to_kernel - cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */ - jne ret_to_kernel - movl %gs:CPU_KERNEL_STACK,%eax - movl %esp,%ecx - xorl %eax,%ecx - andl $(-KERNEL_STACK_SIZE),%ecx - testl %ecx,%ecx /* are we on the kernel stack? 
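
The xorl/andl pair here answers that question in two instructions: kernel
stacks are KERNEL_STACK_SIZE bytes, aligned to their size, so two pointers
lie on the same stack exactly when they agree in every bit above the
in-stack offset field. Equivalent C (a sketch; the size value is
illustrative, the real constant comes from the kernel configuration and
must be a power of two):

#include <stdbool.h>
#include <stdint.h>

#define KERNEL_STACK_SIZE 0x4000u	/* illustrative */

static bool
on_same_kernel_stack(uintptr_t sp, uintptr_t kstack)
{
	/* xorl: differing bits; andl $(-KERNEL_STACK_SIZE): keep only
	 * the bits above the offset.  Zero means same stack. */
	return ((sp ^ kstack) & ~(uintptr_t)(KERNEL_STACK_SIZE - 1)) == 0;
}

A nonzero result means the trap did not occur on the thread's kernel
stack, and the AST is deferred to a safer point.
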
*/ - jne ret_to_kernel /* no, skip it */ - - CCALL1(i386_astintr, $1) /* take the AST */ - -ret_to_kernel: - cmpl $0, %gs:CPU_IS64BIT - je EXT(lo_ret_to_kernel) - jmp EXT(lo64_ret_to_kernel) - - - -/******************************************************************************************************* - * - * All interrupts on all tasks enter here with: - * esp-> -> x86_saved_state_t - * - * cr3 -> kernel directory - * esp -> low based stack - * gs -> CPU_DATA_GS - * cs -> KERNEL_CS - * ss/ds/es -> KERNEL_DS - * - * interrupts disabled - * direction flag cleared - */ -Entry(lo_allintrs) - /* - * test whether already on interrupt stack - */ - movl %gs:CPU_INT_STACK_TOP,%ecx - cmpl %esp,%ecx - jb 1f - leal -INTSTACK_SIZE(%ecx),%edx - cmpl %esp,%edx - jb int_from_intstack -1: - xchgl %ecx,%esp /* switch to interrupt stack */ - - movl %cr0,%eax /* get cr0 */ - orl $(CR0_TS),%eax /* or in TS bit */ - movl %eax,%cr0 /* set cr0 */ - - subl $8, %esp /* for 16-byte stack alignment */ - pushl %ecx /* save pointer to old stack */ - movl %ecx,%gs:CPU_INT_STATE /* save intr state */ - - TIME_INT_ENTRY /* do timing */ - - incl %gs:CPU_PREEMPTION_LEVEL - incl %gs:CPU_INTERRUPT_LEVEL - - movl %gs:CPU_INT_STATE, %eax - CCALL1(PE_incoming_interrupt, %eax) /* call generic interrupt routine */ - - cli /* just in case we returned with intrs enabled */ - xorl %eax,%eax - movl %eax,%gs:CPU_INT_STATE /* clear intr state pointer */ - - .globl EXT(return_to_iret) -LEXT(return_to_iret) /* (label for kdb_kintr and hardclock) */ - - decl %gs:CPU_INTERRUPT_LEVEL - decl %gs:CPU_PREEMPTION_LEVEL - - TIME_INT_EXIT /* do timing */ - - movl %gs:CPU_ACTIVE_THREAD,%eax - movl ACT_PCB(%eax),%eax /* get act`s PCB */ - movl PCB_FPS(%eax),%eax /* get pcb's ims.ifps */ - cmpl $0,%eax /* Is there a context */ - je 1f /* Branch if not */ - movl FP_VALID(%eax),%eax /* Load fp_valid */ - cmpl $0,%eax /* Check if valid */ - jne 1f /* Branch if valid */ - clts /* Clear TS */ - jmp 2f -1: - movl %cr0,%eax /* get cr0 */ - orl $(CR0_TS),%eax /* or in TS bit */ - movl %eax,%cr0 /* set cr0 */ -2: - popl %esp /* switch back to old stack */ - - /* Load interrupted code segment into %eax */ - movl R_CS(%esp),%eax /* assume 32-bit state */ - cmpl $(SS_64),SS_FLAVOR(%esp)/* 64-bit? */ - jne 3f - movl R64_CS(%esp),%eax /* 64-bit user mode */ -3: - testb $3,%eax /* user mode, */ - jnz ast_from_interrupt_user /* go handle potential ASTs */ - /* - * we only want to handle preemption requests if - * the interrupt fell in the kernel context - * and preemption isn't disabled - */ - movl %gs:CPU_PENDING_AST,%eax - testl $ AST_URGENT,%eax /* any urgent requests? */ - je ret_to_kernel /* no, nothing to do */ - - cmpl $0,%gs:CPU_PREEMPTION_LEVEL /* preemption disabled? */ - jne ret_to_kernel /* yes, skip it */ - - movl %gs:CPU_KERNEL_STACK,%eax - movl %esp,%ecx - xorl %eax,%ecx - andl $(-KERNEL_STACK_SIZE),%ecx - testl %ecx,%ecx /* are we on the kernel stack? */ - jne ret_to_kernel /* no, skip it */ - - /* - * Take an AST from kernel space. We don't need (and don't want) - * to do as much as the case where the interrupt came from user - * space. 
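
A note on the CR0.TS handling in return_to_iret above: interrupt entry
sets TS unconditionally, and on exit TS is cleared only when the current
thread owns live FPU state (a context exists and FP_VALID is clear,
meaning the registers have not been saved anywhere). Otherwise TS stays
set so the next floating-point instruction takes a device-not-available
trap and state can be allocated or restored lazily. In C terms (a sketch;
struct and helper names are mine, standing in for the ACT_PCB/PCB_FPS/
FP_VALID offsets and the %cr0 sequences):

#include <stddef.h>

struct x86_fp_save { int fp_valid; };			/* FP_VALID */
struct pcb_sketch  { struct x86_fp_save *ifps; };	/* PCB_FPS  */

extern void clts(void);		/* assumed wrapper: clts               */
extern void set_ts(void);	/* assumed wrapper: set CR0_TS in %cr0 */

static void
interrupt_exit_fpu(const struct pcb_sketch *pcb)
{
	struct x86_fp_save *fps = pcb->ifps;

	if (fps != NULL && fps->fp_valid == 0)
		clts();		/* live state is in the FPU: keep it usable */
	else
		set_ts();	/* no state, or state already saved: next FP
				 * use traps and is handled lazily */
}
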
- */ - CCALL1(i386_astintr, $1) - - jmp ret_to_kernel - - -/* - * nested int - simple path, can't preempt etc on way out - */ -int_from_intstack: - incl %gs:CPU_PREEMPTION_LEVEL - incl %gs:CPU_INTERRUPT_LEVEL - - movl %esp, %edx /* i386_saved_state */ - CCALL1(PE_incoming_interrupt, %edx) - - decl %gs:CPU_INTERRUPT_LEVEL - decl %gs:CPU_PREEMPTION_LEVEL - - jmp ret_to_kernel - -/* - * Take an AST from an interrupted user - */ -ast_from_interrupt_user: - movl %gs:CPU_PENDING_AST,%eax - testl %eax,%eax /* pending ASTs? */ - je EXT(ret_to_user) /* no, nothing to do */ - - TIME_TRAP_UENTRY - - jmp EXT(return_from_trap) /* return */ - - -/******************************************************************************************************* - * - * 32bit Tasks - * System call entries via INTR_GATE or sysenter: - * - * esp -> i386_saved_state_t - * cr3 -> kernel directory - * esp -> low based stack - * gs -> CPU_DATA_GS - * cs -> KERNEL_CS - * ss/ds/es -> KERNEL_DS - * - * interrupts disabled - * direction flag cleared - */ - -Entry(lo_sysenter) - /* - * We can be here either for a mach syscall or a unix syscall, - * as indicated by the sign of the code: - */ - movl R_EAX(%esp),%eax - testl %eax,%eax - js EXT(lo_mach_scall) /* < 0 => mach */ - /* > 0 => unix */ - -Entry(lo_unix_scall) - TIME_TRAP_UENTRY - - movl %gs:CPU_KERNEL_STACK,%ebx - xchgl %ebx,%esp /* switch to kernel stack */ - - sti - movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ - movl ACT_TASK(%ecx),%ecx /* point to current task */ - addl $1,TASK_SYSCALLS_UNIX(%ecx) /* increment call count */ - - CCALL1(unix_syscall, %ebx) - /* - * always returns through thread_exception_return - */ - - -Entry(lo_mach_scall) - TIME_TRAP_UENTRY - - movl %gs:CPU_KERNEL_STACK,%ebx - xchgl %ebx,%esp /* switch to kernel stack */ - - sti - movl %gs:CPU_ACTIVE_THREAD,%ecx /* get current thread */ - movl ACT_TASK(%ecx),%ecx /* point to current task */ - addl $1,TASK_SYSCALLS_MACH(%ecx) /* increment call count */ - - CCALL1(mach_call_munger, %ebx) - /* - * always returns through thread_exception_return - */ - - -Entry(lo_mdep_scall) - TIME_TRAP_UENTRY - - movl %gs:CPU_KERNEL_STACK,%ebx - xchgl %ebx,%esp /* switch to kernel stack */ - - sti - - CCALL1(machdep_syscall, %ebx) - /* - * always returns through thread_exception_return - */ - - -Entry(lo_diag_scall) - TIME_TRAP_UENTRY - - movl %gs:CPU_KERNEL_STACK,%ebx // Get the address of the kernel stack - xchgl %ebx,%esp // Switch to it, saving the previous - - CCALL1(diagCall, %ebx) // Call diagnostics - cli // Disable interruptions just in case they were enabled - popl %esp // Get back the original stack - - cmpl $0,%eax // What kind of return is this? - jne EXT(return_to_user) // Normal return, do not check asts... 
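
The branch above and the exception call below are the two halves of
lo_diag_scall's return handling: a non-zero value from diagCall() takes
the normal return (deliberately skipping the AST check), while zero raises
EXC_SYSCALL with subcode 0x6000 and never returns. In C, roughly (a
sketch; EXC_SYSCALL is from mach/exception_types.h, the other signatures
are approximations):

#include <stdint.h>

#define EXC_SYSCALL 7			/* mach/exception_types.h */

extern int  diagCall(void *saved_state);
extern void return_to_user(void) __attribute__((noreturn));
extern void i386_exception(int exc, uint64_t code, uint64_t subcode)
	__attribute__((noreturn));

static void
lo_diag_scall_tail(void *saved_state)
{
	if (diagCall(saved_state) != 0)
		return_to_user();	/* normal return, no AST check */
	/* error return: raise an exception instead */
	i386_exception(EXC_SYSCALL, 0x6000, 1);
}
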
-
- CCALL3(i386_exception, $EXC_SYSCALL, $0x6000, $1)
- // pass what would be the diag syscall
- // error return - cause an exception
- /* no return */
-
-
-
-/*******************************************************************************************************
- *
- * 64bit Tasks
- * System call entries via syscall only:
- *
- * esp -> x86_saved_state64_t
- * cr3 -> kernel directory
- * esp -> low based stack
- * gs -> CPU_DATA_GS
- * cs -> KERNEL_CS
- * ss/ds/es -> KERNEL_DS
- *
- * interrupts disabled
- * direction flag cleared
- */
-
-Entry(lo_syscall)
- /*
- * We can be here either for a mach, unix machdep or diag syscall,
- * as indicated by the syscall class:
- */
- movl R64_RAX(%esp), %eax /* syscall number/class */
- movl %eax, %ebx
- andl $(SYSCALL_CLASS_MASK), %ebx /* syscall class */
- cmpl $(SYSCALL_CLASS_MACH<<SYSCALL_CLASS_SHIFT), %ebx
[...]
-/*
- * dr<i>(address, type, len, persistence)
- */
-ENTRY(dr0)
- movl S_ARG0, %eax
- movl %eax,EXT(dr_addr)
- movl %eax, %db0
- movl $0, %ecx
- jmp 0f
-ENTRY(dr1)
- movl S_ARG0, %eax
- movl %eax,EXT(dr_addr)+1*4
- movl %eax, %db1
- movl $2, %ecx
- jmp 0f
-ENTRY(dr2)
- movl S_ARG0, %eax
- movl %eax,EXT(dr_addr)+2*4
- movl %eax, %db2
- movl $4, %ecx
- jmp 0f
-
-ENTRY(dr3)
- movl S_ARG0, %eax
- movl %eax,EXT(dr_addr)+3*4
- movl %eax, %db3
- movl $6, %ecx
-
-0:
- pushl %ebp
- movl %esp, %ebp
-
- movl %db7, %edx
- movl %edx,EXT(dr_addr)+4*4
- andl dr_msk(,%ecx,2),%edx /* clear out new entry */
- movl %edx,EXT(dr_addr)+5*4
- movzbl B_ARG3, %eax
- andb $3, %al
- shll %cl, %eax
- orl %eax, %edx
-
- movzbl B_ARG1, %eax
- andb $3, %al
- addb $0x10, %cl
- shll %cl, %eax
- orl %eax, %edx
-
- movzbl B_ARG2, %eax
- andb $3, %al
- addb $0x2, %cl
- shll %cl, %eax
- orl %eax, %edx
-
- movl %edx, %db7
- movl %edx,EXT(dr_addr)+7*4
- movl %edx, %eax
- leave
- ret
-
- .data
-dr_msk:
- .long ~0x000f0003
- .long ~0x00f0000c
- .long ~0x0f000030
- .long ~0xf00000c0
-ENTRY(dr_addr)
- .long 0,0,0,0
- .long 0,0,0,0
-
- .text
-
-ENTRY(get_cr0)
- movl %cr0, %eax
- ret
-
-ENTRY(set_cr0)
- movl 4(%esp), %eax
- movl %eax, %cr0
- ret
-
-#ifndef SYMMETRY
-
 /*
 * ffs(mask)
 */
@@ -1470,8 +414,6 @@ Entry(cpu_shutdown)
 div %ecx,%eax
 /* reboot now */
 ret /* this will "never" be executed */
-#endif /* SYMMETRY */
-
 
 /*
 * setbit(int bitno, int *s) - set bit in bit string
@@ -1520,120 +462,3 @@ ENTRY(testbit)
 sbbl %eax,%eax
 ret
 
-ENTRY(get_pc)
- movl 4(%ebp),%eax
- ret
-
-ENTRY(minsecurity)
- pushl %ebp
- movl %esp,%ebp
-/*
- * jail: set the EIP to "jail" to block a kernel thread.
- * Useful to debug synchronization problems on MPs.
- */
-ENTRY(jail)
- jmp EXT(jail)
-
-/*
- * unsigned int
- * div_scale(unsigned int dividend,
- * unsigned int divisor,
- * unsigned int *scale)
- *
- * This function returns (dividend << *scale) //divisor where *scale
- * is the largest possible value before overflow. This is used in
- * computation where precision must be achieved in order to avoid
- * floating point usage.
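
For reference on the lo_syscall dispatch above: the 64-bit syscall number
carries its class in the top byte, so one mask-and-compare routes Mach,
Unix, machine-dependent and diagnostic calls. The constants below are
restated from osfmk/mach/i386/syscall_sw.h of this era and should be
checked against that header; the helper is a sketch:

#include <stdint.h>

#define SYSCALL_CLASS_SHIFT	24
#define SYSCALL_CLASS_MASK	(0xffu << SYSCALL_CLASS_SHIFT)
#define SYSCALL_CLASS_MACH	1	/* Mach traps        */
#define SYSCALL_CLASS_UNIX	2	/* BSD syscalls      */
#define SYSCALL_CLASS_MDEP	3	/* machine-dependent */
#define SYSCALL_CLASS_DIAG	4	/* diagnostics       */

/* What the deleted movl/andl/cmpl sequence computes from %rax: */
static unsigned
syscall_class(uint64_t rax)
{
	return ((uint32_t)rax & SYSCALL_CLASS_MASK) >> SYSCALL_CLASS_SHIFT;
}
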
- * - * Algorithm: - * *scale = 0; - * while (((dividend >> *scale) >= divisor)) - * (*scale)++; - * *scale = 32 - *scale; - * return ((dividend << *scale) / divisor); - */ -ENTRY(div_scale) - PUSH_FRAME - xorl %ecx, %ecx /* *scale = 0 */ - xorl %eax, %eax - movl ARG0, %edx /* get dividend */ -0: - cmpl ARG1, %edx /* if (divisor > dividend) */ - jle 1f /* goto 1f */ - addl $1, %ecx /* (*scale)++ */ - shrdl $1, %edx, %eax /* dividend >> 1 */ - shrl $1, %edx /* dividend >> 1 */ - jmp 0b /* goto 0b */ -1: - divl ARG1 /* (dividend << (32 - *scale)) / divisor */ - movl ARG2, %edx /* get scale */ - movl $32, (%edx) /* *scale = 32 */ - subl %ecx, (%edx) /* *scale -= %ecx */ - POP_FRAME - ret - -/* - * unsigned int - * mul_scale(unsigned int multiplicand, - * unsigned int multiplier, - * unsigned int *scale) - * - * This function returns ((multiplicand * multiplier) >> *scale) where - * scale is the largest possible value before overflow. This is used in - * computation where precision must be achieved in order to avoid - * floating point usage. - * - * Algorithm: - * *scale = 0; - * while (overflow((multiplicand * multiplier) >> *scale)) - * (*scale)++; - * return ((multiplicand * multiplier) >> *scale); - */ -ENTRY(mul_scale) - PUSH_FRAME - xorl %ecx, %ecx /* *scale = 0 */ - movl ARG0, %eax /* get multiplicand */ - mull ARG1 /* multiplicand * multiplier */ -0: - cmpl $0, %edx /* if (!overflow()) */ - je 1f /* goto 1 */ - addl $1, %ecx /* (*scale)++ */ - shrdl $1, %edx, %eax /* (multiplicand * multiplier) >> 1 */ - shrl $1, %edx /* (multiplicand * multiplier) >> 1 */ - jmp 0b -1: - movl ARG2, %edx /* get scale */ - movl %ecx, (%edx) /* set *scale */ - POP_FRAME - ret - - - -/* - * Double-fault exception handler task. The last gasp... - */ -Entry(df_task_start) - CCALL1(panic_double_fault, $(T_DOUBLE_FAULT)) - hlt - - -/* - * machine-check handler task. The last gasp... - */ -Entry(mc_task_start) - CCALL1(panic_machine_check, $(T_MACHINE_CHECK)) - hlt - -/* - * Compatibility mode's last gasp... - */ -Entry(lo_df64) - movl %esp, %eax - CCALL1(panic_double_fault64, %eax) - hlt - -Entry(lo_mc64) - movl %esp, %eax - CCALL1(panic_machine_check64, %eax) - hlt -
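
For readers tracking the deleted utility routines, here are div_scale()
and mul_scale() restated in C, following the algorithms documented in
their comments above (a sketch: divisor must be non-zero, and the
truncation to 32 bits mirrors the assembly):

#include <stdint.h>

/* Largest left shift of dividend whose quotient still fits 32 bits;
 * the shift is returned via *scale. */
static uint32_t
div_scale_c(uint32_t dividend, uint32_t divisor, uint32_t *scale)
{
	uint32_t s = 0;

	while (((uint64_t)dividend >> s) >= divisor)	/* shrink to fit */
		s++;
	*scale = 32 - s;
	return (uint32_t)(((uint64_t)dividend << *scale) / divisor);
}

/* Right-shift the 64-bit product until its high word clears, returning
 * the shifted product and the shift via *scale. */
static uint32_t
mul_scale_c(uint32_t multiplicand, uint32_t multiplier, uint32_t *scale)
{
	uint64_t product = (uint64_t)multiplicand * multiplier;
	uint32_t s = 0;

	while (product >> 32) {		/* the asm's overflow() test */
		product >>= 1;
		s++;
	}
	*scale = s;
	return (uint32_t)product;
}
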