X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/21362eb3e66fd2c787aee132bce100a44d71a99c..4d15aeb193b2c68f1d38666c317f8d3734f5f083:/osfmk/i386/i386_lock.s diff --git a/osfmk/i386/i386_lock.s b/osfmk/i386/i386_lock.s index 4c3fb99e1..f54e040a1 100644 --- a/osfmk/i386/i386_lock.s +++ b/osfmk/i386/i386_lock.s @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2012 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -36,49 +36,18 @@ */ #include -#include #include #include - +#include +#include +#include +#include + #include "assym.s" #define PAUSE rep; nop -/* - * When performance isn't the only concern, it's - * nice to build stack frames... - */ -#define BUILD_STACK_FRAMES (GPROF || \ - ((MACH_LDEBUG || ETAP_LOCK_TRACE) && MACH_KDB)) - -#if BUILD_STACK_FRAMES - -/* STack-frame-relative: */ -#define L_PC B_PC -#define L_ARG0 B_ARG0 -#define L_ARG1 B_ARG1 - -#define LEAF_ENTRY(name) \ - Entry(name); \ - FRAME; \ - MCOUNT - -#define LEAF_ENTRY2(n1,n2) \ - Entry(n1); \ - Entry(n2); \ - FRAME; \ - MCOUNT - -#define LEAF_RET \ - EMARF; \ - ret - -#else /* BUILD_STACK_FRAMES */ - -/* Stack-pointer-relative: */ -#define L_PC S_PC -#define L_ARG0 S_ARG0 -#define L_ARG1 S_ARG1 +#include #define LEAF_ENTRY(name) \ Entry(name) @@ -90,51 +59,46 @@ #define LEAF_RET \ ret -#endif /* BUILD_STACK_FRAMES */ - - /* Non-leaf routines always have a stack frame: */ #define NONLEAF_ENTRY(name) \ Entry(name); \ - FRAME; \ - MCOUNT + FRAME #define NONLEAF_ENTRY2(n1,n2) \ Entry(n1); \ Entry(n2); \ - FRAME; \ - MCOUNT + FRAME #define NONLEAF_RET \ EMARF; \ ret -#define M_ILK (%edx) -#define M_LOCKED MUTEX_LOCKED(%edx) -#define M_WAITERS MUTEX_WAITERS(%edx) -#define M_PROMOTED_PRI MUTEX_PROMOTED_PRI(%edx) -#define M_ITAG MUTEX_ITAG(%edx) -#define M_PTR MUTEX_PTR(%edx) -#if MACH_LDEBUG -#define M_TYPE MUTEX_TYPE(%edx) -#define M_PC MUTEX_PC(%edx) -#define M_THREAD MUTEX_THREAD(%edx) -#endif /* MACH_LDEBUG */ +/* For x86_64, the varargs ABI requires that %al indicate + * how many SSE register contain arguments. In our case, 0 */ +#define ALIGN_STACK() and $0xFFFFFFFFFFFFFFF0, %rsp ; +#define LOAD_STRING_ARG0(label) leaq label(%rip), %rdi ; +#define LOAD_ARG1(x) mov x, %esi ; +#define LOAD_PTR_ARG1(x) mov x, %rsi ; +#define CALL_PANIC() xorb %al,%al ; call EXT(panic) ; -#include -#define CX(addr,reg) addr(,reg,4) +#define CHECK_UNLOCK(current, owner) \ + cmp current, owner ; \ + je 1f ; \ + ALIGN_STACK() ; \ + LOAD_STRING_ARG0(2f) ; \ + CALL_PANIC() ; \ + hlt ; \ + .data ; \ +2: String "Mutex unlock attempted from non-owner thread"; \ + .text ; \ +1: #if MACH_LDEBUG /* * Routines for general lock debugging. */ -#define S_TYPE SLOCK_TYPE(%edx) -#define S_PC SLOCK_PC(%edx) -#define S_THREAD SLOCK_THREAD(%edx) -#define S_DURATIONH SLOCK_DURATIONH(%edx) -#define S_DURATIONL SLOCK_DURATIONL(%edx) /* * Checks for expected lock types and calls "panic" on @@ -144,25 +108,15 @@ #define CHECK_MUTEX_TYPE() \ cmpl $ MUTEX_TAG,M_TYPE ; \ je 1f ; \ - pushl $2f ; \ - call EXT(panic) ; \ + ALIGN_STACK() ; \ + LOAD_STRING_ARG0(2f) ; \ + CALL_PANIC() ; \ hlt ; \ .data ; \ 2: String "not a mutex!" ; \ .text ; \ 1: -#define CHECK_SIMPLE_LOCK_TYPE() \ - cmpl $ USLOCK_TAG,S_TYPE ; \ - je 1f ; \ - pushl $2f ; \ - call EXT(panic) ; \ - hlt ; \ - .data ; \ -2: String "not a simple lock!" 
; \ - .text ; \ -1: - /* * If one or more simplelocks are currently held by a thread, * an attempt to acquire a mutex will cause this check to fail @@ -171,714 +125,1435 @@ */ #if MACH_RT #define CHECK_PREEMPTION_LEVEL() \ + cmpl $0,%gs:CPU_HIBERNATE ; \ + jne 1f ; \ cmpl $0,%gs:CPU_PREEMPTION_LEVEL ; \ je 1f ; \ - pushl $2f ; \ - call EXT(panic) ; \ + ALIGN_STACK() ; \ + movl %gs:CPU_PREEMPTION_LEVEL, %eax ; \ + LOAD_ARG1(%eax) ; \ + LOAD_STRING_ARG0(2f) ; \ + CALL_PANIC() ; \ hlt ; \ .data ; \ -2: String "preemption_level != 0!" ; \ +2: String "preemption_level(%d) != 0!" ; \ .text ; \ 1: #else /* MACH_RT */ #define CHECK_PREEMPTION_LEVEL() #endif /* MACH_RT */ -#define CHECK_NO_SIMPLELOCKS() \ - cmpl $0,%gs:CPU_SIMPLE_LOCK_COUNT ; \ - je 1f ; \ - pushl $2f ; \ - call EXT(panic) ; \ - hlt ; \ - .data ; \ -2: String "simple_locks_held!" ; \ - .text ; \ -1: - -/* - * Verifies return to the correct thread in "unlock" situations. - */ -#define CHECK_THREAD(thd) \ - movl %gs:CPU_ACTIVE_THREAD,%ecx ; \ - testl %ecx,%ecx ; \ - je 1f ; \ - cmpl %ecx,thd ; \ - je 1f ; \ - pushl $2f ; \ - call EXT(panic) ; \ - hlt ; \ - .data ; \ -2: String "wrong thread!" ; \ - .text ; \ -1: - -#define CHECK_MYLOCK(thd) \ - movl %gs:CPU_ACTIVE_THREAD,%ecx ; \ - testl %ecx,%ecx ; \ - je 1f ; \ - cmpl %ecx,thd ; \ +#define CHECK_MYLOCK(current, owner) \ + cmp current, owner ; \ jne 1f ; \ - pushl $2f ; \ - call EXT(panic) ; \ + ALIGN_STACK() ; \ + LOAD_STRING_ARG0(2f) ; \ + CALL_PANIC() ; \ hlt ; \ .data ; \ -2: String "mylock attempt!" ; \ +2: String "Attempt to recursively lock a non-recursive lock"; \ .text ; \ 1: -#define METER_SIMPLE_LOCK_LOCK(reg) \ - pushl reg ; \ - call EXT(meter_simple_lock) ; \ - popl reg - -#define METER_SIMPLE_LOCK_UNLOCK(reg) \ - pushl reg ; \ - call EXT(meter_simple_unlock) ; \ - popl reg - #else /* MACH_LDEBUG */ #define CHECK_MUTEX_TYPE() -#define CHECK_SIMPLE_LOCK_TYPE -#define CHECK_THREAD(thd) #define CHECK_PREEMPTION_LEVEL() -#define CHECK_NO_SIMPLELOCKS() #define CHECK_MYLOCK(thd) -#define METER_SIMPLE_LOCK_LOCK(reg) -#define METER_SIMPLE_LOCK_UNLOCK(reg) #endif /* MACH_LDEBUG */ +#define PREEMPTION_DISABLE \ + incl %gs:CPU_PREEMPTION_LEVEL + +#define PREEMPTION_LEVEL_DEBUG 1 +#if PREEMPTION_LEVEL_DEBUG +#define PREEMPTION_ENABLE \ + decl %gs:CPU_PREEMPTION_LEVEL ; \ + js 17f ; \ + jnz 19f ; \ + testl $AST_URGENT,%gs:CPU_PENDING_AST ; \ + jz 19f ; \ + PUSHF ; \ + testl $EFL_IF, S_PC ; \ + jz 18f ; \ + POPF ; \ + int $(T_PREEMPT) ; \ + jmp 19f ; \ +17: \ + call _preemption_underflow_panic ; \ +18: \ + POPF ; \ +19: +#else +#define PREEMPTION_ENABLE \ + decl %gs:CPU_PREEMPTION_LEVEL ; \ + jnz 19f ; \ + testl $AST_URGENT,%gs:CPU_PENDING_AST ; \ + jz 19f ; \ + PUSHF ; \ + testl $EFL_IF, S_PC ; \ + jz 18f ; \ + POPF ; \ + int $(T_PREEMPT) ; \ + jmp 19f ; \ +18: \ + POPF ; \ +19: +#endif + + +#if CONFIG_DTRACE + + .globl _lockstat_probe + .globl _lockstat_probemap + +/* + * LOCKSTAT_LABEL creates a dtrace symbol which contains + * a pointer into the lock code function body. 
At that + * point is a "ret" instruction that can be patched into + * a "nop" + */ + +#define LOCKSTAT_LABEL(lab) \ + .data ;\ + .globl lab ;\ + lab: ;\ + .quad 9f ;\ + .text ;\ + 9: + +#define LOCKSTAT_RECORD(id, lck) \ + push %rbp ; \ + mov %rsp,%rbp ; \ + movl _lockstat_probemap + (id * 4)(%rip),%eax ; \ + test %eax,%eax ; \ + je 9f ; \ + mov lck, %rsi ; \ + mov %rax, %rdi ; \ + mov $0, %rdx ; \ + mov $0, %rcx ; \ + mov $0, %r8 ; \ + mov $0, %r9 ; \ + call *_lockstat_probe(%rip) ; \ +9: leave + /* ret - left to subsequent code, e.g. return values */ + +#endif /* CONFIG_DTRACE */ /* - * void hw_lock_init(hw_lock_t) + * For most routines, the hw_lock_t pointer is loaded into a + * register initially, and then either a byte or register-sized + * word is loaded/stored to the pointer + */ + +/* + * void hw_lock_byte_init(volatile uint8_t *) * - * Initialize a hardware lock. + * Initialize a hardware byte lock. */ -LEAF_ENTRY(hw_lock_init) - movl L_ARG0,%edx /* fetch lock pointer */ - movl $0,0(%edx) /* clear the lock */ +LEAF_ENTRY(hw_lock_byte_init) + movb $0, (%rdi) /* clear the lock */ LEAF_RET /* - * void hw_lock_lock(hw_lock_t) + * void hw_lock_byte_lock(uint8_t *lock_byte) * - * Acquire lock, spinning until it becomes available. + * Acquire byte sized lock operand, spinning until it becomes available. * MACH_RT: also return with preemption disabled. */ -LEAF_ENTRY(hw_lock_lock) - movl L_ARG0,%edx /* fetch lock pointer */ - movl L_PC,%ecx -1: DISABLE_PREEMPTION - movl 0(%edx), %eax - testl %eax,%eax /* lock locked? */ +LEAF_ENTRY(hw_lock_byte_lock) + PREEMPTION_DISABLE + movl $1, %ecx /* Set lock value */ +1: + movb (%rdi), %al /* Load byte at address */ + testb %al,%al /* lock locked? */ jne 3f /* branch if so */ - lock; cmpxchgl %ecx,0(%edx) /* try to acquire the HW lock */ + lock; cmpxchg %cl,(%rdi) /* attempt atomic compare exchange */ jne 3f - movl $1,%eax /* In case this was a timeout call */ LEAF_RET /* if yes, then nothing left to do */ - -3: ENABLE_PREEMPTION /* no reason we can't be preemptable */ +3: PAUSE /* pause for hyper-threading */ jmp 1b /* try again */ /* - * unsigned int hw_lock_to(hw_lock_t, unsigned int) + * void hw_lock_byte_unlock(uint8_t *lock_byte) * - * Acquire lock, spinning until it becomes available or timeout. - * MACH_RT: also return with preemption disabled. + * Unconditionally release byte sized lock operand. + * MACH_RT: release preemption level. */ -LEAF_ENTRY(hw_lock_to) + +LEAF_ENTRY(hw_lock_byte_unlock) + movb $0, (%rdi) /* Clear the lock byte */ + PREEMPTION_ENABLE + LEAF_RET + +/* + * Reader-writer lock fastpaths. These currently exist for the + * shared lock acquire, the exclusive lock acquire, the shared to + * exclusive upgrade and the release paths (where they reduce overhead + * considerably) -- these are by far the most frequently used routines + * + * The following should reflect the layout of the bitfield embedded within + * the lck_rw_t structure (see i386/locks.h). 
+ */ +#define LCK_RW_INTERLOCK (0x1 << 16) + +#define LCK_RW_PRIV_EXCL (0x1 << 24) +#define LCK_RW_WANT_UPGRADE (0x2 << 24) +#define LCK_RW_WANT_WRITE (0x4 << 24) +#define LCK_R_WAITING (0x8 << 24) +#define LCK_W_WAITING (0x10 << 24) + +#define LCK_RW_SHARED_MASK (0xffff) + +/* + * For most routines, the lck_rw_t pointer is loaded into a + * register initially, and the flags bitfield loaded into another + * register and examined + */ + +#define RW_LOCK_SHARED_MASK (LCK_RW_INTERLOCK | LCK_RW_WANT_UPGRADE | LCK_RW_WANT_WRITE) +/* + * void lck_rw_lock_shared(lck_rw_t *) + * + */ +Entry(lck_rw_lock_shared) + mov %gs:CPU_ACTIVE_THREAD, %rcx /* Load thread pointer */ + incl TH_RWLOCK_COUNT(%rcx) /* Increment count before atomic CAS */ 1: - movl L_ARG0,%edx /* fetch lock pointer */ - movl L_PC,%ecx + mov (%rdi), %eax /* Load state bitfield and interlock */ + testl $(RW_LOCK_SHARED_MASK), %eax /* Eligible for fastpath? */ + jne 3f + + movl %eax, %ecx /* original value in %eax for cmpxchgl */ + incl %ecx /* Increment reader refcount */ + lock + cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ + jne 2f + +#if CONFIG_DTRACE /* - * Attempt to grab the lock immediately - * - fastpath without timeout nonsense. + * Dtrace lockstat event: LS_LCK_RW_LOCK_SHARED_ACQUIRE + * Implemented by swapping between return and no-op instructions. + * See bsd/dev/dtrace/lockstat.c. */ - DISABLE_PREEMPTION - movl 0(%edx), %eax - testl %eax,%eax /* lock locked? */ - jne 2f /* branch if so */ - lock; cmpxchgl %ecx,0(%edx) /* try to acquire the HW lock */ - jne 2f /* branch on failure */ - movl $1,%eax - LEAF_RET - + LOCKSTAT_LABEL(_lck_rw_lock_shared_lockstat_patch_point) + ret + /* + Fall thru when patched, counting on lock pointer in %rdi + */ + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, %rdi) +#endif + ret 2: -#define INNER_LOOP_COUNT 1000 + PAUSE + jmp 1b +3: + jmp EXT(lck_rw_lock_shared_gen) + + + +#define RW_TRY_LOCK_SHARED_MASK (LCK_RW_WANT_UPGRADE | LCK_RW_WANT_WRITE) +/* + * void lck_rw_try_lock_shared(lck_rw_t *) + * + */ +Entry(lck_rw_try_lock_shared) +1: + mov (%rdi), %eax /* Load state bitfield and interlock */ + testl $(LCK_RW_INTERLOCK), %eax + jne 2f + testl $(RW_TRY_LOCK_SHARED_MASK), %eax + jne 3f /* lock is busy */ + + movl %eax, %ecx /* original value in %eax for cmpxchgl */ + incl %ecx /* Increment reader refcount */ + lock + cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ + jne 2f + + mov %gs:CPU_ACTIVE_THREAD, %rcx /* Load thread pointer */ + incl TH_RWLOCK_COUNT(%rcx) /* Increment count on success. */ + /* There is a 3 instr window where preemption may not notice rwlock_count after cmpxchg */ + +#if CONFIG_DTRACE + movl $1, %eax /* - * Failed to get the lock so set the timeout - * and then spin re-checking the lock but pausing - * every so many (INNER_LOOP_COUNT) spins to check for timeout. + * Dtrace lockstat event: LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE + * Implemented by swapping between return and no-op instructions. + * See bsd/dev/dtrace/lockstat.c. 
*/ - movl L_ARG1,%ecx /* fetch timeout */ - push %edi - push %ebx - mov %edx,%edi - - rdtsc /* read cyclecount into %edx:%eax */ - addl %ecx,%eax /* fetch and timeout */ - adcl $0,%edx /* add carry */ - mov %edx,%ecx - mov %eax,%ebx /* %ecx:%ebx is the timeout expiry */ + LOCKSTAT_LABEL(_lck_rw_try_lock_shared_lockstat_patch_point) + ret + /* Fall thru when patched, counting on lock pointer in %rdi */ + LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, %rdi) +#endif + movl $1, %eax /* return TRUE */ + ret +2: + PAUSE + jmp 1b 3: - ENABLE_PREEMPTION /* no reason not to be preempted now */ + xorl %eax, %eax + ret + + +#define RW_LOCK_EXCLUSIVE_HELD (LCK_RW_WANT_WRITE | LCK_RW_WANT_UPGRADE) +/* + * int lck_rw_grab_shared(lck_rw_t *) + * + */ +Entry(lck_rw_grab_shared) +1: + mov (%rdi), %eax /* Load state bitfield and interlock */ + testl $(LCK_RW_INTERLOCK), %eax + jne 5f + testl $(RW_LOCK_EXCLUSIVE_HELD), %eax + jne 3f +2: + movl %eax, %ecx /* original value in %eax for cmpxchgl */ + incl %ecx /* Increment reader refcount */ + lock + cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ + jne 4f + + movl $1, %eax /* return success */ + ret +3: + testl $(LCK_RW_SHARED_MASK), %eax + je 4f + testl $(LCK_RW_PRIV_EXCL), %eax + je 2b 4: - /* - * The inner-loop spin to look for the lock being freed. - */ - mov $(INNER_LOOP_COUNT),%edx + xorl %eax, %eax /* return failure */ + ret 5: - PAUSE /* pause for hyper-threading */ - movl 0(%edi),%eax /* spin checking lock value in cache */ - testl %eax,%eax - je 6f /* zero => unlocked, try to grab it */ - decl %edx /* decrement inner loop count */ - jnz 5b /* time to check for timeout? */ + PAUSE + jmp 1b + + +#define RW_LOCK_EXCLUSIVE_MASK (LCK_RW_SHARED_MASK | LCK_RW_INTERLOCK | \ + LCK_RW_WANT_UPGRADE | LCK_RW_WANT_WRITE) +/* + * void lck_rw_lock_exclusive(lck_rw_t*) + * + */ +Entry(lck_rw_lock_exclusive) + mov %gs:CPU_ACTIVE_THREAD, %rcx /* Load thread pointer */ + incl TH_RWLOCK_COUNT(%rcx) /* Increment count before atomic CAS */ +1: + mov (%rdi), %eax /* Load state bitfield, interlock and shared count */ + testl $(RW_LOCK_EXCLUSIVE_MASK), %eax /* Eligible for fastpath? */ + jne 3f /* no, go slow */ + + movl %eax, %ecx /* original value in %eax for cmpxchgl */ + orl $(LCK_RW_WANT_WRITE), %ecx + lock + cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ + jne 2f + +#if CONFIG_DTRACE /* - * Here after spinning INNER_LOOP_COUNT times, check for timeout + * Dtrace lockstat event: LS_LCK_RW_LOCK_EXCL_ACQUIRE + * Implemented by swapping between return and no-op instructions. + * See bsd/dev/dtrace/lockstat.c. */ - rdtsc /* cyclecount into %edx:%eax */ - cmpl %ecx,%edx /* compare high-order 32-bits */ - jb 4b /* continue spinning if less, or */ - cmpl %ebx,%eax /* compare low-order 32-bits */ - jb 5b /* continue if less, else bail */ - xor %eax,%eax /* with 0 return value */ - pop %ebx - pop %edi - LEAF_RET + LOCKSTAT_LABEL(_lck_rw_lock_exclusive_lockstat_patch_point) + ret + /* Fall thru when patched, counting on lock pointer in %rdi */ + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, %rdi) +#endif + ret +2: + PAUSE + jmp 1b +3: + jmp EXT(lck_rw_lock_exclusive_gen) -6: + + +#define RW_TRY_LOCK_EXCLUSIVE_MASK (LCK_RW_SHARED_MASK | LCK_RW_WANT_UPGRADE | LCK_RW_WANT_WRITE) +/* + * void lck_rw_try_lock_exclusive(lck_rw_t *) + * + * Tries to get a write lock. + * + * Returns FALSE if the lock is not held on return. 
+ */ +Entry(lck_rw_try_lock_exclusive) +1: + mov (%rdi), %eax /* Load state bitfield, interlock and shared count */ + testl $(LCK_RW_INTERLOCK), %eax + jne 2f + testl $(RW_TRY_LOCK_EXCLUSIVE_MASK), %eax + jne 3f /* can't get it */ + + movl %eax, %ecx /* original value in %eax for cmpxchgl */ + orl $(LCK_RW_WANT_WRITE), %ecx + lock + cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ + jne 2f + + mov %gs:CPU_ACTIVE_THREAD, %rcx /* Load thread pointer */ + incl TH_RWLOCK_COUNT(%rcx) /* Increment count on success. */ + /* There is a 3 instr window where preemption may not notice rwlock_count after cmpxchg */ + +#if CONFIG_DTRACE + movl $1, %eax /* - * Here to try to grab the lock that now appears to be free - * after contention. + * Dtrace lockstat event: LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE + * Implemented by swapping between return and no-op instructions. + * See bsd/dev/dtrace/lockstat.c. */ - movl 8+L_PC,%edx /* calling pc (8+ for pushed regs) */ - DISABLE_PREEMPTION - lock; cmpxchgl %edx,0(%edi) /* try to acquire the HW lock */ - jne 3b /* no - spin again */ - movl $1,%eax /* yes */ - pop %ebx - pop %edi - LEAF_RET + LOCKSTAT_LABEL(_lck_rw_try_lock_exclusive_lockstat_patch_point) + ret + /* Fall thru when patched, counting on lock pointer in %rdi */ + LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, %rdi) +#endif + movl $1, %eax /* return TRUE */ + ret +2: + PAUSE + jmp 1b +3: + xorl %eax, %eax /* return FALSE */ + ret + + /* - * void hw_lock_unlock(hw_lock_t) + * void lck_rw_lock_shared_to_exclusive(lck_rw_t*) * - * Unconditionally release lock. - * MACH_RT: release preemption level. + * fastpath can be taken if + * the current rw_shared_count == 1 + * AND the interlock is clear + * AND RW_WANT_UPGRADE is not set + * + * note that RW_WANT_WRITE could be set, but will not + * be indicative of an exclusive hold since we have + * a read count on the lock that we have not yet released + * we can blow by that state since the lck_rw_lock_exclusive + * function will block until rw_shared_count == 0 and + * RW_WANT_UPGRADE is clear... it does this check behind + * the interlock which we are also checking for + * + * to make the transition we must be able to atomically + * set RW_WANT_UPGRADE and get rid of the read count we hold */ -LEAF_ENTRY(hw_lock_unlock) - movl L_ARG0,%edx /* fetch lock pointer */ - movl $0,0(%edx) /* clear the lock */ - ENABLE_PREEMPTION - LEAF_RET +Entry(lck_rw_lock_shared_to_exclusive) +1: + mov (%rdi), %eax /* Load state bitfield, interlock and shared count */ + testl $(LCK_RW_INTERLOCK), %eax + jne 7f + testl $(LCK_RW_WANT_UPGRADE), %eax + jne 2f + + movl %eax, %ecx /* original value in %eax for cmpxchgl */ + orl $(LCK_RW_WANT_UPGRADE), %ecx /* ask for WANT_UPGRADE */ + decl %ecx /* and shed our read count */ + lock + cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ + jne 7f + /* we now own the WANT_UPGRADE */ + testl $(LCK_RW_SHARED_MASK), %ecx /* check to see if all of the readers are drained */ + jne 8f /* if not, we need to go wait */ + +#if CONFIG_DTRACE + movl $1, %eax + /* + * Dtrace lockstat event: LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE + * Implemented by swapping between return and no-op instructions. + * See bsd/dev/dtrace/lockstat.c. 
+ */ + LOCKSTAT_LABEL(_lck_rw_lock_shared_to_exclusive_lockstat_patch_point) + ret + /* Fall thru when patched, counting on lock pointer in %rdi */ + LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, %rdi) +#endif + movl $1, %eax /* return success */ + ret + +2: /* someone else already holds WANT_UPGRADE */ + movl %eax, %ecx /* original value in %eax for cmpxchgl */ + decl %ecx /* shed our read count */ + testl $(LCK_RW_SHARED_MASK), %ecx + jne 3f /* we were the last reader */ + andl $(~LCK_W_WAITING), %ecx /* so clear the wait indicator */ +3: + lock + cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ + jne 7f + + mov %eax, %esi /* put old flags as second arg */ + /* lock is alread in %rdi */ + call EXT(lck_rw_lock_shared_to_exclusive_failure) + ret /* and pass the failure return along */ +7: + PAUSE + jmp 1b +8: + jmp EXT(lck_rw_lock_shared_to_exclusive_success) + + + .cstring +rwl_release_error_str: + .asciz "Releasing non-exclusive RW lock without a reader refcount!" + .text + /* - * unsigned int hw_lock_try(hw_lock_t) - * MACH_RT: returns with preemption disabled on success. + * lck_rw_type_t lck_rw_done(lck_rw_t *) + * */ -LEAF_ENTRY(hw_lock_try) - movl L_ARG0,%edx /* fetch lock pointer */ - - movl L_PC,%ecx - DISABLE_PREEMPTION - movl 0(%edx),%eax - testl %eax,%eax - jne 1f - lock; cmpxchgl %ecx,0(%edx) /* try to acquire the HW lock */ - jne 1f - - movl $1,%eax /* success */ - LEAF_RET +Entry(lck_rw_done) +1: + mov (%rdi), %eax /* Load state bitfield, interlock and reader count */ + testl $(LCK_RW_INTERLOCK), %eax + jne 7f /* wait for interlock to clear */ + + movl %eax, %ecx /* keep original value in %eax for cmpxchgl */ + testl $(LCK_RW_SHARED_MASK), %ecx /* if reader count == 0, must be exclusive lock */ + je 2f + decl %ecx /* Decrement reader count */ + testl $(LCK_RW_SHARED_MASK), %ecx /* if reader count has now gone to 0, check for waiters */ + je 4f + jmp 6f +2: + testl $(LCK_RW_WANT_UPGRADE), %ecx + je 3f + andl $(~LCK_RW_WANT_UPGRADE), %ecx + jmp 4f +3: + testl $(LCK_RW_WANT_WRITE), %ecx + je 8f /* lock is not 'owned', go panic */ + andl $(~LCK_RW_WANT_WRITE), %ecx +4: + /* + * test the original values to match what + * lck_rw_done_gen is going to do to determine + * which wakeups need to happen... + * + * if !(fake_lck->lck_rw_priv_excl && fake_lck->lck_w_waiting) + */ + testl $(LCK_W_WAITING), %eax + je 5f + andl $(~LCK_W_WAITING), %ecx + + testl $(LCK_RW_PRIV_EXCL), %eax + jne 6f +5: + andl $(~LCK_R_WAITING), %ecx +6: + lock + cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ + jne 7f -1: ENABLE_PREEMPTION /* failure: release preemption... */ - xorl %eax,%eax /* ...and return failure */ - LEAF_RET + mov %eax,%esi /* old flags in %rsi */ + /* lock is in %rdi already */ + call EXT(lck_rw_done_gen) + ret +7: + PAUSE + jmp 1b +8: + ALIGN_STACK() + LOAD_STRING_ARG0(rwl_release_error_str) + CALL_PANIC() + + /* - * unsigned int hw_lock_held(hw_lock_t) - * MACH_RT: doesn't change preemption state. - * N.B. Racy, of course. 
+ * lck_rw_type_t lck_rw_lock_exclusive_to_shared(lck_rw_t *) + * */ -LEAF_ENTRY(hw_lock_held) - movl L_ARG0,%edx /* fetch lock pointer */ +Entry(lck_rw_lock_exclusive_to_shared) +1: + mov (%rdi), %eax /* Load state bitfield, interlock and reader count */ + testl $(LCK_RW_INTERLOCK), %eax + jne 6f /* wait for interlock to clear */ + + movl %eax, %ecx /* keep original value in %eax for cmpxchgl */ + incl %ecx /* Increment reader count */ + + testl $(LCK_RW_WANT_UPGRADE), %ecx + je 2f + andl $(~LCK_RW_WANT_UPGRADE), %ecx + jmp 3f +2: + andl $(~LCK_RW_WANT_WRITE), %ecx +3: + /* + * test the original values to match what + * lck_rw_lock_exclusive_to_shared_gen is going to do to determine + * which wakeups need to happen... + * + * if !(fake_lck->lck_rw_priv_excl && fake_lck->lck_w_waiting) + */ + testl $(LCK_W_WAITING), %eax + je 4f + testl $(LCK_RW_PRIV_EXCL), %eax + jne 5f +4: + andl $(~LCK_R_WAITING), %ecx +5: + lock + cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ + jne 6f - movl 0(%edx),%eax /* check lock value */ - testl %eax,%eax - movl $1,%ecx - cmovne %ecx,%eax /* 0 => unlocked, 1 => locked */ - LEAF_RET + mov %eax,%esi + call EXT(lck_rw_lock_exclusive_to_shared_gen) + ret +6: + PAUSE + jmp 1b -LEAF_ENTRY(mutex_init) - movl L_ARG0,%edx /* fetch lock pointer */ - xorl %eax,%eax - movl %eax,M_ILK /* clear interlock */ - movl %eax,M_LOCKED /* clear locked flag */ - movw %ax,M_WAITERS /* init waiter count */ - movw %ax,M_PROMOTED_PRI -#if MACH_LDEBUG - movl $ MUTEX_TAG,M_TYPE /* set lock type */ - movl %eax,M_PC /* init caller pc */ - movl %eax,M_THREAD /* and owning thread */ -#endif - LEAF_RET +/* + * int lck_rw_grab_want(lck_rw_t *) + * + */ +Entry(lck_rw_grab_want) +1: + mov (%rdi), %eax /* Load state bitfield, interlock and reader count */ + testl $(LCK_RW_INTERLOCK), %eax + jne 3f /* wait for interlock to clear */ + testl $(LCK_RW_WANT_WRITE), %eax /* want_write has been grabbed by someone else */ + jne 2f /* go return failure */ + + movl %eax, %ecx /* original value in %eax for cmpxchgl */ + orl $(LCK_RW_WANT_WRITE), %ecx + lock + cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */ + jne 2f + /* we now own want_write */ + movl $1, %eax /* return success */ + ret +2: + xorl %eax, %eax /* return failure */ + ret +3: + PAUSE + jmp 1b -NONLEAF_ENTRY2(mutex_lock,_mutex_lock) + +#define RW_LOCK_SHARED_OR_UPGRADE_MASK (LCK_RW_SHARED_MASK | LCK_RW_INTERLOCK | LCK_RW_WANT_UPGRADE) +/* + * int lck_rw_held_read_or_upgrade(lck_rw_t *) + * + */ +Entry(lck_rw_held_read_or_upgrade) + mov (%rdi), %eax + andl $(RW_LOCK_SHARED_OR_UPGRADE_MASK), %eax + ret - movl B_ARG0,%edx /* fetch lock pointer */ - CHECK_MUTEX_TYPE() - CHECK_NO_SIMPLELOCKS() - CHECK_PREEMPTION_LEVEL() + +/* + * N.B.: On x86, statistics are currently recorded for all indirect mutexes. + * Also, only the acquire attempt count (GRP_MTX_STAT_UTIL) is maintained + * as a 64-bit quantity (this matches the existing PowerPC implementation, + * and the new x86 specific statistics are also maintained as 32-bit + * quantities). + * + * + * Enable this preprocessor define to record the first miss alone + * By default, we count every miss, hence multiple misses may be + * recorded for a single lock acquire attempt via lck_mtx_lock + */ +#undef LOG_FIRST_MISS_ALONE - pushf /* save interrupt state */ - cli /* disable interrupts */ +/* + * This preprocessor define controls whether the R-M-W update of the + * per-group statistics elements are atomic (LOCK-prefixed) + * Enabled by default. 
+ */ +#define ATOMIC_STAT_UPDATES 1 -ml_retry: - movl B_PC,%ecx +#if defined(ATOMIC_STAT_UPDATES) +#define LOCK_IF_ATOMIC_STAT_UPDATES lock +#else +#define LOCK_IF_ATOMIC_STAT_UPDATES +#endif /* ATOMIC_STAT_UPDATES */ -ml_get_hw: - movl M_ILK,%eax /* read interlock */ - testl %eax,%eax /* unlocked? */ - je 1f /* yes - attempt to lock it */ - PAUSE /* no - pause */ - jmp ml_get_hw /* try again */ -1: - lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */ - jne ml_get_hw /* branch on failure to retry */ - movl M_LOCKED,%ecx /* get lock owner */ - testl %ecx,%ecx /* is the mutex locked? */ - jne ml_fail /* yes, we lose */ - movl %gs:CPU_ACTIVE_THREAD,%ecx - movl %ecx,M_LOCKED +/* + * For most routines, the lck_mtx_t pointer is loaded into a + * register initially, and the owner field checked for indirection. + * Eventually the lock owner is loaded into a register and examined. + */ -#if MACH_LDEBUG - movl %ecx,M_THREAD - movl B_PC,%ecx - movl %ecx,M_PC +#define M_OWNER MUTEX_OWNER +#define M_PTR MUTEX_PTR +#define M_STATE MUTEX_STATE + + +#define LMTX_ENTER_EXTENDED \ + mov M_PTR(%rdx), %rdx ; \ + xor %r11, %r11 ; \ + mov MUTEX_GRP(%rdx), %r10 ; \ + LOCK_IF_ATOMIC_STAT_UPDATES ; \ + incq GRP_MTX_STAT_UTIL(%r10) + + +#if LOG_FIRST_MISS_ALONE +#define LMTX_UPDATE_MISS \ + test $1, %r11 ; \ + jnz 11f ; \ + LOCK_IF_ATOMIC_STAT_UPDATES ; \ + incl GRP_MTX_STAT_MISS(%r10) ; \ + or $1, %r11 ; \ +11: +#else +#define LMTX_UPDATE_MISS \ + LOCK_IF_ATOMIC_STAT_UPDATES ; \ + incl GRP_MTX_STAT_MISS(%r10) #endif + - pushl %edx /* save mutex address */ - pushl %edx - call EXT(lck_mtx_lock_acquire) - addl $4,%esp - popl %edx /* restore mutex address */ +#if LOG_FIRST_MISS_ALONE +#define LMTX_UPDATE_WAIT \ + test $2, %r11 ; \ + jnz 11f ; \ + LOCK_IF_ATOMIC_STAT_UPDATES ; \ + incl GRP_MTX_STAT_WAIT(%r10) ; \ + or $2, %r11 ; \ +11: +#else +#define LMTX_UPDATE_WAIT \ + LOCK_IF_ATOMIC_STAT_UPDATES ; \ + incl GRP_MTX_STAT_WAIT(%r10) +#endif - xorl %eax,%eax - movl %eax,M_ILK - popf /* restore interrupt state */ +/* + * Record the "direct wait" statistic, which indicates if a + * miss proceeded to block directly without spinning--occurs + * if the owner of the mutex isn't running on another processor + * at the time of the check. + */ +#define LMTX_UPDATE_DIRECT_WAIT \ + LOCK_IF_ATOMIC_STAT_UPDATES ; \ + incl GRP_MTX_STAT_DIRECT_WAIT(%r10) - NONLEAF_RET + +#define LMTX_CALLEXT1(func_name) \ + cmp %rdx, %rdi ; \ + je 12f ; \ + push %r10 ; \ + push %r11 ; \ +12: push %rdi ; \ + push %rdx ; \ + mov %rdx, %rdi ; \ + call EXT(func_name) ; \ + pop %rdx ; \ + pop %rdi ; \ + cmp %rdx, %rdi ; \ + je 12f ; \ + pop %r11 ; \ + pop %r10 ; \ +12: + +#define LMTX_CALLEXT2(func_name, reg) \ + cmp %rdx, %rdi ; \ + je 12f ; \ + push %r10 ; \ + push %r11 ; \ +12: push %rdi ; \ + push %rdx ; \ + mov reg, %rsi ; \ + mov %rdx, %rdi ; \ + call EXT(func_name) ; \ + pop %rdx ; \ + pop %rdi ; \ + cmp %rdx, %rdi ; \ + je 12f ; \ + pop %r11 ; \ + pop %r10 ; \ +12: + + +#define M_WAITERS_MSK 0x0000ffff +#define M_PRIORITY_MSK 0x00ff0000 +#define M_ILOCKED_MSK 0x01000000 +#define M_MLOCKED_MSK 0x02000000 +#define M_PROMOTED_MSK 0x04000000 +#define M_SPIN_MSK 0x08000000 -ml_fail: -ml_block: - CHECK_MYLOCK(M_THREAD) - pushl M_LOCKED - pushl %edx /* push mutex address */ - call EXT(lck_mtx_lock_wait) /* wait for the lock */ - addl $8,%esp - movl B_ARG0,%edx /* refetch mutex address */ - jmp ml_retry /* and try again */ +/* + * void lck_mtx_assert(lck_mtx_t* l, unsigned int) + * Takes the address of a lock, and an assertion type as parameters. 
+ * The assertion can take one of two forms determine by the type + * parameter: either the lock is held by the current thread, and the + * type is LCK_MTX_ASSERT_OWNED, or it isn't and the type is + * LCK_MTX_ASSERT_NOTOWNED. Calls panic on assertion failure. + * + */ -NONLEAF_ENTRY2(mutex_try,_mutex_try) +NONLEAF_ENTRY(lck_mtx_assert) + mov %rdi, %rdx /* Load lock address */ + mov %gs:CPU_ACTIVE_THREAD, %rax /* Load current thread */ + + mov M_STATE(%rdx), %ecx + cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */ + jne 0f + mov M_PTR(%rdx), %rdx /* If so, take indirection */ +0: + mov M_OWNER(%rdx), %rcx /* Load owner */ + cmp $(MUTEX_ASSERT_OWNED), %rsi + jne 2f /* Assert ownership? */ + cmp %rax, %rcx /* Current thread match? */ + jne 3f /* no, go panic */ + testl $(M_ILOCKED_MSK | M_MLOCKED_MSK), M_STATE(%rdx) + je 3f +1: /* yes, we own it */ + NONLEAF_RET +2: + cmp %rax, %rcx /* Current thread match? */ + jne 1b /* No, return */ + ALIGN_STACK() + LOAD_PTR_ARG1(%rdx) + LOAD_STRING_ARG0(mutex_assert_owned_str) + jmp 4f +3: + ALIGN_STACK() + LOAD_PTR_ARG1(%rdx) + LOAD_STRING_ARG0(mutex_assert_not_owned_str) +4: + CALL_PANIC() - movl B_ARG0,%edx /* fetch lock pointer */ - CHECK_MUTEX_TYPE() - CHECK_NO_SIMPLELOCKS() +lck_mtx_destroyed: + ALIGN_STACK() + LOAD_PTR_ARG1(%rdx) + LOAD_STRING_ARG0(mutex_interlock_destroyed_str) + CALL_PANIC() + - movl B_PC,%ecx +.data +mutex_assert_not_owned_str: + .asciz "mutex (%p) not owned\n" +mutex_assert_owned_str: + .asciz "mutex (%p) owned\n" +mutex_interlock_destroyed_str: + .asciz "trying to interlock destroyed mutex (%p)" +.text - pushf /* save interrupt state */ - cli /* disable interrupts */ -mt_get_hw: - movl M_ILK,%eax /* read interlock */ - testl %eax,%eax /* unlocked? */ - je 1f /* yes - attempt to lock it */ - PAUSE /* no - pause */ - jmp mt_get_hw /* try again */ -1: - lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */ - jne mt_get_hw /* branch on failure to retry */ - movl M_LOCKED,%ecx /* get lock owner */ - testl %ecx,%ecx /* is the mutex locked? 
*/ - jne mt_fail /* yes, we lose */ - movl %gs:CPU_ACTIVE_THREAD,%ecx - movl %ecx,M_LOCKED +/* + * lck_mtx_lock() + * lck_mtx_try_lock() + * lck_mtx_unlock() + * lck_mtx_lock_spin() + * lck_mtx_lock_spin_always() + * lck_mtx_try_lock_spin() + * lck_mtx_try_lock_spin_always() + * lck_mtx_convert_spin() + */ +NONLEAF_ENTRY(lck_mtx_lock_spin_always) + mov %rdi, %rdx /* fetch lock pointer */ + jmp Llmls_avoid_check + +NONLEAF_ENTRY(lck_mtx_lock_spin) + mov %rdi, %rdx /* fetch lock pointer */ + CHECK_PREEMPTION_LEVEL() +Llmls_avoid_check: + mov M_STATE(%rdx), %ecx + test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */ + jnz Llmls_slow +Llmls_try: /* no - can't be INDIRECT, DESTROYED or locked */ + mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ + or $(M_ILOCKED_MSK | M_SPIN_MSK), %ecx + + PREEMPTION_DISABLE + lock + cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ + jne Llmls_busy_disabled + + mov %gs:CPU_ACTIVE_THREAD, %rax + mov %rax, M_OWNER(%rdx) /* record owner of interlock */ #if MACH_LDEBUG - movl %ecx,M_THREAD - movl B_PC,%ecx - movl %ecx,M_PC + test %rax, %rax + jz 1f + incl TH_MUTEX_COUNT(%rax) /* lock statistic */ +1: +#endif /* MACH_LDEBUG */ + + /* return with the interlock held and preemption disabled */ + leave +#if CONFIG_DTRACE + LOCKSTAT_LABEL(_lck_mtx_lock_spin_lockstat_patch_point) + ret + /* inherit lock pointer in %rdx above */ + LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, %rdx) #endif + ret - pushl %edx /* save mutex address */ - pushl %edx - call EXT(lck_mtx_lock_acquire) - addl $4,%esp - popl %edx /* restore mutex address */ +Llmls_slow: + test $M_ILOCKED_MSK, %ecx /* is the interlock held */ + jz Llml_contended /* no, must have been the mutex */ - xorl %eax,%eax - movl %eax,M_ILK + cmp $(MUTEX_DESTROYED), %ecx /* check to see if its marked destroyed */ + je lck_mtx_destroyed + cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex */ + jne Llmls_loop /* no... 
must be interlocked */ - popf /* restore interrupt state */ + LMTX_ENTER_EXTENDED - movl $1,%eax + mov M_STATE(%rdx), %ecx + test $(M_SPIN_MSK), %ecx + jz Llmls_loop1 - NONLEAF_RET + LMTX_UPDATE_MISS /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */ +Llmls_loop: + PAUSE + mov M_STATE(%rdx), %ecx +Llmls_loop1: + test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx + jz Llmls_try + test $(M_MLOCKED_MSK), %ecx + jnz Llml_contended /* mutex owned by someone else, go contend for it */ + jmp Llmls_loop -mt_fail: - xorl %eax,%eax - movl %eax,M_ILK +Llmls_busy_disabled: + PREEMPTION_ENABLE + jmp Llmls_loop - popf /* restore interrupt state */ - xorl %eax,%eax + +NONLEAF_ENTRY(lck_mtx_lock) + mov %rdi, %rdx /* fetch lock pointer */ - NONLEAF_RET + CHECK_PREEMPTION_LEVEL() -NONLEAF_ENTRY(mutex_unlock) - movl B_ARG0,%edx /* fetch lock pointer */ + mov M_STATE(%rdx), %ecx + test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */ + jnz Llml_slow +Llml_try: /* no - can't be INDIRECT, DESTROYED or locked */ + mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ + or $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx - CHECK_MUTEX_TYPE() - CHECK_THREAD(M_THREAD) + PREEMPTION_DISABLE + lock + cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ + jne Llml_busy_disabled - movl B_PC,%ecx + mov %gs:CPU_ACTIVE_THREAD, %rax + mov %rax, M_OWNER(%rdx) /* record owner of mutex */ +#if MACH_LDEBUG + test %rax, %rax + jz 1f + incl TH_MUTEX_COUNT(%rax) /* lock statistic */ +1: +#endif /* MACH_LDEBUG */ - pushf /* save interrupt state */ - cli /* disable interrupts */ + testl $(M_WAITERS_MSK), M_STATE(%rdx) + jz Llml_finish -mu_get_hw: - movl M_ILK,%eax /* read interlock */ - testl %eax,%eax /* unlocked? */ - je 1f /* yes - attempt to lock it */ - PAUSE /* no - pause */ - jmp mu_get_hw /* try again */ -1: - lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */ - jne mu_get_hw /* branch on failure to retry */ + LMTX_CALLEXT1(lck_mtx_lock_acquire_x86) - cmpw $0,M_WAITERS /* are there any waiters? */ - jne mu_wakeup /* yes, more work to do */ +Llml_finish: + andl $(~M_ILOCKED_MSK), M_STATE(%rdx) + PREEMPTION_ENABLE + + cmp %rdx, %rdi /* is this an extended mutex */ + jne 2f -mu_doit: + leave +#if CONFIG_DTRACE + LOCKSTAT_LABEL(_lck_mtx_lock_lockstat_patch_point) + ret + /* inherit lock pointer in %rdx above */ + LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, %rdx) +#endif + ret +2: + leave +#if CONFIG_DTRACE + LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point) + ret + /* inherit lock pointer in %rdx above */ + LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, %rdx) +#endif + ret + +Llml_slow: + test $M_ILOCKED_MSK, %ecx /* is the interlock held */ + jz Llml_contended /* no, must have been the mutex */ + + cmp $(MUTEX_DESTROYED), %ecx /* check to see if its marked destroyed */ + je lck_mtx_destroyed + cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */ + jne Llml_loop /* no... 
must be interlocked */ + + LMTX_ENTER_EXTENDED + + mov M_STATE(%rdx), %ecx + test $(M_SPIN_MSK), %ecx + jz Llml_loop1 + + LMTX_UPDATE_MISS /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */ +Llml_loop: + PAUSE + mov M_STATE(%rdx), %ecx +Llml_loop1: + test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx + jz Llml_try + test $(M_MLOCKED_MSK), %ecx + jnz Llml_contended /* mutex owned by someone else, go contend for it */ + jmp Llml_loop + +Llml_busy_disabled: + PREEMPTION_ENABLE + jmp Llml_loop + + +Llml_contended: + cmp %rdx, %rdi /* is this an extended mutex */ + je 0f + LMTX_UPDATE_MISS +0: + LMTX_CALLEXT1(lck_mtx_lock_spinwait_x86) + + test %rax, %rax + jz Llml_acquired /* acquired mutex, interlock held and preemption disabled */ + + cmp $1, %rax /* check for direct wait status */ + je 2f + cmp %rdx, %rdi /* is this an extended mutex */ + je 2f + LMTX_UPDATE_DIRECT_WAIT +2: + mov M_STATE(%rdx), %ecx + test $(M_ILOCKED_MSK), %ecx + jnz 6f + + mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ + or $(M_ILOCKED_MSK), %ecx /* try to take the interlock */ + + PREEMPTION_DISABLE + lock + cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ + jne 5f + + test $(M_MLOCKED_MSK), %ecx /* we've got the interlock and */ + jnz 3f + or $(M_MLOCKED_MSK), %ecx /* the mutex is free... grab it directly */ + mov %ecx, M_STATE(%rdx) + + mov %gs:CPU_ACTIVE_THREAD, %rax + mov %rax, M_OWNER(%rdx) /* record owner of mutex */ #if MACH_LDEBUG - movl $0,M_THREAD /* disown thread */ -#endif + test %rax, %rax + jz 1f + incl TH_MUTEX_COUNT(%rax) /* lock statistic */ +1: +#endif /* MACH_LDEBUG */ - xorl %ecx,%ecx - movl %ecx,M_LOCKED /* unlock the mutex */ +Llml_acquired: + testl $(M_WAITERS_MSK), M_STATE(%rdx) + jnz 1f + mov M_OWNER(%rdx), %rax + mov TH_WAS_PROMOTED_ON_WAKEUP(%rax), %eax + test %eax, %eax + jz Llml_finish +1: + LMTX_CALLEXT1(lck_mtx_lock_acquire_x86) + jmp Llml_finish + +3: /* interlock held, mutex busy */ + cmp %rdx, %rdi /* is this an extended mutex */ + je 4f + LMTX_UPDATE_WAIT +4: + LMTX_CALLEXT1(lck_mtx_lock_wait_x86) + jmp Llml_contended +5: + PREEMPTION_ENABLE +6: + PAUSE + jmp 2b + - movl %ecx,M_ILK +NONLEAF_ENTRY(lck_mtx_try_lock_spin_always) + mov %rdi, %rdx /* fetch lock pointer */ + jmp Llmts_avoid_check - popf /* restore interrupt state */ +NONLEAF_ENTRY(lck_mtx_try_lock_spin) + mov %rdi, %rdx /* fetch lock pointer */ - NONLEAF_RET +Llmts_avoid_check: + mov M_STATE(%rdx), %ecx + test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */ + jnz Llmts_slow +Llmts_try: /* no - can't be INDIRECT, DESTROYED or locked */ + mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ + or $(M_ILOCKED_MSK | M_SPIN_MSK), %rcx -mu_wakeup: - pushl M_LOCKED - pushl %edx /* push mutex address */ - call EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */ - addl $8,%esp - movl B_ARG0,%edx /* restore lock pointer */ - jmp mu_doit + PREEMPTION_DISABLE + lock + cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ + jne Llmts_busy_disabled -/* - * lck_mtx_lock() - * lck_mtx_try_lock() - * lck_mutex_unlock() - * - * These are variants of mutex_lock(), mutex_try() and mutex_unlock() without - * DEBUG checks (which require fields not present in lck_mtx_t's). 
- */ -NONLEAF_ENTRY(lck_mtx_lock) + mov %gs:CPU_ACTIVE_THREAD, %rax + mov %rax, M_OWNER(%rdx) /* record owner of mutex */ +#if MACH_LDEBUG + test %rax, %rax + jz 1f + incl TH_MUTEX_COUNT(%rax) /* lock statistic */ +1: +#endif /* MACH_LDEBUG */ - movl B_ARG0,%edx /* fetch lock pointer */ - cmpl $(MUTEX_IND),M_ITAG /* is this indirect? */ - cmove M_PTR,%edx /* yes - take indirection */ + leave - CHECK_NO_SIMPLELOCKS() - CHECK_PREEMPTION_LEVEL() +#if CONFIG_DTRACE + mov $1, %rax /* return success */ + LOCKSTAT_LABEL(_lck_mtx_try_lock_spin_lockstat_patch_point) + ret + /* inherit lock pointer in %rdx above */ + LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, %rdx) +#endif + mov $1, %rax /* return success */ + ret - pushf /* save interrupt state */ - cli /* disable interrupts */ +Llmts_slow: + test $(M_ILOCKED_MSK), %ecx /* is the interlock held */ + jz Llmts_fail /* no, must be held as a mutex */ + + cmp $(MUTEX_DESTROYED), %ecx /* check to see if its marked destroyed */ + je lck_mtx_destroyed + cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */ + jne Llmts_loop1 + + LMTX_ENTER_EXTENDED +Llmts_loop: + PAUSE + mov M_STATE(%rdx), %ecx +Llmts_loop1: + test $(M_MLOCKED_MSK | M_SPIN_MSK), %ecx + jnz Llmts_fail + test $(M_ILOCKED_MSK), %ecx + jz Llmts_try + jmp Llmts_loop + +Llmts_busy_disabled: + PREEMPTION_ENABLE + jmp Llmts_loop -lml_retry: - movl B_PC,%ecx -lml_get_hw: - movl M_ILK,%eax /* read interlock */ - testl %eax,%eax /* unlocked? */ - je 1f /* yes - attempt to lock it */ - PAUSE /* no - pause */ - jmp lml_get_hw /* try again */ -1: - lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */ - jne lml_get_hw /* branch on failure to retry */ + +NONLEAF_ENTRY(lck_mtx_try_lock) + mov %rdi, %rdx /* fetch lock pointer */ + + mov M_STATE(%rdx), %ecx + test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */ + jnz Llmt_slow +Llmt_try: /* no - can't be INDIRECT, DESTROYED or locked */ + mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ + or $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx + + PREEMPTION_DISABLE + lock + cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ + jne Llmt_busy_disabled - movl M_LOCKED,%ecx /* get lock owner */ - testl %ecx,%ecx /* is the mutex locked? */ - jne lml_fail /* yes, we lose */ - movl %gs:CPU_ACTIVE_THREAD,%ecx - movl %ecx,M_LOCKED + mov %gs:CPU_ACTIVE_THREAD, %rax + mov %rax, M_OWNER(%rdx) /* record owner of mutex */ +#if MACH_LDEBUG + test %rax, %rax + jz 1f + incl TH_MUTEX_COUNT(%rax) /* lock statistic */ +1: +#endif /* MACH_LDEBUG */ - pushl %edx /* save mutex address */ - pushl %edx - call EXT(lck_mtx_lock_acquire) - addl $4,%esp - popl %edx /* restore mutex address */ + test $(M_WAITERS_MSK), %ecx + jz 0f - xorl %eax,%eax - movl %eax,M_ILK + LMTX_CALLEXT1(lck_mtx_lock_acquire_x86) +0: + andl $(~M_ILOCKED_MSK), M_STATE(%rdx) + PREEMPTION_ENABLE - popf /* restore interrupt state */ + leave +#if CONFIG_DTRACE + mov $1, %rax /* return success */ + /* Dtrace probe: LS_LCK_MTX_TRY_LOCK_ACQUIRE */ + LOCKSTAT_LABEL(_lck_mtx_try_lock_lockstat_patch_point) + ret + /* inherit lock pointer in %rdx from above */ + LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, %rdx) +#endif + mov $1, %rax /* return success */ + ret +Llmt_slow: + test $(M_ILOCKED_MSK), %ecx /* is the interlock held */ + jz Llmt_fail /* no, must be held as a mutex */ + + cmp $(MUTEX_DESTROYED), %ecx /* check to see if its marked destroyed */ + je lck_mtx_destroyed + cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? 
*/ + jne Llmt_loop + + LMTX_ENTER_EXTENDED +Llmt_loop: + PAUSE + mov M_STATE(%rdx), %ecx +Llmt_loop1: + test $(M_MLOCKED_MSK | M_SPIN_MSK), %ecx + jnz Llmt_fail + test $(M_ILOCKED_MSK), %ecx + jz Llmt_try + jmp Llmt_loop + +Llmt_busy_disabled: + PREEMPTION_ENABLE + jmp Llmt_loop + + +Llmt_fail: +Llmts_fail: + cmp %rdx, %rdi /* is this an extended mutex */ + je 0f + LMTX_UPDATE_MISS +0: + xor %rax, %rax NONLEAF_RET -lml_fail: - CHECK_MYLOCK(M_THREAD) - pushl %edx /* save mutex address */ - pushl M_LOCKED - pushl %edx /* push mutex address */ - call EXT(lck_mtx_lock_wait) /* wait for the lock */ - addl $8,%esp - popl %edx /* restore mutex address */ - jmp lml_retry /* and try again */ -NONLEAF_ENTRY(lck_mtx_try_lock) - - movl B_ARG0,%edx /* fetch lock pointer */ - cmpl $(MUTEX_IND),M_ITAG /* is this indirect? */ - cmove M_PTR,%edx /* yes - take indirection */ - CHECK_NO_SIMPLELOCKS() - CHECK_PREEMPTION_LEVEL() +NONLEAF_ENTRY(lck_mtx_convert_spin) + mov %rdi, %rdx /* fetch lock pointer */ - movl B_PC,%ecx + mov M_STATE(%rdx), %ecx + cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */ + jne 0f + mov M_PTR(%rdx), %rdx /* If so, take indirection */ + mov M_STATE(%rdx), %ecx +0: + test $(M_MLOCKED_MSK), %ecx /* already owned as a mutex, just return */ + jnz 2f + test $(M_WAITERS_MSK), %ecx /* are there any waiters? */ + jz 1f - pushf /* save interrupt state */ - cli /* disable interrupts */ + LMTX_CALLEXT1(lck_mtx_lock_acquire_x86) + mov M_STATE(%rdx), %ecx +1: + and $(~(M_ILOCKED_MSK | M_SPIN_MSK)), %ecx /* convert from spin version to mutex */ + or $(M_MLOCKED_MSK), %ecx + mov %ecx, M_STATE(%rdx) /* since I own the interlock, I don't need an atomic update */ -lmt_get_hw: - movl M_ILK,%eax /* read interlock */ - testl %eax,%eax /* unlocked? */ - je 1f /* yes - attempt to lock it */ - PAUSE /* no - pause */ - jmp lmt_get_hw /* try again */ -1: - lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */ - jne lmt_get_hw /* branch on failure to retry */ + PREEMPTION_ENABLE +2: + NONLEAF_RET - movl M_LOCKED,%ecx /* get lock owner */ - testl %ecx,%ecx /* is the mutex locked? */ - jne lmt_fail /* yes, we lose */ - movl %gs:CPU_ACTIVE_THREAD,%ecx - movl %ecx,M_LOCKED + - pushl %edx /* save mutex address */ - pushl %edx - call EXT(lck_mtx_lock_acquire) - addl $4,%esp - popl %edx /* restore mutex address */ +NONLEAF_ENTRY(lck_mtx_unlock) + mov %rdi, %rdx /* fetch lock pointer */ +Llmu_entry: + mov M_STATE(%rdx), %ecx +Llmu_prim: + cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? 
*/ + je Llmu_ext + +Llmu_chktype: + test $(M_MLOCKED_MSK), %ecx /* check for full mutex */ + jz Llmu_unlock +Llmu_mutex: + test $(M_ILOCKED_MSK), %rcx /* have to wait for interlock to clear */ + jnz Llmu_busy + + mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ + and $(~M_MLOCKED_MSK), %ecx /* drop mutex */ + or $(M_ILOCKED_MSK), %ecx /* pick up interlock */ + + PREEMPTION_DISABLE + lock + cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ + jne Llmu_busy_disabled /* branch on failure to spin loop */ - xorl %eax,%eax - movl %eax,M_ILK +Llmu_unlock: + xor %rax, %rax + mov %rax, M_OWNER(%rdx) + mov %rcx, %rax /* keep original state in %ecx for later evaluation */ + and $(~(M_ILOCKED_MSK | M_SPIN_MSK | M_PROMOTED_MSK)), %rax - popf /* restore interrupt state */ + test $(M_WAITERS_MSK), %eax + jz 2f + dec %eax /* decrement waiter count */ +2: + mov %eax, M_STATE(%rdx) /* since I own the interlock, I don't need an atomic update */ - movl $1,%eax /* return success */ - NONLEAF_RET +#if MACH_LDEBUG + /* perform lock statistics after drop to prevent delay */ + mov %gs:CPU_ACTIVE_THREAD, %rax + test %rax, %rax + jz 1f + decl TH_MUTEX_COUNT(%rax) /* lock statistic */ +1: +#endif /* MACH_LDEBUG */ -lmt_fail: - xorl %eax,%eax - movl %eax,M_ILK + test $(M_PROMOTED_MSK | M_WAITERS_MSK), %ecx + jz 3f - popf /* restore interrupt state */ + LMTX_CALLEXT2(lck_mtx_unlock_wakeup_x86, %rcx) +3: + PREEMPTION_ENABLE - xorl %eax,%eax /* return failure */ - NONLEAF_RET + cmp %rdx, %rdi + jne 4f -NONLEAF_ENTRY(lck_mtx_unlock) + leave +#if CONFIG_DTRACE + /* Dtrace: LS_LCK_MTX_UNLOCK_RELEASE */ + LOCKSTAT_LABEL(_lck_mtx_unlock_lockstat_patch_point) + ret + /* inherit lock pointer in %rdx from above */ + LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %rdx) +#endif + ret +4: + leave +#if CONFIG_DTRACE + /* Dtrace: LS_LCK_MTX_EXT_UNLOCK_RELEASE */ + LOCKSTAT_LABEL(_lck_mtx_ext_unlock_lockstat_patch_point) + ret + /* inherit lock pointer in %rdx from above */ + LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, %rdx) +#endif + ret - movl B_ARG0,%edx /* fetch lock pointer */ - cmpl $(MUTEX_IND),M_ITAG /* is this indirect? */ - cmove M_PTR,%edx /* yes - take indirection */ - movl B_PC,%ecx +Llmu_busy_disabled: + PREEMPTION_ENABLE +Llmu_busy: + PAUSE + mov M_STATE(%rdx), %ecx + jmp Llmu_mutex - pushf /* save interrupt state */ - cli /* disable interrupts */ +Llmu_ext: + mov M_PTR(%rdx), %rdx + mov M_OWNER(%rdx), %rax + mov %gs:CPU_ACTIVE_THREAD, %rcx + CHECK_UNLOCK(%rcx, %rax) + mov M_STATE(%rdx), %ecx + jmp Llmu_chktype -lmu_get_hw: - movl M_ILK,%eax /* read interlock */ - testl %eax,%eax /* unlocked? */ - je 1f /* yes - attempt to lock it */ - PAUSE /* no - pause */ - jmp lmu_get_hw /* try again */ -1: - lock; cmpxchgl %ecx,M_ILK /* atomic compare and exchange */ - jne lmu_get_hw /* branch on failure to retry */ - cmpw $0,M_WAITERS /* are there any waiters? 
*/ - jne lmu_wakeup /* yes, more work to do */ + +LEAF_ENTRY(lck_mtx_ilk_try_lock) + mov %rdi, %rdx /* fetch lock pointer - no indirection here */ -lmu_doit: - xorl %ecx,%ecx - movl %ecx,M_LOCKED /* unlock the mutex */ + mov M_STATE(%rdx), %ecx - movl %ecx,M_ILK + test $(M_ILOCKED_MSK), %ecx /* can't have the interlock yet */ + jnz 3f - popf /* restore interrupt state */ + mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ + or $(M_ILOCKED_MSK), %ecx - NONLEAF_RET + PREEMPTION_DISABLE + lock + cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ + jne 2f /* return failure after re-enabling preemption */ -lmu_wakeup: - pushl %edx /* save mutex address */ - pushl M_LOCKED - pushl %edx /* push mutex address */ - call EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */ - addl $8,%esp - popl %edx /* restore mutex pointer */ - jmp lmu_doit + mov $1, %rax /* return success with preemption disabled */ + LEAF_RET +2: + PREEMPTION_ENABLE /* need to re-enable preemption */ +3: + xor %rax, %rax /* return failure */ + LEAF_RET + LEAF_ENTRY(lck_mtx_ilk_unlock) - movl L_ARG0,%edx /* no indirection here */ + mov %rdi, %rdx /* fetch lock pointer - no indirection here */ - xorl %eax,%eax - movl %eax,M_ILK + andl $(~M_ILOCKED_MSK), M_STATE(%rdx) - LEAF_RET - -LEAF_ENTRY(_disable_preemption) -#if MACH_RT - _DISABLE_PREEMPTION -#endif /* MACH_RT */ - LEAF_RET + PREEMPTION_ENABLE /* need to re-enable preemption */ -LEAF_ENTRY(_enable_preemption) -#if MACH_RT -#if MACH_ASSERT - cmpl $0,%gs:CPU_PREEMPTION_LEVEL - jg 1f - pushl %gs:CPU_PREEMPTION_LEVEL - pushl $2f - call EXT(panic) - hlt - .data -2: String "_enable_preemption: preemption_level(%d) < 0!" - .text -1: -#endif /* MACH_ASSERT */ - _ENABLE_PREEMPTION -#endif /* MACH_RT */ LEAF_RET -LEAF_ENTRY(_enable_preemption_no_check) -#if MACH_RT -#if MACH_ASSERT - cmpl $0,%gs:CPU_PREEMPTION_LEVEL - jg 1f - pushl $2f - call EXT(panic) - hlt - .data -2: String "_enable_preemption_no_check: preemption_level <= 0!" - .text -1: -#endif /* MACH_ASSERT */ - _ENABLE_PREEMPTION_NO_CHECK -#endif /* MACH_RT */ - LEAF_RET - -LEAF_ENTRY(_mp_disable_preemption) -#if MACH_RT - _DISABLE_PREEMPTION -#endif /* MACH_RT */ - LEAF_RET +LEAF_ENTRY(lck_mtx_lock_grab_mutex) + mov %rdi, %rdx /* fetch lock pointer - no indirection here */ -LEAF_ENTRY(_mp_enable_preemption) -#if MACH_RT -#if MACH_ASSERT - cmpl $0,%gs:CPU_PREEMPTION_LEVEL - jg 1f - pushl %gs:CPU_PREEMPTION_LEVEL - pushl $2f - call EXT(panic) - hlt - .data -2: String "_mp_enable_preemption: preemption_level (%d) <= 0!" - .text -1: -#endif /* MACH_ASSERT */ - _ENABLE_PREEMPTION -#endif /* MACH_RT */ - LEAF_RET + mov M_STATE(%rdx), %ecx -LEAF_ENTRY(_mp_enable_preemption_no_check) -#if MACH_RT -#if MACH_ASSERT - cmpl $0,%gs:CPU_PREEMPTION_LEVEL - jg 1f - pushl $2f - call EXT(panic) - hlt - .data -2: String "_mp_enable_preemption_no_check: preemption_level <= 0!" 
- .text + test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* can't have the mutex yet */ + jnz 3f + + mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ + or $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx + + PREEMPTION_DISABLE + lock + cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ + jne 2f /* branch on failure to spin loop */ + + mov %gs:CPU_ACTIVE_THREAD, %rax + mov %rax, M_OWNER(%rdx) /* record owner of mutex */ +#if MACH_LDEBUG + test %rax, %rax + jz 1f + incl TH_MUTEX_COUNT(%rax) /* lock statistic */ 1: -#endif /* MACH_ASSERT */ - _ENABLE_PREEMPTION_NO_CHECK -#endif /* MACH_RT */ +#endif /* MACH_LDEBUG */ + + mov $1, %rax /* return success */ LEAF_RET - - -LEAF_ENTRY(i_bit_set) - movl L_ARG0,%edx - movl L_ARG1,%eax - lock - bts %edx,(%eax) +2: + PREEMPTION_ENABLE +3: + xor %rax, %rax /* return failure */ LEAF_RET + -LEAF_ENTRY(i_bit_clear) - movl L_ARG0,%edx - movl L_ARG1,%eax - lock - btr %edx,(%eax) - LEAF_RET -LEAF_ENTRY(bit_lock) - movl L_ARG0,%ecx - movl L_ARG1,%eax +LEAF_ENTRY(lck_mtx_lock_mark_destroyed) + mov %rdi, %rdx 1: + mov M_STATE(%rdx), %ecx + cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */ + jne 2f + + movl $(MUTEX_DESTROYED), M_STATE(%rdx) /* convert to destroyed state */ + jmp 3f +2: + test $(M_ILOCKED_MSK), %rcx /* have to wait for interlock to clear */ + jnz 5f + + PREEMPTION_DISABLE + mov %rcx, %rax /* eax contains snapshot for cmpxchgl */ + or $(M_ILOCKED_MSK), %ecx lock - bts %ecx,(%eax) - jb 1b - LEAF_RET + cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */ + jne 4f /* branch on failure to spin loop */ + movl $(MUTEX_DESTROYED), M_STATE(%rdx) /* convert to destroyed state */ + PREEMPTION_ENABLE +3: + LEAF_RET /* return with M_ILOCKED set */ +4: + PREEMPTION_ENABLE +5: + PAUSE + jmp 1b + +LEAF_ENTRY(preemption_underflow_panic) + FRAME + incl %gs:CPU_PREEMPTION_LEVEL + ALIGN_STACK() + LOAD_STRING_ARG0(16f) + CALL_PANIC() + hlt + .data +16: String "Preemption level underflow, possible cause unlocking an unlocked mutex or spinlock" + .text -LEAF_ENTRY(bit_lock_try) - movl L_ARG0,%ecx - movl L_ARG1,%eax - lock - bts %ecx,(%eax) - jb bit_lock_failed - LEAF_RET /* %eax better not be null ! */ -bit_lock_failed: - xorl %eax,%eax - LEAF_RET -LEAF_ENTRY(bit_unlock) - movl L_ARG0,%ecx - movl L_ARG1,%eax - lock - btr %ecx,(%eax) - LEAF_RET
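
For readers following the new lck_rw fastpaths in the diff above, here is a minimal C sketch (not part of the diff, nor of xnu's actual C sources) of the shared-acquire fastpath that Entry(lck_rw_lock_shared) implements in assembly: load the 32-bit state word, fall back to the generic slow path if the interlock, WANT_WRITE or WANT_UPGRADE bits are set, otherwise bump the 16-bit reader refcount with a compare-and-swap and retry if the CAS loses a race. The type rw_lock_sketch_t and the functions rw_lock_shared_sketch / rw_lock_shared_slowpath are hypothetical illustration names; the bit constants are copied from the definitions in the diff; the per-thread TH_RWLOCK_COUNT bookkeeping, the PAUSE on retry, and the DTrace lockstat patch point are omitted.

    #include <stdint.h>

    /* Bit layout of the 32-bit lck_rw_t state word, as defined in the diff
     * (the real bitfield lives in i386/locks.h). */
    #define LCK_RW_SHARED_MASK   0x0000ffffu    /* reader refcount            */
    #define LCK_RW_INTERLOCK     (0x1u  << 16)  /* interlock bit              */
    #define LCK_RW_PRIV_EXCL     (0x1u  << 24)
    #define LCK_RW_WANT_UPGRADE  (0x2u  << 24)
    #define LCK_RW_WANT_WRITE    (0x4u  << 24)
    #define LCK_R_WAITING        (0x8u  << 24)
    #define LCK_W_WAITING        (0x10u << 24)

    /* Hypothetical stand-ins for lck_rw_t and lck_rw_lock_shared_gen. */
    typedef struct { volatile uint32_t state; } rw_lock_sketch_t;
    extern void rw_lock_shared_slowpath(rw_lock_sketch_t *l);

    /* Shared-acquire fastpath, mirroring the structure of
     * Entry(lck_rw_lock_shared): retry while the CAS fails, punt to the
     * slow path once the interlock or a writer/upgrader is involved. */
    static void
    rw_lock_shared_sketch(rw_lock_sketch_t *l)
    {
        for (;;) {
            uint32_t old = l->state;

            if (old & (LCK_RW_INTERLOCK | LCK_RW_WANT_UPGRADE | LCK_RW_WANT_WRITE)) {
                rw_lock_shared_slowpath(l);   /* lck_rw_lock_shared_gen in the diff */
                return;
            }
            /* Bump the reader refcount held in the low 16 bits of the word
             * (the assembly's "incl %ecx" before the locked cmpxchgl). */
            if (__sync_bool_compare_and_swap(&l->state, old, old + 1))
                return;                       /* fastpath acquired the lock */
            /* CAS lost a race: reload the state word and try again. */
        }
    }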