*/
#include <mach_rt.h>
-#include <platforms.h>
#include <mach_ldebug.h>
#include <i386/asm.h>
#include <i386/eflags.h>
#include <i386/pal_lock_asm.h>
-/*
- * When performance isn't the only concern, it's
- * nice to build stack frames...
- */
-#define BUILD_STACK_FRAMES (GPROF)
-
-#if BUILD_STACK_FRAMES
-
-/* Stack-frame-relative: */
-#define L_PC B_PC
-#define L_ARG0 B_ARG0
-#define L_ARG1 B_ARG1
-
-#define LEAF_ENTRY(name) \
- Entry(name); \
- FRAME; \
- MCOUNT
-
-#define LEAF_ENTRY2(n1,n2) \
- Entry(n1); \
- Entry(n2); \
- FRAME; \
- MCOUNT
-
-#define LEAF_RET \
- EMARF; \
- ret
-
-#else /* BUILD_STACK_FRAMES */
-
-/* Stack-pointer-relative: */
-#define L_PC S_PC
-#define L_ARG0 S_ARG0
-#define L_ARG1 S_ARG1
-
#define LEAF_ENTRY(name) \
Entry(name)
#define LEAF_RET \
ret
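+/*
+ * Leaf routines now always run frameless: on x86-64 the arguments
+ * arrive in registers per the SysV ABI, so no stack-relative
+ * B_ARG/S_ARG accessors are needed.
+ */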
-#endif /* BUILD_STACK_FRAMES */
-
-
/* Non-leaf routines always have a stack frame: */
#define NONLEAF_ENTRY(name) \
Entry(name); \
- FRAME; \
- MCOUNT
+ FRAME
#define NONLEAF_ENTRY2(n1,n2) \
Entry(n1); \
Entry(n2); \
- FRAME; \
- MCOUNT
+ FRAME
#define NONLEAF_RET \
EMARF; \
* word is loaded/stored to the pointer
*/
-#define HW_LOCK_REGISTER %rdi
-#define LOAD_HW_LOCK_REGISTER
-#define HW_LOCK_THREAD_REGISTER %rcx
-#define LOAD_HW_LOCK_THREAD_REGISTER mov %gs:CPU_ACTIVE_THREAD, HW_LOCK_THREAD_REGISTER
-#define HW_LOCK_MOV_WORD movq
-#define HW_LOCK_EXAM_REGISTER %rax
-
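+/*
+ * Register conventions formerly hidden behind the HW_LOCK_* macros,
+ * now written inline: %rdi holds the lock pointer (the first SysV
+ * argument), %rcx the value to install (the current thread for word
+ * locks), and %rax the value examined and implicitly compared by
+ * cmpxchg.
+ */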
/*
* void hw_lock_init(hw_lock_t)
*
* Initialize a hardware lock.
*/
LEAF_ENTRY(hw_lock_init)
- LOAD_HW_LOCK_REGISTER /* fetch lock pointer */
- HW_LOCK_MOV_WORD $0, (HW_LOCK_REGISTER) /* clear the lock */
+ movq $0, (%rdi) /* clear the lock */
LEAF_RET
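+/*
+ * The lock word is a single pointer-sized cell: 0 means free,
+ * otherwise it holds the owning thread, which is what the cmpxchg
+ * sequences below install.
+ */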
* Initialize a hardware byte lock.
*/
LEAF_ENTRY(hw_lock_byte_init)
- LOAD_HW_LOCK_REGISTER /* fetch lock pointer */
- movb $0, (HW_LOCK_REGISTER) /* clear the lock */
+ movb $0, (%rdi) /* clear the lock */
LEAF_RET
/*
* MACH_RT: also return with preemption disabled.
*/
LEAF_ENTRY(hw_lock_lock)
- LOAD_HW_LOCK_REGISTER /* fetch lock pointer */
- LOAD_HW_LOCK_THREAD_REGISTER /* get thread pointer */
+ mov %gs:CPU_ACTIVE_THREAD, %rcx /* get thread pointer */
PREEMPTION_DISABLE
1:
- mov (HW_LOCK_REGISTER), HW_LOCK_EXAM_REGISTER
- test HW_LOCK_EXAM_REGISTER,HW_LOCK_EXAM_REGISTER /* lock locked? */
+ mov (%rdi), %rax
+ test %rax,%rax /* lock locked? */
jne 3f /* branch if so */
- lock; cmpxchg HW_LOCK_THREAD_REGISTER,(HW_LOCK_REGISTER) /* try to acquire the HW lock */
+ lock; cmpxchg %rcx,(%rdi) /* try to acquire the HW lock */
jne 3f
movl $1,%eax /* In case this was a timeout call */
LEAF_RET /* if yes, then nothing left to do */
*/
LEAF_ENTRY(hw_lock_byte_lock)
- LOAD_HW_LOCK_REGISTER /* Load lock pointer */
PREEMPTION_DISABLE
movl $1, %ecx /* Set lock value */
1:
- movb (HW_LOCK_REGISTER), %al /* Load byte at address */
+ movb (%rdi), %al /* Load byte at address */
testb %al,%al /* lock locked? */
jne 3f /* branch if so */
- lock; cmpxchg %cl,(HW_LOCK_REGISTER) /* attempt atomic compare exchange */
+ lock; cmpxchg %cl,(%rdi) /* attempt atomic compare exchange */
jne 3f
LEAF_RET /* if yes, then nothing left to do */
3:
*/
LEAF_ENTRY(hw_lock_to)
1:
- LOAD_HW_LOCK_REGISTER /* fetch lock pointer */
- LOAD_HW_LOCK_THREAD_REGISTER
+ mov %gs:CPU_ACTIVE_THREAD, %rcx
/*
* Attempt to grab the lock immediately
*/
PREEMPTION_DISABLE
- mov (HW_LOCK_REGISTER), HW_LOCK_EXAM_REGISTER
- test HW_LOCK_EXAM_REGISTER,HW_LOCK_EXAM_REGISTER /* lock locked? */
+ mov (%rdi), %rax
+ test %rax,%rax /* lock locked? */
jne 2f /* branch if so */
- lock; cmpxchg HW_LOCK_THREAD_REGISTER,(HW_LOCK_REGISTER) /* try to acquire the HW lock */
+ lock; cmpxchg %rcx,(%rdi) /* try to acquire the HW lock */
jne 2f /* branch on failure */
movl $1,%eax
LEAF_RET
mov $(INNER_LOOP_COUNT),%r9
5:
PAUSE /* pause for hyper-threading */
- mov (HW_LOCK_REGISTER),HW_LOCK_EXAM_REGISTER /* spin checking lock value in cache */
- test HW_LOCK_EXAM_REGISTER,HW_LOCK_EXAM_REGISTER
+ mov (%rdi),%rax /* spin checking lock value in cache */
+ test %rax,%rax
je 6f /* zero => unlocked, try to grab it */
decq %r9 /* decrement inner loop count */
jnz 5b /* time to check for timeout? */
* Here to try to grab the lock that now appears to be free
* after contention.
*/
- LOAD_HW_LOCK_THREAD_REGISTER
- lock; cmpxchg HW_LOCK_THREAD_REGISTER,(HW_LOCK_REGISTER) /* try to acquire the HW lock */
+ mov %gs:CPU_ACTIVE_THREAD, %rcx
+ lock; cmpxchg %rcx,(%rdi) /* try to acquire the HW lock */
jne 4b /* no - spin again */
movl $1,%eax /* yes */
pop %r9
* MACH_RT: release preemption level.
*/
LEAF_ENTRY(hw_lock_unlock)
- LOAD_HW_LOCK_REGISTER /* fetch lock pointer */
- HW_LOCK_MOV_WORD $0, (HW_LOCK_REGISTER) /* clear the lock */
+ movq $0, (%rdi) /* clear the lock */
PREEMPTION_ENABLE
LEAF_RET
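+/*
+ * For reference, the acquire/release pair behaves like this C sketch
+ * (illustrative only; preemption_disable()/preemption_enable() and
+ * cpu_pause() stand in for the PREEMPTION_* and PAUSE macros, and a
+ * GCC __sync builtin for the lock cmpxchg):
+ *
+ *	void hw_lock_lock_sketch(volatile uintptr_t *lock)
+ *	{
+ *		uintptr_t self = (uintptr_t)current_thread();
+ *		preemption_disable();
+ *		for (;;) {
+ *			if (*lock == 0 &&
+ *			    __sync_bool_compare_and_swap(lock, 0, self))
+ *				return;	// acquired; preemption stays off
+ *			cpu_pause();
+ *		}
+ *	}
+ *
+ *	void hw_lock_unlock_sketch(volatile uintptr_t *lock)
+ *	{
+ *		*lock = 0;		// movq $0, (%rdi)
+ *		preemption_enable();
+ *	}
+ */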
*/
LEAF_ENTRY(hw_lock_byte_unlock)
- LOAD_HW_LOCK_REGISTER /* Load lock pointer */
- movb $0, (HW_LOCK_REGISTER) /* Clear the lock byte */
+ movb $0, (%rdi) /* Clear the lock byte */
PREEMPTION_ENABLE
LEAF_RET
* MACH_RT: returns with preemption disabled on success.
*/
LEAF_ENTRY(hw_lock_try)
- LOAD_HW_LOCK_REGISTER /* fetch lock pointer */
- LOAD_HW_LOCK_THREAD_REGISTER
+ mov %gs:CPU_ACTIVE_THREAD, %rcx
PREEMPTION_DISABLE
- mov (HW_LOCK_REGISTER),HW_LOCK_EXAM_REGISTER
- test HW_LOCK_EXAM_REGISTER,HW_LOCK_EXAM_REGISTER
+ mov (%rdi),%rax
+ test %rax,%rax
jne 1f
- lock; cmpxchg HW_LOCK_THREAD_REGISTER,(HW_LOCK_REGISTER) /* try to acquire the HW lock */
+ lock; cmpxchg %rcx,(%rdi) /* try to acquire the HW lock */
jne 1f
movl $1,%eax /* success */
* N.B. Racy, of course.
*/
LEAF_ENTRY(hw_lock_held)
- LOAD_HW_LOCK_REGISTER /* fetch lock pointer */
- mov (HW_LOCK_REGISTER),HW_LOCK_EXAM_REGISTER /* check lock value */
- test HW_LOCK_EXAM_REGISTER,HW_LOCK_EXAM_REGISTER
+ mov (%rdi),%rax /* check lock value */
+ test %rax,%rax
movl $1,%ecx
cmovne %ecx,%eax /* 0 => unlocked, 1 => locked */
LEAF_RET
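+/*
+ * hw_lock_try above reduces, in the same sketch form, to a single
+ * CAS attempt that keeps preemption disabled only on success
+ * (illustrative):
+ *
+ *	boolean_t hw_lock_try_sketch(volatile uintptr_t *lock)
+ *	{
+ *		uintptr_t self = (uintptr_t)current_thread();
+ *		preemption_disable();
+ *		if (*lock == 0 &&
+ *		    __sync_bool_compare_and_swap(lock, 0, self))
+ *			return TRUE;
+ *		preemption_enable();
+ *		return FALSE;
+ *	}
+ */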
* register and examined
*/
-#define LCK_RW_REGISTER %rdi
-#define LOAD_LCK_RW_REGISTER
-#define LCK_RW_FLAGS_REGISTER %eax
-#define LOAD_LCK_RW_FLAGS_REGISTER mov (LCK_RW_REGISTER), LCK_RW_FLAGS_REGISTER
-
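+/*
+ * As with the spinlocks above, the former LCK_RW_* register macros
+ * are now spelled out: %rdi carries the lck_rw_t pointer and %eax a
+ * fresh copy of the 32-bit word holding the interlock, the state
+ * bits and the reader count, reloaded before each cmpxchgl attempt.
+ */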
#define RW_LOCK_SHARED_MASK (LCK_RW_INTERLOCK | LCK_RW_WANT_UPGRADE | LCK_RW_WANT_WRITE)
/*
* void lck_rw_lock_shared(lck_rw_t *)
Entry(lck_rw_lock_shared)
mov %gs:CPU_ACTIVE_THREAD, %rcx /* Load thread pointer */
incl TH_RWLOCK_COUNT(%rcx) /* Increment count before atomic CAS */
- LOAD_LCK_RW_REGISTER
1:
- LOAD_LCK_RW_FLAGS_REGISTER /* Load state bitfield and interlock */
+ mov (%rdi), %eax /* Load state bitfield and interlock */
testl $(RW_LOCK_SHARED_MASK), %eax /* Eligible for fastpath? */
jne 3f
movl %eax, %ecx /* original value in %eax for cmpxchgl */
incl %ecx /* Increment reader refcount */
lock
- cmpxchgl %ecx, (LCK_RW_REGISTER) /* Attempt atomic exchange */
+ cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */
jne 2f
#if CONFIG_DTRACE
LOCKSTAT_LABEL(_lck_rw_lock_shared_lockstat_patch_point)
ret
/*
- Fall thru when patched, counting on lock pointer in LCK_RW_REGISTER
+ Fall thru when patched, counting on lock pointer in %rdi
*/
- LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, LCK_RW_REGISTER)
+ LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_ACQUIRE, %rdi)
#endif
ret
2:
*
*/
Entry(lck_rw_try_lock_shared)
- LOAD_LCK_RW_REGISTER
1:
- LOAD_LCK_RW_FLAGS_REGISTER /* Load state bitfield and interlock */
+ mov (%rdi), %eax /* Load state bitfield and interlock */
testl $(LCK_RW_INTERLOCK), %eax
jne 2f
testl $(RW_TRY_LOCK_SHARED_MASK), %eax
movl %eax, %ecx /* original value in %eax for cmpxchgl */
incl %ecx /* Increment reader refcount */
lock
- cmpxchgl %ecx, (LCK_RW_REGISTER) /* Attempt atomic exchange */
+ cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */
jne 2f
mov %gs:CPU_ACTIVE_THREAD, %rcx /* Load thread pointer */
*/
LOCKSTAT_LABEL(_lck_rw_try_lock_shared_lockstat_patch_point)
ret
- /* Fall thru when patched, counting on lock pointer in LCK_RW_REGISTER */
- LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, LCK_RW_REGISTER)
+ /* Fall thru when patched, counting on lock pointer in %rdi */
+ LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE, %rdi)
#endif
movl $1, %eax /* return TRUE */
ret
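+/*
+ * Both shared paths above reduce to one 32-bit CAS on that word:
+ * bump the reader count in the low bits if no excluding bit is set
+ * (RW_LOCK_SHARED_MASK here, RW_TRY_LOCK_SHARED_MASK plus an
+ * explicit interlock test for the try variant). Illustrative
+ * sketch, with "word" standing for the lck_rw_t state word:
+ *
+ *	boolean_t rw_shared_fastpath_sketch(volatile uint32_t *word)
+ *	{
+ *		uint32_t old = *word;
+ *		return (old & RW_LOCK_SHARED_MASK) == 0 &&
+ *		    __sync_bool_compare_and_swap(word, old, old + 1);
+ *	}
+ */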
*
*/
Entry(lck_rw_grab_shared)
- LOAD_LCK_RW_REGISTER
1:
- LOAD_LCK_RW_FLAGS_REGISTER /* Load state bitfield and interlock */
+ mov (%rdi), %eax /* Load state bitfield and interlock */
testl $(LCK_RW_INTERLOCK), %eax
jne 5f
testl $(RW_LOCK_EXCLUSIVE_HELD), %eax
jne 3f
2:
- movl %eax, %ecx /* original value in %eax for cmpxchgl */
- incl %ecx /* Increment reader refcount */
+ movl %eax, %ecx /* original value in %eax for cmpxchgl */
+ incl %ecx /* Increment reader refcount */
lock
- cmpxchgl %ecx, (LCK_RW_REGISTER) /* Attempt atomic exchange */
+ cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */
jne 4f
- movl $1, %eax /* return success */
+ movl $1, %eax /* return success */
ret
3:
testl $(LCK_RW_SHARED_MASK), %eax
testl $(LCK_RW_PRIV_EXCL), %eax
je 2b
4:
- xorl %eax, %eax /* return failure */
+ xorl %eax, %eax /* return failure */
ret
5:
PAUSE
Entry(lck_rw_lock_exclusive)
mov %gs:CPU_ACTIVE_THREAD, %rcx /* Load thread pointer */
incl TH_RWLOCK_COUNT(%rcx) /* Increment count before atomic CAS */
- LOAD_LCK_RW_REGISTER
1:
- LOAD_LCK_RW_FLAGS_REGISTER /* Load state bitfield, interlock and shared count */
+ mov (%rdi), %eax /* Load state bitfield, interlock and shared count */
testl $(RW_LOCK_EXCLUSIVE_MASK), %eax /* Eligible for fastpath? */
jne 3f /* no, go slow */
movl %eax, %ecx /* original value in %eax for cmpxchgl */
orl $(LCK_RW_WANT_WRITE), %ecx
lock
- cmpxchgl %ecx, (LCK_RW_REGISTER) /* Attempt atomic exchange */
+ cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */
jne 2f
#if CONFIG_DTRACE
*/
LOCKSTAT_LABEL(_lck_rw_lock_exclusive_lockstat_patch_point)
ret
- /* Fall thru when patched, counting on lock pointer in LCK_RW_REGISTER */
- LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, LCK_RW_REGISTER)
+ /* Fall thru when patched, counting on lock pointer in %rdi */
+ LOCKSTAT_RECORD(LS_LCK_RW_LOCK_EXCL_ACQUIRE, %rdi)
#endif
ret
2:
* Returns FALSE if the lock is not held on return.
*/
Entry(lck_rw_try_lock_exclusive)
- LOAD_LCK_RW_REGISTER
1:
- LOAD_LCK_RW_FLAGS_REGISTER /* Load state bitfield, interlock and shared count */
+ mov (%rdi), %eax /* Load state bitfield, interlock and shared count */
testl $(LCK_RW_INTERLOCK), %eax
jne 2f
testl $(RW_TRY_LOCK_EXCLUSIVE_MASK), %eax
- jne 3f /* can't get it */
+ jne 3f /* can't get it */
- movl %eax, %ecx /* original value in %eax for cmpxchgl */
+ movl %eax, %ecx /* original value in %eax for cmpxchgl */
orl $(LCK_RW_WANT_WRITE), %ecx
lock
- cmpxchgl %ecx, (LCK_RW_REGISTER) /* Attempt atomic exchange */
+ cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */
jne 2f
mov %gs:CPU_ACTIVE_THREAD, %rcx /* Load thread pointer */
*/
LOCKSTAT_LABEL(_lck_rw_try_lock_exclusive_lockstat_patch_point)
ret
- /* Fall thru when patched, counting on lock pointer in LCK_RW_REGISTER */
- LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, LCK_RW_REGISTER)
+ /* Fall thru when patched, counting on lock pointer in %rdi */
+ LOCKSTAT_RECORD(LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE, %rdi)
#endif
movl $1, %eax /* return TRUE */
ret
* set RW_WANT_UPGRADE and get rid of the read count we hold
*/
Entry(lck_rw_lock_shared_to_exclusive)
- LOAD_LCK_RW_REGISTER
1:
- LOAD_LCK_RW_FLAGS_REGISTER /* Load state bitfield, interlock and shared count */
+ mov (%rdi), %eax /* Load state bitfield, interlock and shared count */
testl $(LCK_RW_INTERLOCK), %eax
jne 7f
testl $(LCK_RW_WANT_UPGRADE), %eax
orl $(LCK_RW_WANT_UPGRADE), %ecx /* ask for WANT_UPGRADE */
decl %ecx /* and shed our read count */
lock
- cmpxchgl %ecx, (LCK_RW_REGISTER) /* Attempt atomic exchange */
+ cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */
jne 7f
/* we now own the WANT_UPGRADE */
testl $(LCK_RW_SHARED_MASK), %ecx /* check to see if all of the readers are drained */
*/
LOCKSTAT_LABEL(_lck_rw_lock_shared_to_exclusive_lockstat_patch_point)
ret
- /* Fall thru when patched, counting on lock pointer in LCK_RW_REGISTER */
- LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, LCK_RW_REGISTER)
+ /* Fall thru when patched, counting on lock pointer in %rdi */
+ LOCKSTAT_RECORD(LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE, %rdi)
#endif
movl $1, %eax /* return success */
ret
andl $(~LCK_W_WAITING), %ecx /* so clear the wait indicator */
3:
lock
- cmpxchgl %ecx, (LCK_RW_REGISTER) /* Attempt atomic exchange */
+ cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */
jne 7f
mov %eax, %esi /* put old flags as second arg */
*
*/
Entry(lck_rw_done)
- LOAD_LCK_RW_REGISTER
1:
- LOAD_LCK_RW_FLAGS_REGISTER /* Load state bitfield, interlock and reader count */
+ mov (%rdi), %eax /* Load state bitfield, interlock and reader count */
testl $(LCK_RW_INTERLOCK), %eax
jne 7f /* wait for interlock to clear */
andl $(~LCK_R_WAITING), %ecx
6:
lock
- cmpxchgl %ecx, (LCK_RW_REGISTER) /* Attempt atomic exchange */
+ cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */
jne 7f
mov %eax,%esi /* old flags in %rsi */
*
*/
Entry(lck_rw_lock_exclusive_to_shared)
- LOAD_LCK_RW_REGISTER
1:
- LOAD_LCK_RW_FLAGS_REGISTER /* Load state bitfield, interlock and reader count */
+ mov (%rdi), %eax /* Load state bitfield, interlock and reader count */
testl $(LCK_RW_INTERLOCK), %eax
jne 6f /* wait for interlock to clear */
andl $(~LCK_R_WAITING), %ecx
5:
lock
- cmpxchgl %ecx, (LCK_RW_REGISTER) /* Attempt atomic exchange */
+ cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */
jne 6f
mov %eax,%esi
*
*/
Entry(lck_rw_grab_want)
- LOAD_LCK_RW_REGISTER
1:
- LOAD_LCK_RW_FLAGS_REGISTER /* Load state bitfield, interlock and reader count */
+ mov (%rdi), %eax /* Load state bitfield, interlock and reader count */
testl $(LCK_RW_INTERLOCK), %eax
jne 3f /* wait for interlock to clear */
testl $(LCK_RW_WANT_WRITE), %eax /* want_write has been grabbed by someone else */
movl %eax, %ecx /* original value in %eax for cmpxchgl */
orl $(LCK_RW_WANT_WRITE), %ecx
lock
- cmpxchgl %ecx, (LCK_RW_REGISTER) /* Attempt atomic exchange */
+ cmpxchgl %ecx, (%rdi) /* Attempt atomic exchange */
jne 2f
/* we now own want_write */
movl $1, %eax /* return success */
*
*/
Entry(lck_rw_held_read_or_upgrade)
- LOAD_LCK_RW_REGISTER
- LOAD_LCK_RW_FLAGS_REGISTER /* Load state bitfield, interlock and reader count */
+ mov (%rdi), %eax
andl $(RW_LOCK_SHARED_OR_UPGRADE_MASK), %eax
ret
#define M_PTR MUTEX_PTR
#define M_STATE MUTEX_STATE
-#define LMTX_ARG0 %rdi
-#define LMTX_ARG1 %rsi
-#define LMTX_REG_ORIG %rdi
-#define LMTX_REG %rdx
-#define LMTX_A_REG %rax
-#define LMTX_A_REG32 %eax
-#define LMTX_C_REG %rcx
-#define LMTX_C_REG32 %ecx
-#define LMTX_RET_REG %rax
-#define LMTX_RET_REG32 %eax
-#define LMTX_LGROUP_REG %r10
-#define LMTX_SSTATE_REG %r11
-#define LOAD_LMTX_REG(arg) mov %rdi, %rdx
-#define LMTX_CHK_EXTENDED cmp LMTX_REG, LMTX_REG_ORIG
-#define LMTX_ASSERT_OWNED cmp $(MUTEX_ASSERT_OWNED), LMTX_ARG1
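+/*
+ * Mutex register conventions, formerly the LMTX_* macros, written
+ * inline from here on: %rdi keeps the caller's lock pointer, %rdx
+ * the lock actually operated on (they differ only after indirection
+ * through M_PTR), %rax/%ecx the cmpxchg snapshot and replacement
+ * value, %r10 the stat group and %r11 the first-miss flags for
+ * extended mutexes. "cmp %rdx, %rdi" therefore tests whether the
+ * mutex is extended.
+ */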
#define LMTX_ENTER_EXTENDED \
- mov M_PTR(LMTX_REG), LMTX_REG ; \
- xor LMTX_SSTATE_REG, LMTX_SSTATE_REG ; \
- mov MUTEX_GRP(LMTX_REG), LMTX_LGROUP_REG ; \
+ mov M_PTR(%rdx), %rdx ; \
+ xor %r11, %r11 ; \
+ mov MUTEX_GRP(%rdx), %r10 ; \
LOCK_IF_ATOMIC_STAT_UPDATES ; \
- incq GRP_MTX_STAT_UTIL(LMTX_LGROUP_REG)
-
-#define LMTX_EXIT_EXTENDED
-
-#define LMTX_CHK_EXTENDED_EXIT
+ incq GRP_MTX_STAT_UTIL(%r10)
#if LOG_FIRST_MISS_ALONE
#define LMTX_UPDATE_MISS \
- test $1, LMTX_SSTATE_REG ; \
+ test $1, %r11 ; \
jnz 11f ; \
LOCK_IF_ATOMIC_STAT_UPDATES ; \
- incl GRP_MTX_STAT_MISS(LMTX_LGROUP_REG) ; \
- or $1, LMTX_SSTATE_REG ; \
+ incl GRP_MTX_STAT_MISS(%r10) ; \
+ or $1, %r11 ; \
11:
#else
#define LMTX_UPDATE_MISS \
LOCK_IF_ATOMIC_STAT_UPDATES ; \
- incl GRP_MTX_STAT_MISS(LMTX_LGROUP_REG)
+ incl GRP_MTX_STAT_MISS(%r10)
#endif
#if LOG_FIRST_MISS_ALONE
#define LMTX_UPDATE_WAIT \
- test $2, LMTX_SSTATE_REG ; \
+ test $2, %r11 ; \
jnz 11f ; \
LOCK_IF_ATOMIC_STAT_UPDATES ; \
- incl GRP_MTX_STAT_WAIT(LMTX_LGROUP_REG) ; \
- or $2, LMTX_SSTATE_REG ; \
+ incl GRP_MTX_STAT_WAIT(%r10) ; \
+ or $2, %r11 ; \
11:
#else
#define LMTX_UPDATE_WAIT \
LOCK_IF_ATOMIC_STAT_UPDATES ; \
- incl GRP_MTX_STAT_WAIT(LMTX_LGROUP_REG)
+ incl GRP_MTX_STAT_WAIT(%r10)
#endif
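+/*
+ * With LOG_FIRST_MISS_ALONE, %r11 carries per-acquisition flags so
+ * each statistic is bumped at most once: bit 0 marks a miss already
+ * logged, bit 1 a wait already logged.
+ */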
*/
#define LMTX_UPDATE_DIRECT_WAIT \
LOCK_IF_ATOMIC_STAT_UPDATES ; \
- incl GRP_MTX_STAT_DIRECT_WAIT(LMTX_LGROUP_REG)
+ incl GRP_MTX_STAT_DIRECT_WAIT(%r10)
#define LMTX_CALLEXT1(func_name) \
- LMTX_CHK_EXTENDED ; \
+ cmp %rdx, %rdi ; \
je 12f ; \
- push LMTX_LGROUP_REG ; \
- push LMTX_SSTATE_REG ; \
-12: push LMTX_REG_ORIG ; \
- push LMTX_REG ; \
- mov LMTX_REG, LMTX_ARG0 ; \
+ push %r10 ; \
+ push %r11 ; \
+12: push %rdi ; \
+ push %rdx ; \
+ mov %rdx, %rdi ; \
call EXT(func_name) ; \
- pop LMTX_REG ; \
- pop LMTX_REG_ORIG ; \
- LMTX_CHK_EXTENDED ; \
+ pop %rdx ; \
+ pop %rdi ; \
+ cmp %rdx, %rdi ; \
je 12f ; \
- pop LMTX_SSTATE_REG ; \
- pop LMTX_LGROUP_REG ; \
+ pop %r11 ; \
+ pop %r10 ; \
12:
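+/*
+ * %r10 and %r11 are caller-saved in the SysV ABI, so LMTX_CALLEXT1/2
+ * preserve them across the call only when they are live, i.e. for an
+ * extended mutex (%rdx != %rdi). %rdi and %rdx are always saved so
+ * both the original and the indirected lock pointers survive, and
+ * the operated-on lock is passed as the first argument in %rdi.
+ */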
#define LMTX_CALLEXT2(func_name, reg) \
- LMTX_CHK_EXTENDED ; \
+ cmp %rdx, %rdi ; \
je 12f ; \
- push LMTX_LGROUP_REG ; \
- push LMTX_SSTATE_REG ; \
-12: push LMTX_REG_ORIG ; \
- push LMTX_REG ; \
- mov reg, LMTX_ARG1 ; \
- mov LMTX_REG, LMTX_ARG0 ; \
+ push %r10 ; \
+ push %r11 ; \
+12: push %rdi ; \
+ push %rdx ; \
+ mov reg, %rsi ; \
+ mov %rdx, %rdi ; \
call EXT(func_name) ; \
- pop LMTX_REG ; \
- pop LMTX_REG_ORIG ; \
- LMTX_CHK_EXTENDED ; \
+ pop %rdx ; \
+ pop %rdi ; \
+ cmp %rdx, %rdi ; \
je 12f ; \
- pop LMTX_SSTATE_REG ; \
- pop LMTX_LGROUP_REG ; \
+ pop %r11 ; \
+ pop %r10 ; \
12:
*/
NONLEAF_ENTRY(lck_mtx_assert)
- LOAD_LMTX_REG(B_ARG0) /* Load lock address */
- mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG /* Load current thread */
+ mov %rdi, %rdx /* Load lock address */
+ mov %gs:CPU_ACTIVE_THREAD, %rax /* Load current thread */
- mov M_STATE(LMTX_REG), LMTX_C_REG32
- cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex? */
+ mov M_STATE(%rdx), %ecx
+ cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
jne 0f
- mov M_PTR(LMTX_REG), LMTX_REG /* If so, take indirection */
+ mov M_PTR(%rdx), %rdx /* If so, take indirection */
0:
- mov M_OWNER(LMTX_REG), LMTX_C_REG /* Load owner */
- LMTX_ASSERT_OWNED
+ mov M_OWNER(%rdx), %rcx /* Load owner */
+ cmp $(MUTEX_ASSERT_OWNED), %rsi
jne 2f /* Assert ownership? */
- cmp LMTX_A_REG, LMTX_C_REG /* Current thread match? */
+ cmp %rax, %rcx /* Current thread match? */
jne 3f /* no, go panic */
- testl $(M_ILOCKED_MSK | M_MLOCKED_MSK), M_STATE(LMTX_REG)
+ testl $(M_ILOCKED_MSK | M_MLOCKED_MSK), M_STATE(%rdx)
je 3f
1: /* yes, we own it */
NONLEAF_RET
2:
- cmp LMTX_A_REG, LMTX_C_REG /* Current thread match? */
+ cmp %rax, %rcx /* Current thread match? */
jne 1b /* No, return */
ALIGN_STACK()
- LOAD_PTR_ARG1(LMTX_REG)
+ LOAD_PTR_ARG1(%rdx)
LOAD_STRING_ARG0(mutex_assert_owned_str)
jmp 4f
3:
ALIGN_STACK()
- LOAD_PTR_ARG1(LMTX_REG)
+ LOAD_PTR_ARG1(%rdx)
LOAD_STRING_ARG0(mutex_assert_not_owned_str)
4:
CALL_PANIC()
lck_mtx_destroyed:
ALIGN_STACK()
- LOAD_PTR_ARG1(LMTX_REG)
+ LOAD_PTR_ARG1(%rdx)
LOAD_STRING_ARG0(mutex_interlock_destroyed_str)
CALL_PANIC()
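+/*
+ * The paths below all operate on the 32-bit M_STATE word. It holds
+ * either a sentinel value (MUTEX_IND: indirect, follow M_PTR;
+ * MUTEX_DESTROYED) or a bitfield of M_ILOCKED_MSK (interlock held),
+ * M_MLOCKED_MSK (held as a mutex), M_SPIN_MSK (held as a spinlock),
+ * M_PROMOTED_MSK, and the waiter count covered by M_WAITERS_MSK.
+ * Acquisition snapshots the word into %eax and cmpxchgs a modified
+ * copy from %ecx.
+ */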
* lck_mtx_convert_spin()
*/
NONLEAF_ENTRY(lck_mtx_lock_spin_always)
- LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+ mov %rdi, %rdx /* fetch lock pointer */
jmp Llmls_avoid_check
NONLEAF_ENTRY(lck_mtx_lock_spin)
- LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+ mov %rdi, %rdx /* fetch lock pointer */
CHECK_PREEMPTION_LEVEL()
Llmls_avoid_check:
- mov M_STATE(LMTX_REG), LMTX_C_REG32
- test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32 /* is the interlock or mutex held */
+ mov M_STATE(%rdx), %ecx
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */
jnz Llmls_slow
-Llmls_try: /* no - can't be INDIRECT, DESTROYED or locked */
- mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
- or $(M_ILOCKED_MSK | M_SPIN_MSK), LMTX_C_REG32
+Llmls_try: /* no - can't be INDIRECT, DESTROYED or locked */
+ mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK | M_SPIN_MSK), %ecx
PREEMPTION_DISABLE
lock
- cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
+ cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
jne Llmls_busy_disabled
- mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
- mov LMTX_A_REG, M_OWNER(LMTX_REG) /* record owner of interlock */
+ mov %gs:CPU_ACTIVE_THREAD, %rax
+ mov %rax, M_OWNER(%rdx) /* record owner of interlock */
#if MACH_LDEBUG
- test LMTX_A_REG, LMTX_A_REG
+ test %rax, %rax
jz 1f
- incl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+ incl TH_MUTEX_COUNT(%rax) /* lock statistic */
1:
#endif /* MACH_LDEBUG */
- LMTX_CHK_EXTENDED_EXIT
/* return with the interlock held and preemption disabled */
leave
#if CONFIG_DTRACE
LOCKSTAT_LABEL(_lck_mtx_lock_spin_lockstat_patch_point)
ret
- /* inherit lock pointer in LMTX_REG above */
- LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, LMTX_REG)
+ /* inherit lock pointer in %rdx above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_SPIN_ACQUIRE, %rdx)
#endif
ret
Llmls_slow:
- test $M_ILOCKED_MSK, LMTX_C_REG32 /* is the interlock held */
- jz Llml_contended /* no, must have been the mutex */
+ test $M_ILOCKED_MSK, %ecx /* is the interlock held */
+ jz Llml_contended /* no, must have been the mutex */
- cmp $(MUTEX_DESTROYED), LMTX_C_REG32 /* check to see if its marked destroyed */
+ cmp $(MUTEX_DESTROYED), %ecx /* check to see if it's marked destroyed */
je lck_mtx_destroyed
- cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex */
- jne Llmls_loop /* no... must be interlocked */
+ cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
+ jne Llmls_loop /* no... must be interlocked */
LMTX_ENTER_EXTENDED
- mov M_STATE(LMTX_REG), LMTX_C_REG32
- test $(M_SPIN_MSK), LMTX_C_REG32
+ mov M_STATE(%rdx), %ecx
+ test $(M_SPIN_MSK), %ecx
jz Llmls_loop1
LMTX_UPDATE_MISS /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */
Llmls_loop:
PAUSE
- mov M_STATE(LMTX_REG), LMTX_C_REG32
+ mov M_STATE(%rdx), %ecx
Llmls_loop1:
- test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
jz Llmls_try
- test $(M_MLOCKED_MSK), LMTX_C_REG32
- jnz Llml_contended /* mutex owned by someone else, go contend for it */
+ test $(M_MLOCKED_MSK), %ecx
+ jnz Llml_contended /* mutex owned by someone else, go contend for it */
jmp Llmls_loop
Llmls_busy_disabled:
NONLEAF_ENTRY(lck_mtx_lock)
- LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+ mov %rdi, %rdx /* fetch lock pointer */
CHECK_PREEMPTION_LEVEL()
- mov M_STATE(LMTX_REG), LMTX_C_REG32
- test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32 /* is the interlock or mutex held */
+ mov M_STATE(%rdx), %ecx
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */
jnz Llml_slow
-Llml_try: /* no - can't be INDIRECT, DESTROYED or locked */
- mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
- or $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32
+Llml_try: /* no - can't be INDIRECT, DESTROYED or locked */
+ mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
PREEMPTION_DISABLE
lock
- cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
+ cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
jne Llml_busy_disabled
- mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
- mov LMTX_A_REG, M_OWNER(LMTX_REG) /* record owner of mutex */
+ mov %gs:CPU_ACTIVE_THREAD, %rax
+ mov %rax, M_OWNER(%rdx) /* record owner of mutex */
#if MACH_LDEBUG
- test LMTX_A_REG, LMTX_A_REG
+ test %rax, %rax
jz 1f
- incl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+ incl TH_MUTEX_COUNT(%rax) /* lock statistic */
1:
#endif /* MACH_LDEBUG */
- testl $(M_WAITERS_MSK), M_STATE(LMTX_REG)
+ testl $(M_WAITERS_MSK), M_STATE(%rdx)
jz Llml_finish
LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
Llml_finish:
- andl $(~M_ILOCKED_MSK), M_STATE(LMTX_REG)
+ andl $(~M_ILOCKED_MSK), M_STATE(%rdx)
PREEMPTION_ENABLE
- LMTX_CHK_EXTENDED /* is this an extended mutex */
+ cmp %rdx, %rdi /* is this an extended mutex */
jne 2f
leave
#if CONFIG_DTRACE
LOCKSTAT_LABEL(_lck_mtx_lock_lockstat_patch_point)
ret
- /* inherit lock pointer in LMTX_REG above */
- LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, LMTX_REG)
+ /* inherit lock pointer in %rdx above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_LOCK_ACQUIRE, %rdx)
#endif
ret
2:
- LMTX_EXIT_EXTENDED
leave
#if CONFIG_DTRACE
LOCKSTAT_LABEL(_lck_mtx_lock_ext_lockstat_patch_point)
ret
- /* inherit lock pointer in LMTX_REG above */
- LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, LMTX_REG)
+ /* inherit lock pointer in %rdx above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_EXT_LOCK_ACQUIRE, %rdx)
#endif
ret
Llml_slow:
- test $M_ILOCKED_MSK, LMTX_C_REG32 /* is the interlock held */
- jz Llml_contended /* no, must have been the mutex */
+ test $M_ILOCKED_MSK, %ecx /* is the interlock held */
+ jz Llml_contended /* no, must have been the mutex */
- cmp $(MUTEX_DESTROYED), LMTX_C_REG32 /* check to see if its marked destroyed */
+ cmp $(MUTEX_DESTROYED), %ecx /* check to see if it's marked destroyed */
je lck_mtx_destroyed
- cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex? */
- jne Llml_loop /* no... must be interlocked */
+ cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
+ jne Llml_loop /* no... must be interlocked */
LMTX_ENTER_EXTENDED
- mov M_STATE(LMTX_REG), LMTX_C_REG32
- test $(M_SPIN_MSK), LMTX_C_REG32
+ mov M_STATE(%rdx), %ecx
+ test $(M_SPIN_MSK), %ecx
jz Llml_loop1
LMTX_UPDATE_MISS /* M_SPIN_MSK was set, so M_ILOCKED_MSK must also be present */
Llml_loop:
PAUSE
- mov M_STATE(LMTX_REG), LMTX_C_REG32
+ mov M_STATE(%rdx), %ecx
Llml_loop1:
- test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
jz Llml_try
- test $(M_MLOCKED_MSK), LMTX_C_REG32
- jnz Llml_contended /* mutex owned by someone else, go contend for it */
+ test $(M_MLOCKED_MSK), %ecx
+ jnz Llml_contended /* mutex owned by someone else, go contend for it */
jmp Llml_loop
Llml_busy_disabled:
Llml_contended:
- LMTX_CHK_EXTENDED /* is this an extended mutex */
+ cmp %rdx, %rdi /* is this an extended mutex */
je 0f
LMTX_UPDATE_MISS
0:
LMTX_CALLEXT1(lck_mtx_lock_spinwait_x86)
- test LMTX_RET_REG, LMTX_RET_REG
+ test %rax, %rax
jz Llml_acquired /* acquired mutex, interlock held and preemption disabled */
- cmp $1, LMTX_RET_REG /* check for direct wait status */
+ cmp $1, %rax /* check for direct wait status */
je 2f
- LMTX_CHK_EXTENDED /* is this an extended mutex */
+ cmp %rdx, %rdi /* is this an extended mutex */
je 2f
LMTX_UPDATE_DIRECT_WAIT
2:
- mov M_STATE(LMTX_REG), LMTX_C_REG32
- test $(M_ILOCKED_MSK), LMTX_C_REG32
+ mov M_STATE(%rdx), %ecx
+ test $(M_ILOCKED_MSK), %ecx
jnz 6f
- mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
- or $(M_ILOCKED_MSK), LMTX_C_REG32 /* try to take the interlock */
+ mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK), %ecx /* try to take the interlock */
PREEMPTION_DISABLE
lock
- cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
+ cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
jne 5f
- test $(M_MLOCKED_MSK), LMTX_C_REG32 /* we've got the interlock and */
+ test $(M_MLOCKED_MSK), %ecx /* we've got the interlock and */
jnz 3f
- or $(M_MLOCKED_MSK), LMTX_C_REG32 /* the mutex is free... grab it directly */
- mov LMTX_C_REG32, M_STATE(LMTX_REG)
+ or $(M_MLOCKED_MSK), %ecx /* the mutex is free... grab it directly */
+ mov %ecx, M_STATE(%rdx)
- mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
- mov LMTX_A_REG, M_OWNER(LMTX_REG) /* record owner of mutex */
+ mov %gs:CPU_ACTIVE_THREAD, %rax
+ mov %rax, M_OWNER(%rdx) /* record owner of mutex */
#if MACH_LDEBUG
- test LMTX_A_REG, LMTX_A_REG
+ test %rax, %rax
jz 1f
- incl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+ incl TH_MUTEX_COUNT(%rax) /* lock statistic */
1:
#endif /* MACH_LDEBUG */
Llml_acquired:
- testl $(M_WAITERS_MSK), M_STATE(LMTX_REG)
+ testl $(M_WAITERS_MSK), M_STATE(%rdx)
jnz 1f
- mov M_OWNER(LMTX_REG), LMTX_A_REG
- mov TH_WAS_PROMOTED_ON_WAKEUP(LMTX_A_REG), LMTX_A_REG32
- test LMTX_A_REG32, LMTX_A_REG32
+ mov M_OWNER(%rdx), %rax
+ mov TH_WAS_PROMOTED_ON_WAKEUP(%rax), %eax
+ test %eax, %eax
jz Llml_finish
1:
LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
jmp Llml_finish
3: /* interlock held, mutex busy */
- LMTX_CHK_EXTENDED /* is this an extended mutex */
+ cmp %rdx, %rdi /* is this an extended mutex */
je 4f
LMTX_UPDATE_WAIT
4:
NONLEAF_ENTRY(lck_mtx_try_lock_spin_always)
- LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+ mov %rdi, %rdx /* fetch lock pointer */
jmp Llmts_avoid_check
NONLEAF_ENTRY(lck_mtx_try_lock_spin)
- LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+ mov %rdi, %rdx /* fetch lock pointer */
Llmts_avoid_check:
- mov M_STATE(LMTX_REG), LMTX_C_REG32
- test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32 /* is the interlock or mutex held */
+ mov M_STATE(%rdx), %ecx
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */
jnz Llmts_slow
-Llmts_try: /* no - can't be INDIRECT, DESTROYED or locked */
- mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
- or $(M_ILOCKED_MSK | M_SPIN_MSK), LMTX_C_REG
+Llmts_try: /* no - can't be INDIRECT, DESTROYED or locked */
+ mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK | M_SPIN_MSK), %rcx
PREEMPTION_DISABLE
lock
- cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
+ cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
jne Llmts_busy_disabled
- mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
- mov LMTX_A_REG, M_OWNER(LMTX_REG) /* record owner of mutex */
+ mov %gs:CPU_ACTIVE_THREAD, %rax
+ mov %rax, M_OWNER(%rdx) /* record owner of mutex */
#if MACH_LDEBUG
- test LMTX_A_REG, LMTX_A_REG
+ test %rax, %rax
jz 1f
- incl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+ incl TH_MUTEX_COUNT(%rax) /* lock statistic */
1:
#endif /* MACH_LDEBUG */
- LMTX_CHK_EXTENDED_EXIT
leave
#if CONFIG_DTRACE
- mov $1, LMTX_RET_REG /* return success */
+ mov $1, %rax /* return success */
LOCKSTAT_LABEL(_lck_mtx_try_lock_spin_lockstat_patch_point)
ret
- /* inherit lock pointer in LMTX_REG above */
- LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, LMTX_REG)
+ /* inherit lock pointer in %rdx above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE, %rdx)
#endif
- mov $1, LMTX_RET_REG /* return success */
+ mov $1, %rax /* return success */
ret
Llmts_slow:
- test $(M_ILOCKED_MSK), LMTX_C_REG32 /* is the interlock held */
+ test $(M_ILOCKED_MSK), %ecx /* is the interlock held */
jz Llmts_fail /* no, must be held as a mutex */
- cmp $(MUTEX_DESTROYED), LMTX_C_REG32 /* check to see if its marked destroyed */
+ cmp $(MUTEX_DESTROYED), %ecx /* check to see if it's marked destroyed */
je lck_mtx_destroyed
- cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex? */
+ cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
jne Llmts_loop1
LMTX_ENTER_EXTENDED
Llmts_loop:
PAUSE
- mov M_STATE(LMTX_REG), LMTX_C_REG32
+ mov M_STATE(%rdx), %ecx
Llmts_loop1:
- test $(M_MLOCKED_MSK | M_SPIN_MSK), LMTX_C_REG32
+ test $(M_MLOCKED_MSK | M_SPIN_MSK), %ecx
jnz Llmts_fail
- test $(M_ILOCKED_MSK), LMTX_C_REG32
+ test $(M_ILOCKED_MSK), %ecx
jz Llmts_try
jmp Llmts_loop
NONLEAF_ENTRY(lck_mtx_try_lock)
- LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+ mov %rdi, %rdx /* fetch lock pointer */
- mov M_STATE(LMTX_REG), LMTX_C_REG32
- test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32 /* is the interlock or mutex held */
+ mov M_STATE(%rdx), %ecx
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* is the interlock or mutex held */
jnz Llmt_slow
-Llmt_try: /* no - can't be INDIRECT, DESTROYED or locked */
- mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
- or $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32
+Llmt_try: /* no - can't be INDIRECT, DESTROYED or locked */
+ mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
PREEMPTION_DISABLE
lock
- cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
+ cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
jne Llmt_busy_disabled
- mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
- mov LMTX_A_REG, M_OWNER(LMTX_REG) /* record owner of mutex */
+ mov %gs:CPU_ACTIVE_THREAD, %rax
+ mov %rax, M_OWNER(%rdx) /* record owner of mutex */
#if MACH_LDEBUG
- test LMTX_A_REG, LMTX_A_REG
+ test %rax, %rax
jz 1f
- incl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+ incl TH_MUTEX_COUNT(%rax) /* lock statistic */
1:
#endif /* MACH_LDEBUG */
- LMTX_CHK_EXTENDED_EXIT
-
- test $(M_WAITERS_MSK), LMTX_C_REG32
+ test $(M_WAITERS_MSK), %ecx
jz 0f
LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
0:
- andl $(~M_ILOCKED_MSK), M_STATE(LMTX_REG)
+ andl $(~M_ILOCKED_MSK), M_STATE(%rdx)
PREEMPTION_ENABLE
leave
#if CONFIG_DTRACE
- mov $1, LMTX_RET_REG /* return success */
+ mov $1, %rax /* return success */
/* Dtrace probe: LS_LCK_MTX_TRY_LOCK_ACQUIRE */
LOCKSTAT_LABEL(_lck_mtx_try_lock_lockstat_patch_point)
ret
- /* inherit lock pointer in LMTX_REG from above */
- LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, LMTX_REG)
+ /* inherit lock pointer in %rdx from above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_TRY_LOCK_ACQUIRE, %rdx)
#endif
- mov $1, LMTX_RET_REG /* return success */
+ mov $1, %rax /* return success */
ret
Llmt_slow:
- test $(M_ILOCKED_MSK), LMTX_C_REG32 /* is the interlock held */
+ test $(M_ILOCKED_MSK), %ecx /* is the interlock held */
jz Llmt_fail /* no, must be held as a mutex */
- cmp $(MUTEX_DESTROYED), LMTX_C_REG32 /* check to see if its marked destroyed */
+ cmp $(MUTEX_DESTROYED), %ecx /* check to see if it's marked destroyed */
je lck_mtx_destroyed
- cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex? */
+ cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
jne Llmt_loop
LMTX_ENTER_EXTENDED
Llmt_loop:
PAUSE
- mov M_STATE(LMTX_REG), LMTX_C_REG32
+ mov M_STATE(%rdx), %ecx
Llmt_loop1:
- test $(M_MLOCKED_MSK | M_SPIN_MSK), LMTX_C_REG32
+ test $(M_MLOCKED_MSK | M_SPIN_MSK), %ecx
jnz Llmt_fail
- test $(M_ILOCKED_MSK), LMTX_C_REG32
+ test $(M_ILOCKED_MSK), %ecx
jz Llmt_try
jmp Llmt_loop
Llmt_fail:
Llmts_fail:
- LMTX_CHK_EXTENDED /* is this an extended mutex */
+ cmp %rdx, %rdi /* is this an extended mutex */
je 0f
LMTX_UPDATE_MISS
- LMTX_EXIT_EXTENDED
0:
- xor LMTX_RET_REG, LMTX_RET_REG
+ xor %rax, %rax
NONLEAF_RET
NONLEAF_ENTRY(lck_mtx_convert_spin)
- LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+ mov %rdi, %rdx /* fetch lock pointer */
- mov M_STATE(LMTX_REG), LMTX_C_REG32
- cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex? */
+ mov M_STATE(%rdx), %ecx
+ cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
jne 0f
- mov M_PTR(LMTX_REG), LMTX_REG /* If so, take indirection */
- mov M_STATE(LMTX_REG), LMTX_C_REG32
+ mov M_PTR(%rdx), %rdx /* If so, take indirection */
+ mov M_STATE(%rdx), %ecx
0:
- test $(M_MLOCKED_MSK), LMTX_C_REG32 /* already owned as a mutex, just return */
+ test $(M_MLOCKED_MSK), %ecx /* already owned as a mutex, just return */
jnz 2f
- test $(M_WAITERS_MSK), LMTX_C_REG32 /* are there any waiters? */
+ test $(M_WAITERS_MSK), %ecx /* are there any waiters? */
jz 1f
LMTX_CALLEXT1(lck_mtx_lock_acquire_x86)
- mov M_STATE(LMTX_REG), LMTX_C_REG32
+ mov M_STATE(%rdx), %ecx
1:
- and $(~(M_ILOCKED_MSK | M_SPIN_MSK)), LMTX_C_REG32 /* convert from spin version to mutex */
- or $(M_MLOCKED_MSK), LMTX_C_REG32
- mov LMTX_C_REG32, M_STATE(LMTX_REG) /* since I own the interlock, I don't need an atomic update */
+ and $(~(M_ILOCKED_MSK | M_SPIN_MSK)), %ecx /* convert from spin version to mutex */
+ or $(M_MLOCKED_MSK), %ecx
+ mov %ecx, M_STATE(%rdx) /* since I own the interlock, I don't need an atomic update */
PREEMPTION_ENABLE
2:
NONLEAF_ENTRY(lck_mtx_unlock)
- LOAD_LMTX_REG(B_ARG0) /* fetch lock pointer */
+ mov %rdi, %rdx /* fetch lock pointer */
Llmu_entry:
- mov M_STATE(LMTX_REG), LMTX_C_REG32
+ mov M_STATE(%rdx), %ecx
Llmu_prim:
- cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex? */
+ cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
je Llmu_ext
Llmu_chktype:
- test $(M_MLOCKED_MSK), LMTX_C_REG32 /* check for full mutex */
+ test $(M_MLOCKED_MSK), %ecx /* check for full mutex */
jz Llmu_unlock
Llmu_mutex:
- test $(M_ILOCKED_MSK), LMTX_C_REG /* have to wait for interlock to clear */
+ test $(M_ILOCKED_MSK), %rcx /* have to wait for interlock to clear */
jnz Llmu_busy
- mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
- and $(~M_MLOCKED_MSK), LMTX_C_REG32 /* drop mutex */
- or $(M_ILOCKED_MSK), LMTX_C_REG32 /* pick up interlock */
+ mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
+ and $(~M_MLOCKED_MSK), %ecx /* drop mutex */
+ or $(M_ILOCKED_MSK), %ecx /* pick up interlock */
PREEMPTION_DISABLE
lock
- cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
- jne Llmu_busy_disabled /* branch on failure to spin loop */
+ cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
+ jne Llmu_busy_disabled /* branch on failure to spin loop */
Llmu_unlock:
- xor LMTX_A_REG, LMTX_A_REG
- mov LMTX_A_REG, M_OWNER(LMTX_REG)
- mov LMTX_C_REG, LMTX_A_REG /* keep original state in %ecx for later evaluation */
- and $(~(M_ILOCKED_MSK | M_SPIN_MSK | M_PROMOTED_MSK)), LMTX_A_REG
+ xor %rax, %rax
+ mov %rax, M_OWNER(%rdx)
+ mov %rcx, %rax /* keep original state in %ecx for later evaluation */
+ and $(~(M_ILOCKED_MSK | M_SPIN_MSK | M_PROMOTED_MSK)), %rax
- test $(M_WAITERS_MSK), LMTX_A_REG32
+ test $(M_WAITERS_MSK), %eax
jz 2f
- dec LMTX_A_REG32 /* decrement waiter count */
+ dec %eax /* decrement waiter count */
2:
- mov LMTX_A_REG32, M_STATE(LMTX_REG) /* since I own the interlock, I don't need an atomic update */
+ mov %eax, M_STATE(%rdx) /* since I own the interlock, I don't need an atomic update */
#if MACH_LDEBUG
/* perform lock statistics after drop to prevent delay */
- mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
- test LMTX_A_REG, LMTX_A_REG
+ mov %gs:CPU_ACTIVE_THREAD, %rax
+ test %rax, %rax
jz 1f
- decl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+ decl TH_MUTEX_COUNT(%rax) /* lock statistic */
1:
#endif /* MACH_LDEBUG */
- test $(M_PROMOTED_MSK | M_WAITERS_MSK), LMTX_C_REG32
+ test $(M_PROMOTED_MSK | M_WAITERS_MSK), %ecx
jz 3f
- LMTX_CALLEXT2(lck_mtx_unlock_wakeup_x86, LMTX_C_REG)
+ LMTX_CALLEXT2(lck_mtx_unlock_wakeup_x86, %rcx)
3:
PREEMPTION_ENABLE
- LMTX_CHK_EXTENDED
+ cmp %rdx, %rdi
jne 4f
leave
/* Dtrace: LS_LCK_MTX_UNLOCK_RELEASE */
LOCKSTAT_LABEL(_lck_mtx_unlock_lockstat_patch_point)
ret
- /* inherit lock pointer in LMTX_REG from above */
- LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, LMTX_REG)
+ /* inherit lock pointer in %rdx from above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_UNLOCK_RELEASE, %rdx)
#endif
ret
4:
/* Dtrace: LS_LCK_MTX_EXT_UNLOCK_RELEASE */
LOCKSTAT_LABEL(_lck_mtx_ext_unlock_lockstat_patch_point)
ret
- /* inherit lock pointer in LMTX_REG from above */
- LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, LMTX_REG)
+ /* inherit lock pointer in %rdx from above */
+ LOCKSTAT_RECORD(LS_LCK_MTX_EXT_UNLOCK_RELEASE, %rdx)
#endif
ret
PREEMPTION_ENABLE
Llmu_busy:
PAUSE
- mov M_STATE(LMTX_REG), LMTX_C_REG32
+ mov M_STATE(%rdx), %ecx
jmp Llmu_mutex
Llmu_ext:
- mov M_PTR(LMTX_REG), LMTX_REG
- mov M_OWNER(LMTX_REG), LMTX_A_REG
- mov %gs:CPU_ACTIVE_THREAD, LMTX_C_REG
- CHECK_UNLOCK(LMTX_C_REG, LMTX_A_REG)
- mov M_STATE(LMTX_REG), LMTX_C_REG32
+ mov M_PTR(%rdx), %rdx
+ mov M_OWNER(%rdx), %rax
+ mov %gs:CPU_ACTIVE_THREAD, %rcx
+ CHECK_UNLOCK(%rcx, %rax)
+ mov M_STATE(%rdx), %ecx
jmp Llmu_chktype
LEAF_ENTRY(lck_mtx_ilk_unlock)
- LOAD_LMTX_REG(L_ARG0) /* fetch lock pointer - no indirection here */
+ mov %rdi, %rdx /* fetch lock pointer - no indirection here */
- andl $(~M_ILOCKED_MSK), M_STATE(LMTX_REG)
+ andl $(~M_ILOCKED_MSK), M_STATE(%rdx)
- PREEMPTION_ENABLE /* need to re-enable preemption */
+ PREEMPTION_ENABLE /* need to re-enable preemption */
LEAF_RET
LEAF_ENTRY(lck_mtx_lock_grab_mutex)
- LOAD_LMTX_REG(L_ARG0) /* fetch lock pointer - no indirection here */
+ mov %rdi, %rdx /* fetch lock pointer - no indirection here */
- mov M_STATE(LMTX_REG), LMTX_C_REG32
+ mov M_STATE(%rdx), %ecx
- test $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32 /* can't have the mutex yet */
+ test $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx /* can't have the mutex yet */
jnz 3f
- mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
- or $(M_ILOCKED_MSK | M_MLOCKED_MSK), LMTX_C_REG32
+ mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK | M_MLOCKED_MSK), %ecx
PREEMPTION_DISABLE
lock
- cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
+ cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
jne 2f /* branch on failure to spin loop */
- mov %gs:CPU_ACTIVE_THREAD, LMTX_A_REG
- mov LMTX_A_REG, M_OWNER(LMTX_REG) /* record owner of mutex */
+ mov %gs:CPU_ACTIVE_THREAD, %rax
+ mov %rax, M_OWNER(%rdx) /* record owner of mutex */
#if MACH_LDEBUG
- test LMTX_A_REG, LMTX_A_REG
+ test %rax, %rax
jz 1f
- incl TH_MUTEX_COUNT(LMTX_A_REG) /* lock statistic */
+ incl TH_MUTEX_COUNT(%rax) /* lock statistic */
1:
#endif /* MACH_LDEBUG */
- mov $1, LMTX_RET_REG /* return success */
+ mov $1, %rax /* return success */
LEAF_RET
2:
PREEMPTION_ENABLE
3:
- xor LMTX_RET_REG, LMTX_RET_REG /* return failure */
+ xor %rax, %rax /* return failure */
LEAF_RET
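+/*
+ * In C terms the grab above is a single CAS that takes the interlock
+ * and the mutex bit together (illustrative sketch; m_state and
+ * m_owner stand for the M_STATE and M_OWNER fields):
+ *
+ *	boolean_t lck_mtx_grab_sketch(volatile uint32_t *m_state,
+ *	    thread_t *m_owner)
+ *	{
+ *		uint32_t old = *m_state;
+ *		if ((old & (M_ILOCKED_MSK | M_MLOCKED_MSK)) == 0) {
+ *			preemption_disable();
+ *			if (__sync_bool_compare_and_swap(m_state, old,
+ *			    old | M_ILOCKED_MSK | M_MLOCKED_MSK)) {
+ *				*m_owner = current_thread();
+ *				return TRUE;	// preemption left disabled
+ *			}
+ *			preemption_enable();
+ *		}
+ *		return FALSE;
+ *	}
+ */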
LEAF_ENTRY(lck_mtx_lock_mark_destroyed)
- LOAD_LMTX_REG(L_ARG0)
+ mov %rdi, %rdx
1:
- mov M_STATE(LMTX_REG), LMTX_C_REG32
- cmp $(MUTEX_IND), LMTX_C_REG32 /* Is this an indirect mutex? */
+ mov M_STATE(%rdx), %ecx
+ cmp $(MUTEX_IND), %ecx /* Is this an indirect mutex? */
jne 2f
- movl $(MUTEX_DESTROYED), M_STATE(LMTX_REG) /* convert to destroyed state */
+ movl $(MUTEX_DESTROYED), M_STATE(%rdx) /* convert to destroyed state */
jmp 3f
2:
- test $(M_ILOCKED_MSK), LMTX_C_REG /* have to wait for interlock to clear */
+ test $(M_ILOCKED_MSK), %rcx /* have to wait for interlock to clear */
jnz 5f
PREEMPTION_DISABLE
- mov LMTX_C_REG, LMTX_A_REG /* eax contains snapshot for cmpxchgl */
- or $(M_ILOCKED_MSK), LMTX_C_REG32
+ mov %rcx, %rax /* eax contains snapshot for cmpxchgl */
+ or $(M_ILOCKED_MSK), %ecx
lock
- cmpxchg LMTX_C_REG32, M_STATE(LMTX_REG) /* atomic compare and exchange */
- jne 4f /* branch on failure to spin loop */
- movl $(MUTEX_DESTROYED), M_STATE(LMTX_REG) /* convert to destroyed state */
+ cmpxchg %ecx, M_STATE(%rdx) /* atomic compare and exchange */
+ jne 4f /* branch on failure to spin loop */
+ movl $(MUTEX_DESTROYED), M_STATE(%rdx) /* convert to destroyed state */
PREEMPTION_ENABLE
3:
- LEAF_RET /* return with M_ILOCKED set */
+ LEAF_RET /* return with M_ILOCKED set */
4:
PREEMPTION_ENABLE
5:
#endif /* MACH_RT */
LEAF_RET
-
-LEAF_ENTRY(i_bit_set)
- lock
- bts %edi,(%rsi)
- LEAF_RET
-
-LEAF_ENTRY(i_bit_clear)
- lock
- btr %edi,(%rsi)
- LEAF_RET
-
-
-LEAF_ENTRY(bit_lock)
-1:
- lock
- bts %edi,(%rsi)
- jb 1b
- LEAF_RET
-
-
-LEAF_ENTRY(bit_lock_try)
- lock
- bts %edi,(%rsi)
- jb bit_lock_failed
- movl $1, %eax
- LEAF_RET
-bit_lock_failed:
- xorl %eax,%eax
- LEAF_RET
-
-LEAF_ENTRY(bit_unlock)
- lock
- btr %edi,(%rsi)
- LEAF_RET
-
-
/*
* Atomic primitives, prototyped in kern/simple_lock.h
*/
1:
#endif
movl %esi, %eax /* Load addend */
- lock xaddl %eax, (%rdi) /* Atomic exchange and add */
+ lock xaddl %eax, (%rdi) /* Atomic exchange and add */
addl %esi, %eax /* Calculate result */
LEAF_RET
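+/*
+ * xaddl leaves the pre-add value in %eax, so adding the addend once
+ * more yields the post-add result that is returned; builtin
+ * equivalent (illustrative): __sync_add_and_fetch(dest, delta).
+ */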
#endif
negl %esi
movl %esi, %eax
- lock xaddl %eax, (%rdi) /* Atomic exchange and add */
+ lock xaddl %eax, (%rdi) /* Atomic exchange and add */
addl %esi, %eax /* Calculate result */
LEAF_RET
ud2
1:
#endif
- lock andl %esi, (%rdi) /* Atomic OR */
+ lock andl %esi, (%rdi) /* Atomic AND */
LEAF_RET