- movl M_ILK,%eax /* read interlock */
- testl %eax,%eax /* unlocked? */
- je Lmu_retry /* yes, go try to grab it */
- jmp Lmu_ilk_loop /* no - keep spinning */
-
-Lmu_wakeup:
- pushl M_LOCKED
- pushl %edx /* push mutex address */
- call EXT(lck_mtx_unlock_wakeup)/* yes, wake a thread */
- addl $8,%esp
- movl B_ARG0,%edx /* restore lock pointer */
- jmp Lmu_doit
-
-/*
- * void lck_mtx_assert(lck_mtx_t* l, unsigned int)
- * void _mutex_assert(mutex_t, unsigned int)
- * Takes the address of a lock, and an assertion type as parameters.
- * The assertion can take one of two forms determined by the type
- * parameter: either the lock is held by the current thread, and the
- * type is LCK_MTX_ASSERT_OWNED, or it isn't and the type is
- * LCK_MTX_ASSERT_NOT_OWNED. Calls panic on assertion failure.
- *
- */
-
-Entry(lck_mtx_assert)
-Entry(_mutex_assert)
- movl S_ARG0,%edx /* %edx = lock address (arg 0) */
- movl %gs:CPU_ACTIVE_THREAD,%ecx /* %ecx = current thread */
-
- cmpl $(MUTEX_IND),M_ITAG /* is this an indirect mutex? */
- cmove M_PTR,%edx /* if so, branchlessly follow pointer to the real lock */
-
- movl M_LOCKED,%eax /* %eax = owner from the lock word */
- cmpl $(MUTEX_LOCKED_AS_SPIN),%eax /* held as the spin variant? */
- cmove M_ILK,%eax /* yes: spin-variant owner is kept in the interlock field */
-
- cmpl $(MUTEX_ASSERT_OWNED),S_ARG1 /* which assertion was requested? */
- jne 2f /* not OWNED: handle NOT_OWNED case */
- cmpl %eax,%ecx /* owner == current thread? */
- jne 3f /* no - OWNED assertion failed, go panic */
-1: /* assertion holds */
- ret /* just return */
-2: /* NOT_OWNED assertion */
- cmpl %eax,%ecx /* owner == current thread? */
- jne 1b /* no - assertion holds, return */
- movl %edx,S_ARG1 /* failed: reuse caller arg slots as panic() args - lock addr */
- movl $(mutex_assert_owned_str),S_ARG0 /* ... and format string */
- jmp 4f
-3: /* OWNED assertion failed */
- movl %edx,S_ARG1 /* reuse caller arg slots as panic() args - lock addr */
- movl $(mutex_assert_not_owned_str),S_ARG0 /* ... and format string */
-4:
- jmp EXT(panic) /* tail-call panic with rewritten args; never returns */
-
-.data
-mutex_assert_not_owned_str:
- .asciz "mutex (%p) not owned\n"
-mutex_assert_owned_str:
- .asciz "mutex (%p) owned\n"
-.text
-
-/* This preprocessor define controls whether the R-M-W updates of the
- * per-group statistics elements are atomic (LOCK-prefixed)
- * Enabled by default.
- */
-#define ATOMIC_STAT_UPDATES 1
-
-#if defined(ATOMIC_STAT_UPDATES) /* NOTE(review): tests defined(), not the value -- #undef (not 0) to disable */
-#define LOCK_IF_ATOMIC_STAT_UPDATES lock
-#else
-#define LOCK_IF_ATOMIC_STAT_UPDATES
-#endif /* ATOMIC_STAT_UPDATES */
-
-
-/*
- * lck_mtx_lock()
- * lck_mtx_try_lock()
- * lck_mtx_unlock()
- * lck_mtx_lock_spin()
- * lck_mtx_convert_spin()
- *
- * These are variants of mutex_lock(), mutex_try(), mutex_unlock()
- * mutex_lock_spin and mutex_convert_spin without
- * DEBUG checks (which require fields not present in lck_mtx_t's).
- */
-
-NONLEAF_ENTRY(lck_mtx_lock_spin)
-
- movl B_ARG0,%edx /* fetch lock pointer */
- pushf /* save interrupt state */
-
- CHECK_NO_SIMPLELOCKS()
- CHECK_PREEMPTION_LEVEL()