+#if defined(__i386__) || defined(__x86_64__)
+
+/*
+ * Acquire the lock sequence word for condition variable signalling/broadcast.
+ */
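+/*
+ * On return, *mgenp and *ugenp hold the lock/unlock sequence values that
+ * were read, *pmtxp points at the lock sequence word, and *flagp carries
+ * the mutex options with _PTHREAD_MTX_OPT_HOLD or _PTHREAD_MTX_OPT_NOHOLD
+ * or'ed in to tell the caller whether the lock is held across the
+ * signal/broadcast.
+ */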
+__private_extern__ void
+__mtx_holdlock(npthread_mutex_t * mutex, uint32_t diff, uint32_t * flagp, uint32_t **pmtxp, uint32_t * mgenp, uint32_t * ugenp)
+{
+ uint32_t mgen, ugen, ngen;
+ int hold = 0;
+ int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_START, (uint32_t)mutex, diff, firstfit, 0, 0);
+#endif
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ /* no holds for shared mutexes */
+ hold = 2;
+ mgen = 0;
+ ugen = 0;
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ goto out;
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+
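+ /*
+ * The word at lseqaddr is a generation count in the upper bits (advanced
+ * by PTHRW_INC per reference, isolated by PTHRW_COUNT_MASK) with status
+ * flags such as PTHRW_EBIT (exclusive hold) in the low bits. The loop
+ * below re-reads both sequence words and retries the compare-and-swap
+ * until it lands on a consistent snapshot.
+ */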
+retry:
+ mgen = *lseqaddr;
+ ugen = *useqaddr;
+ /* no need to do extra wrap */
+ ngen = mgen + (PTHRW_INC * diff);
+ hold = 0;
+
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_NONE, (uint32_t)mutex, 0, mgen, ngen, 0);
+#endif
+ /* can we acquire the lock? */
+ if ((mgen & PTHRW_EBIT) == 0) {
+ /* if it is firstfit, no need to hold till the cvar returns */
+ if (firstfit == 0) {
+ ngen |= PTHRW_EBIT;
+ hold = 1;
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_NONE, (uint32_t)mutex, 1, mgen, ngen, 0);
+#endif
+ }
+
+ /* update lockseq */
+ if (OSAtomicCompareAndSwap32(mgen, ngen, (volatile int32_t *)lseqaddr) != TRUE)
+ goto retry;
+ if (hold == 1) {
+ mutex->m_tid = PTHREAD_MTX_TID_SWITCHING;
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_NONE, (uint32_t)mutex, 2, hold, 0, 0);
+#endif
+
+out:
+ if (flagp != NULL) {
+ if (hold == 1) {
+ *flagp = (mutex->mtxopts.value | _PTHREAD_MTX_OPT_HOLD);
+ } else if (hold == 2) {
+ *flagp = (mutex->mtxopts.value | _PTHREAD_MTX_OPT_NOHOLD);
+ } else {
+ *flagp = mutex->mtxopts.value;
+ }
+ }
+ if (mgenp != NULL)
+ *mgenp = mgen;
+ if (ugenp != NULL)
+ *ugenp = ugen;
+ if (pmtxp != NULL)
+ *pmtxp = lseqaddr;
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MHOLD | DBG_FUNC_END, (uint32_t)mutex, hold, 0, 0, 0);
+#endif
+}
+
+
+/*
+ * Drop the mutex unlock references (from cond wait or mutex_unlock()).
+ * mgenp and ugenp are valid only if notifyp is set.
+ */
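+/*
+ * count is the number of unlock references to drop. If bit 0 of *notifyp
+ * comes back set, the caller must enter the kernel (__psynch_mutexdrop)
+ * to hand the mutex off to a waiter.
+ */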
+__private_extern__ int
+__mtx_droplock(npthread_mutex_t * mutex, int count, uint32_t * flagp, uint32_t ** pmtxp, uint32_t * mgenp, uint32_t * ugenp, uint32_t *notifyp)
+{
+ int oldval, newval, lockval, unlockval;
+ uint64_t oldtid;
+ pthread_t self = pthread_self();
+ uint32_t notify = 0;
+ uint64_t oldval64, newval64;
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+ int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_START, (uint32_t)mutex, count, 0, 0, 0);
+#endif
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+
+ if (flagp != NULL)
+ *flagp = mutex->mtxopts.value;
+
+ if (firstfit != 0)
+ notify |= 0x80000000;
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED)
+ notify |= 0x40000000;
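+ /*
+ * notify encoding: bit 31 = firstfit policy, bit 30 = process-shared;
+ * bit 0 is set further down only when a waiter handoff through the
+ * kernel is required.
+ */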
+
+ if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL)
+ {
+ if (mutex->m_tid != (uint64_t)((uintptr_t)self))
+ {
+ PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
+ return(EPERM);
+ } else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
+ --mutex->mtxopts.options.lock_count)
+ {
+ PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
+ goto out;
+ }
+ }
+
+
+ if (mutex->m_tid != (uint64_t)((uintptr_t)self))
+ return(EINVAL);
+
+
+ml0:
+ oldval = *useqaddr;
+ unlockval = oldval + (PTHRW_INC * count);
+ lockval = *lseqaddr;
+
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 10, lockval, oldval, 0);
+#endif
+#if 1
+ if (lockval == oldval)
+ LIBC_ABORT("same unlock and lockseq \n");
+#endif
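+ /*
+ * Fast path: if this drop consumes every lock reference (the lock
+ * sequence count equals the new unlock sequence), reset both 32-bit
+ * sequence words to zero with a single 64-bit compare-and-swap. This
+ * relies on lseqaddr and useqaddr being adjacent and 8-byte aligned;
+ * see the misalign handling in _new_pthread_mutex_init.
+ */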
+
+ if ((lockval & PTHRW_COUNT_MASK) == unlockval) {
+ oldtid = mutex->m_tid;
+
+ mutex->m_tid = 0;
+
+ oldval64 = (((uint64_t)oldval) << 32);
+ oldval64 |= lockval;
+
+ newval64 = 0;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) == TRUE) {
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 1, 0, 0, 0);
+#endif
+ goto out;
+ } else {
+ mutex->m_tid = oldtid;
+ /* sequence words changed underneath us; restore tid and retry */
+ goto ml0;
+ }
+ }
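+ /*
+ * Not the last reference: under firstfit, clear EBIT together with the
+ * unlock-sequence bump in one 64-bit swap; under fairshare, only bump
+ * the unlock sequence and let the kernel pick the next owner.
+ */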
+
+ if (firstfit != 0) {
+ /* reset ebit along with unlock */
+ newval = (lockval & ~PTHRW_EBIT);
+
+ lockval = newval;
+ oldval64 = (((uint64_t)oldval) << 32);
+ oldval64 |= lockval;
+
+ newval64 = (((uint64_t)unlockval) << 32);
+ newval64 |= newval;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE) {
+ goto ml0;
+ }
+ lockval = newval;
+ } else {
+ /* fairshare, just update and go to kernel */
+ if (OSAtomicCompareAndSwap32(oldval, unlockval, (volatile int32_t *)useqaddr) != TRUE)
+ goto ml0;
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 2, oldval, unlockval, 0);
+#endif
+ }
+
+ notify |= 1;
+
+ if (notifyp != NULL) {
+ if (mgenp != NULL)
+ *mgenp = lockval;
+ if (ugenp != NULL)
+ *ugenp = unlockval;
+ if (pmtxp != NULL)
+ *pmtxp = lseqaddr;
+ }
+out:
+ if (notifyp != NULL) {
+ *notifyp = notify;
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ return(0);
+}
+
+int
+__mtx_updatebits(npthread_mutex_t *mutex, uint32_t oupdateval, int firstfit, int fromcond)
+{
+ uint32_t lgenval, newval, bits;
+ int isebit = 0;
+ uint32_t updateval = oupdateval;
+ pthread_mutex_t * omutex = (pthread_mutex_t *)mutex;
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_START, (uint32_t)mutex, oupdateval, firstfit, fromcond, 0);
+#endif
+
+retry:
+ lgenval = *lseqaddr;
+ bits = updateval & PTHRW_BIT_MASK;
+
+ if (lgenval == updateval)
+ goto out;
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 1, lgenval, updateval, 0);
+#endif
+ if ((lgenval & PTHRW_BIT_MASK) == bits)
+ goto out;
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 2, lgenval, bits, 0);
+#endif
+ /* firstfit might not have EBIT */
+ if (firstfit != 0) {
+ /* note whether EBIT was set, then clear it for the checks below */
+ if ((lgenval & PTHRW_EBIT) != 0)
+ isebit = 1;
+ lgenval &= ~PTHRW_EBIT;
+ }
+
+ if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 3, lgenval, updateval, 0);
+#endif
+ updateval |= PTHRW_EBIT; /* make sure EBIT is set */
+ if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE) {
+ if (firstfit == 0)
+ goto retry;
+ goto handleffit;
+ }
+ /* updated successfully */
+ goto out;
+ }
+
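+ /*
+ * EBIT is always set here since this thread is taking ownership; if the
+ * current word carries WBIT (waiters) but the kernel's update value does
+ * not, keep WBIT so the remaining waiters are not forgotten.
+ */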
+
+ if (((lgenval & PTHRW_WBIT) != 0) && ((updateval & PTHRW_WBIT) == 0)) {
+ newval = lgenval | (bits | PTHRW_WBIT | PTHRW_EBIT);
+ } else {
+ newval = lgenval | (bits | PTHRW_EBIT);
+ }
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 4, lgenval, newval, 0);
+#endif
+ if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE) {
+ if (firstfit == 0)
+ goto retry;
+ goto handleffit;
+ }
+out:
+ /* successful bits update */
+ mutex->m_tid = (uint64_t)((uintptr_t)pthread_self());
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ return(0);
+
+handleffit:
+ /* firstfit failure */
+ newval = *lseqaddr;
+ if ((newval & PTHRW_EBIT) == 0)
+ goto retry;
+ if (((lgenval & PTHRW_COUNT_MASK) == (newval & PTHRW_COUNT_MASK)) && (isebit == 1)) {
+ if (fromcond == 0)
+ return(1);
+ else {
+ /* called from condition variable code block again */
+ml1:
+#if USE_COMPAGE /* [ */
+ updateval = __psynch_mutexwait((pthread_mutex_t *)lseqaddr, newval | PTHRW_RETRYBIT, *useqaddr, (uint64_t)0,
+ mutex->mtxopts.value);
+#else /* USE_COMPAGE ][ */
+ updateval = __psynch_mutexwait(omutex, newval | PTHRW_RETRYBIT, *useqaddr, (uint64_t)0,
+ mutex->mtxopts.value);
+#endif /* USE_COMPAGE ] */
+ if (updateval == (uint32_t)-1) {
+ goto ml1;
+ }
+
+ goto retry;
+ }
+ }
+ /* seqcount changed, retry */
+ goto retry;
+}
+
+int
+_new_pthread_mutex_lock(pthread_mutex_t *omutex)
+{
+ pthread_t self;
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+ int sig = mutex->sig;
+ int retval;
+ uint32_t oldval, newval, uval, updateval;
+ int gotlock = 0;
+ int firstfit = 0;
+ int retrybit = 0;
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+ int updatebitsonly = 0;
+#if USE_COMPAGE
+ uint64_t mytid;
+ int sysret = 0;
+ uint32_t mask;
+#endif
+
+ /* To provide backwards compat for apps using a mutex incorrectly */
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+ if (sig != _PTHREAD_MUTEX_SIG) {
+ LOCK(mutex->lock);
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig == _PTHREAD_MUTEX_SIG_init)) {
+ /* static initializer, init the mutex */
+ _new_pthread_mutex_init(omutex, NULL);
+ self = _PTHREAD_MUTEX_OWNER_SELF;
+ } else {
+ UNLOCK(mutex->lock);
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+ UNLOCK(mutex->lock);
+ }
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_START, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+
+ self = pthread_self();
+ if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
+ if (mutex->m_tid == (uint64_t)((uintptr_t)self)) {
+ if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
+ {
+ if (mutex->mtxopts.options.lock_count < USHRT_MAX)
+ {
+ mutex->mtxopts.options.lock_count++;
+ PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
+ retval = 0;
+ } else {
+ retval = EAGAIN;
+ PLOCKSTAT_MUTEX_ERROR(omutex, retval);
+ }
+ } else { /* PTHREAD_MUTEX_ERRORCHECK */
+ retval = EDEADLK;
+ PLOCKSTAT_MUTEX_ERROR(omutex, retval);
+ }
+ return (retval);
+ }
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 1, 0, 0, 0);
+#endif
+loop:
+#if USE_COMPAGE /* [ */
+
+ mytid = (uint64_t)((uintptr_t)pthread_self());
+
+ml0:
+ mask = PTHRW_EBIT;
+ retval = _commpage_pthread_mutex_lock(lseqaddr, mutex->mtxopts.value, mytid, mask, &mutex->m_tid, &sysret);
+ if (retval == 0) {
+ gotlock = 1;
+ } else if (retval == 1) {
+ gotlock = 1;
+ updateval = sysret;
+ /* returns 0 on successful update */
+ if (__mtx_updatebits(mutex, updateval, firstfit, 0) == 1) {
+ /* could not acquire; may be locked in the firstfit case */
+#if USE_COMPAGE
+ LIBC_ABORT("compage implementation looping in libc\n");
+#endif
+ goto ml0;
+ }
+ }
+#if NEVERINCOMPAGE
+ else if (retval == 3) {
+ cthread_set_errno_self(sysret);
+ oldval = *lseqaddr;
+ uval = *useqaddr;
+ newval = oldval + PTHRW_INC;
+ gotlock = 0;
+ /* to block in the kernel again */
+ }
+#endif
+ else {
+ LIBC_ABORT("comapge implementatin bombed \n");
+ }
+
+
+#else /* USE_COMPAGE ][ */
+ oldval = *lseqaddr;
+ uval = *useqaddr;
+ newval = oldval + PTHRW_INC;
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, oldval, uval, 0);
+#endif
+
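+ /*
+ * Lock fast path: advance the lock sequence and set EBIT in a single
+ * CAS. If another thread already holds EBIT, advertise this thread as
+ * a waiter with WBIT and fall through to the kernel wait below.
+ */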
+ if ((oldval & PTHRW_EBIT) == 0) {
+ gotlock = 1;
+ newval |= PTHRW_EBIT;
+ } else {
+ gotlock = 0;
+ newval |= PTHRW_WBIT;
+ }
+
+ if (OSAtomicCompareAndSwap32(oldval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+ if (gotlock != 0)
+ mutex->m_tid = (uint64_t)((uintptr_t)self);
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, oldval, newval, 0);
+#endif
+ } else
+ goto loop;
+
+
+ retrybit = 0;
+ if (gotlock == 0) {
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 3, 0, 0, 0);
+#endif
+ firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
+ml1:
+ updateval = __psynch_mutexwait(omutex, newval | retrybit, uval, (uint64_t)0,
+ mutex->mtxopts.value);
+
+ if (updateval == (uint32_t)-1) {
+ updatebitsonly = 0;
+ goto ml1;
+ }
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 4, updateval, 0, 0);
+#endif
+ /* returns 0 on successful update */
+ if (__mtx_updatebits(mutex, updateval, firstfit, 0) == 1) {
+ /* could not acquire; may be locked in the firstfit case */
+ retrybit = PTHRW_RETRYBIT;
+#if USE_COMPAGE
+ LIBC_ABORT("compage implementation looping in libc\n");
+#endif
+ goto ml1;
+ }
+ }
+#endif /* USE_COMPAGE ] */
+
+ if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
+ mutex->mtxopts.options.lock_count++;
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ return (0);
+}
+
+/*
+ * Attempt to lock a mutex, but don't block if this isn't possible.
+ */
+int
+_new_pthread_mutex_trylock(pthread_mutex_t *omutex)
+{
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+ int sig = mutex->sig;
+ uint32_t oldval, newval;
+ int error = 0;
+ pthread_t self;
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+
+ /* To provide backwards compat for apps using a mutex incorrectly */
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+
+ if (sig != _PTHREAD_MUTEX_SIG) {
+ LOCK(mutex->lock);
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig == _PTHREAD_MUTEX_SIG_init)) {
+ /* static initializer, init the mutex */
+ _new_pthread_mutex_init(omutex, NULL);
+ self = _PTHREAD_MUTEX_OWNER_SELF;
+ } else {
+ UNLOCK(mutex->lock);
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+ UNLOCK(mutex->lock);
+ }
+
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+
+ self = pthread_self();
+ if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
+ if (mutex->m_tid == (uint64_t)((uintptr_t)self)) {
+ if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
+ {
+ if (mutex->mtxopts.options.lock_count < USHRT_MAX)
+ {
+ mutex->mtxopts.options.lock_count++;
+ PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
+ error = 0;
+ } else {
+ error = EAGAIN;
+ PLOCKSTAT_MUTEX_ERROR(omutex, error);
+ }
+ } else { /* PTHREAD_MUTEX_ERRORCHECK */
+ error = EDEADLK;
+ PLOCKSTAT_MUTEX_ERROR(omutex, error);
+ }
+ return (error);
+ }
+ }
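+ /*
+ * Trylock never blocks: if EBIT is already set, publish PTHRW_TRYLKBIT
+ * and return EBUSY; otherwise take the lock exactly as the lock fast
+ * path does, advancing the sequence and setting EBIT in one CAS.
+ */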
+retry:
+ oldval = *lseqaddr;
+
+ if ((oldval & PTHRW_EBIT) != 0) {
+ newval = oldval | PTHRW_TRYLKBIT;
+ if (OSAtomicCompareAndSwap32(oldval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+ error = EBUSY;
+ } else
+ goto retry;
+ } else {
+ newval = (oldval + PTHRW_INC) | PTHRW_EBIT;
+ if ((OSAtomicCompareAndSwap32(oldval, newval, (volatile int32_t *)lseqaddr) == TRUE)) {
+ mutex->m_tid = (uint64_t)((uintptr_t)self);
+ if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
+ mutex->mtxopts.options.lock_count++;
+ } else
+ goto retry;
+ }
+
+ return(error);
+}
+
+/*
+ * Unlock a mutex.
+ * TODO: Priority inheritance stuff
+ */
+int
+_new_pthread_mutex_unlock(pthread_mutex_t *omutex)
+{
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+ int retval;
+ uint32_t mtxgen, mtxugen, flags, notify;
+ int sig = mutex->sig;
+ pthread_t self = pthread_self();
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+
+ /* To provide backwards compat for apps using a mutex incorrectly */
+
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_START, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig != _PTHREAD_MUTEX_SIG_init)) {
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+ if (sig != _PTHREAD_MUTEX_SIG) {
+ LOCK(mutex->lock);
+ if ((sig != _PTHREAD_MUTEX_SIG) && (sig == _PTHREAD_MUTEX_SIG_init)) {
+ /* static initializer, init the mutex */
+ _new_pthread_mutex_init(omutex, NULL);
+ self = _PTHREAD_MUTEX_OWNER_SELF;
+ } else {
+ UNLOCK(mutex->lock);
+ PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
+ return(EINVAL);
+ }
+ UNLOCK(mutex->lock);
+ }
+
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
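+ /*
+ * Drop one unlock reference; if __mtx_droplock reports a waiter (bit 0
+ * of notify), enter the kernel via __psynch_mutexdrop to perform the
+ * handoff. EINTR from the kernel is treated as success.
+ */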
+ notify = 0;
+ retval = __mtx_droplock(mutex, 1, &flags, NULL, &mtxgen, &mtxugen, &notify);
+ if (retval != 0)
+ return(retval);
+
+ if ((notify & 1) != 0) {
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_NONE, (uint32_t)mutex, 1, 0, 0, 0);
+#endif
+#if USE_COMPAGE /* [ */
+ if (__psynch_mutexdrop((pthread_mutex_t *)lseqaddr, mtxgen, mtxugen, (uint64_t)0, flags) == (uint32_t)-1)
+#else /* USE_COMPAGE ][ */
+ if (__psynch_mutexdrop(omutex, mtxgen, mtxugen, (uint64_t)0, flags) == (uint32_t)-1)
+#endif /* USE_COMPAGE ] */
+ {
+ if (errno == EINTR)
+ return(0);
+ else
+ return(errno);
+ }
+ }
+#if _KSYN_TRACE_
+ (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
+#endif
+ return(0);
+}
+
+
+/*
+ * Initialize a mutex variable, possibly with additional attributes.
+ */
+int
+_new_pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
+{
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+
+ if (attr)
+ {
+ if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG)
+ return (EINVAL);
+ mutex->prioceiling = attr->prioceiling;
+ mutex->mtxopts.options.protocol = attr->protocol;
+ mutex->mtxopts.options.policy = attr->policy;
+ mutex->mtxopts.options.type = attr->type;
+ mutex->mtxopts.options.pshared = attr->pshared;
+ } else {
+ mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
+ mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
+ mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
+ mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
+ mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
+ }
+
+ mutex->mtxopts.options.lock_count = 0;
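+ /*
+ * m_seq[] has three words so that an adjacent, naturally aligned
+ * (lockseq, unlockseq) pair can always be chosen: the 64-bit
+ * compare-and-swap in __mtx_droplock requires the two 32-bit sequence
+ * words to share one 8-byte-aligned unit.
+ */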
+ /* is the address 8-byte aligned? */
+ if (((uintptr_t)mutex & 0x07) != 0) {
+ /* 4-byte aligned */
+ mutex->mtxopts.options.misalign = 1;
+#if defined(__LP64__)
+ mutex->m_lseqaddr = &mutex->m_seq[0];
+ mutex->m_useqaddr = &mutex->m_seq[1];
+#else /* __LP64__ */
+ mutex->m_lseqaddr = &mutex->m_seq[1];
+ mutex->m_useqaddr = &mutex->m_seq[2];
+#endif /* __LP64__ */
+ } else {
+ /* 8-byte aligned */
+ mutex->mtxopts.options.misalign = 0;
+#if defined(__LP64__)
+ mutex->m_lseqaddr = &mutex->m_seq[1];
+ mutex->m_useqaddr = &mutex->m_seq[2];
+#else /* __LP64__ */
+ mutex->m_lseqaddr = &mutex->m_seq[0];
+ mutex->m_useqaddr = &mutex->m_seq[1];
+#endif /* __LP64__ */
+ }
+ mutex->m_tid = 0;
+ mutex->m_seq[0] = 0;
+ mutex->m_seq[1] = 0;
+ mutex->m_seq[2] = 0;
+ mutex->priority = 0;
+ mutex->sig = _PTHREAD_MUTEX_SIG;
+ return (0);
+}
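+/*
+ * Usage sketch (illustrative only, assuming pthread_mutex_init is routed
+ * to this implementation in the build):
+ *
+ * pthread_mutexattr_t attr;
+ * pthread_mutexattr_init(&attr);
+ * pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ * pthread_mutex_init(&mutex, &attr);
+ * pthread_mutexattr_destroy(&attr);
+ */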
+
+
+
+/*
+ * Destroy a mutex variable.
+ */
+int
+_new_pthread_mutex_destroy(pthread_mutex_t *omutex)
+{
+ int res;
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+
+ LOCK(mutex->lock);
+ res = _new_pthread_mutex_destroy_locked(omutex);
+ UNLOCK(mutex->lock);
+
+ return(res);
+}
+
+
+int
+_new_pthread_mutex_destroy_locked(pthread_mutex_t *omutex)
+{
+ int res;
+ npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
+ uint32_t lgenval;
+ uint32_t * lseqaddr;
+ uint32_t * useqaddr;
+
+
+ if (mutex->sig == _PTHREAD_MUTEX_SIG)
+ {
+ if (mutex->mtxopts.options.pshared == PTHREAD_PROCESS_SHARED) {
+ MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
+ } else {
+ lseqaddr = mutex->m_lseqaddr;
+ useqaddr = mutex->m_useqaddr;
+ }
+
+ lgenval = *(lseqaddr);
+ if ((mutex->m_tid == (uint64_t)0) &&
+ ((lgenval & PTHRW_COUNT_MASK) == 0))
+ {
+ mutex->sig = _PTHREAD_NO_SIG;
+ res = 0;
+ }
+ else
+ res = EBUSY;
+ } else
+ res = EINVAL;
+
+ return (res);
+}
+
+#endif /* __i386__ || __x86_64__ */