/* maximum number of times a read lock may be obtained */
#define MAX_READ_LOCKS (INT_MAX - 1)
-#if defined(__i386__) || defined(__x86_64__)
#ifndef BUILDING_VARIANT /* [ */
-int usenew_impl = 0;
+__private_extern__ int usenew_impl = 1;
#else /* BUILDING_VARIANT */
extern int usenew_impl;
#endif /* BUILDING_VARIANT */
+extern int PR_5243343_flag;
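+
+/*
+ * The rwlock keeps three 32-bit words in rw_seq[]: the lock count word (L,
+ * count plus state bits), the unlock count word (U) and the sequence word (S).
+ * The L and S words are updated together with a 64-bit compare-and-swap, so
+ * RWLOCK_GETSEQ_ADDR below picks a window into rw_seq[] that keeps that pair
+ * 8-byte aligned even when the rwlock structure itself is misaligned.
+ */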
#if defined(__LP64__)
-#define RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr) \
+#define RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr) \
{ \
if (rwlock->misalign != 0) { \
- lseqaddr = &rwlock->rw_seq[1]; \
- wcaddr = &rwlock->rw_seq[2]; \
- useqaddr = &rwlock->rw_seq[3]; \
+ lcntaddr = &rwlock->rw_seq[1]; \
+ seqaddr = &rwlock->rw_seq[2]; \
+ ucntaddr = &rwlock->rw_seq[3]; \
} else { \
- lseqaddr = &rwlock->rw_seq[0]; \
- wcaddr = &rwlock->rw_seq[1]; \
- useqaddr = &rwlock->rw_seq[2]; \
+ lcntaddr = &rwlock->rw_seq[0]; \
+ seqaddr = &rwlock->rw_seq[1]; \
+ ucntaddr = &rwlock->rw_seq[2]; \
} \
}
#else /* __LP64__ */
-#define RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr) \
+#define RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr) \
{ \
if (rwlock->misalign != 0) { \
- lseqaddr = &rwlock->rw_seq[0]; \
- wcaddr = &rwlock->rw_seq[1]; \
- useqaddr = &rwlock->rw_seq[2]; \
- }else { \
- lseqaddr = &rwlock->rw_seq[1]; \
- wcaddr = &rwlock->rw_seq[2]; \
- useqaddr = &rwlock->rw_seq[3]; \
+ lcntaddr = &rwlock->rw_seq[1]; \
+ seqaddr = &rwlock->rw_seq[2]; \
+ ucntaddr = &rwlock->rw_seq[3]; \
+ } else { \
+ lcntaddr = &rwlock->rw_seq[0]; \
+ seqaddr = &rwlock->rw_seq[1]; \
+ ucntaddr = &rwlock->rw_seq[2]; \
} \
}
#endif /* __LP64__ */
-int _new_pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
-int _new_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
-int _new_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
-int _new_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
-int _new_pthread_rwlock_longrdlock_np(pthread_rwlock_t *rwlock);
-int _new_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
-int _new_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
-int _new_pthread_rwlock_yieldwrlock_np(pthread_rwlock_t *rwlock);
-int _new_pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
-int _new_pthread_rwlock_downgrade_np(pthread_rwlock_t *rwlock);
-int _new_pthread_rwlock_upgrade_np(pthread_rwlock_t *rwlock);
+__private_extern__ int __pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
+
#define _KSYN_TRACE_ 0
#if _KSYN_TRACE_
+#include <sys/sysctl.h>
+#ifndef BUILDING_VARIANT /* [ */
+static void set_enable(int);
+#endif /* !BUILDING_VARIANT ] */
+
/* The Function qualifiers */
#define DBG_FUNC_START 1
#define DBG_FUNC_END 2
#define _KSYN_TRACE_RW_UNACT2 0x9008090
#define _KSYN_TRACE_RW_UNACTK 0x9008094
#define _KSYN_TRACE_RW_UNACTE 0x9008098
+#define _KSYN_TRACE_RW_UNACTR 0x900809c
+#define _KSYN_TRACE_RW_TOOMANY 0x90080a0
+#define _KSYN_TRACE_RW_TRYWRLOCK 0x90080a4
+#define _KSYN_TRACE_RW_TRYRDLOCK 0x90080a8
#endif /* _KSYN_TRACE_ */
-#endif /* __i386__ || __x86_64__ */
-
-#ifndef BUILDING_VARIANT /* [ */
-#if defined(__i386__) || defined(__x86_64__)
-static int rwlock_unlock_action_onread(pthread_rwlock_t * rwlock, uint32_t updateval);
-static int rwlock_unlock_action1(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
-static int rwlock_unlock_action2(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
-static uint32_t modbits(uint32_t lgenval, uint32_t updateval);
-static int rwlock_unlock_action_k(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
-static int rwlock_exclusive_lockreturn(pthread_rwlock_t * rwlock, uint32_t updateval);
-static int rw_diffgenseq(uint32_t x, uint32_t y);
-#endif /* __i386__ || __x86_64__ */
+__private_extern__ void rwlock_action_onreturn(pthread_rwlock_t * rwlock, uint32_t updateval);
+__private_extern__ int rw_diffgenseq(uint32_t x, uint32_t y);
+#ifndef BUILDING_VARIANT /* [ */
+static uint32_t modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits);
int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
}
-#if defined(__i386__) || defined(__x86_64__) /* [ */
-int
-_new_pthread_rwlock_destroy(pthread_rwlock_t *orwlock)
+__private_extern__ int
+__pthread_rwlock_init(pthread_rwlock_t * orwlock, const pthread_rwlockattr_t *attr)
{
npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
-#if __DARWIN_UNIX03
- uint32_t rw_lseqcnt, rw_useqcnt;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
-#endif /* __DARWIN_UNIX03 */
-
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- return(EINVAL);
- } else {
-#if __DARWIN_UNIX03
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
- } else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
-
- rw_lseqcnt = *lseqaddr;
- rw_useqcnt = *useqaddr;
-
- if((rw_lseqcnt & PTHRW_COUNT_MASK) != rw_useqcnt)
- return(EBUSY);
-
-#endif /* __DARWIN_UNIX03 */
- //bzero(rwlock, sizeof(npthread_rwlock_t));
- rwlock->sig = _PTHREAD_NO_SIG;
- return(0);
- }
-}
-
-int
-_new_pthread_rwlock_init(pthread_rwlock_t * orwlock, const pthread_rwlockattr_t *attr)
-{
- npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
-#if __DARWIN_UNIX03
- uint32_t rw_lseqcnt, rw_useqcnt;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
-#endif /* __DARWIN_UNIX03 */
-
-#if __DARWIN_UNIX03
- if (attr && (attr->sig != _PTHREAD_RWLOCK_ATTR_SIG)) {
- return(EINVAL);
- }
-
- /* if already inited check whether it is in use, then return EBUSY */
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
- } else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
- rw_lseqcnt = *lseqaddr;
- rw_useqcnt = *useqaddr;
-
- if ((rw_lseqcnt & PTHRW_COUNT_MASK) != rw_useqcnt)
- return(EBUSY);
-
- }
-#endif /* __DARWIN_UNIX03 */
-
- /* initialize the lock */
- bzero(rwlock, sizeof(pthread_rwlock_t));
-
if ((attr != NULL) && (attr->pshared == PTHREAD_PROCESS_SHARED)) {
rwlock->pshared = PTHREAD_PROCESS_SHARED;
rwlock->rw_flags = PTHRW_KERN_PROCESS_SHARED;
-
} else {
rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_PRIVATE;
	}
if (((uintptr_t)rwlock & 0x07) != 0) {
rwlock->misalign = 1;
#if defined(__LP64__)
- rwlock->rw_lseqaddr = &rwlock->rw_seq[1];
- rwlock->rw_wcaddr = &rwlock->rw_seq[2];
- rwlock->rw_useqaddr = &rwlock->rw_seq[3];
- rwlock->rw_seq[1]= PTHRW_RW_INIT;
+ rwlock->rw_lcntaddr = &rwlock->rw_seq[1];
+ rwlock->rw_seqaddr = &rwlock->rw_seq[2];
+ rwlock->rw_ucntaddr = &rwlock->rw_seq[3];
+ rwlock->rw_seq[1]= PTHRW_RWLOCK_INIT;
+ rwlock->rw_seq[2]= PTHRW_RWS_INIT;
+ rwlock->rw_seq[3]= 0;
#else /* __LP64__ */
- rwlock->rw_lseqaddr = &rwlock->rw_seq[0];
- rwlock->rw_wcaddr = &rwlock->rw_seq[1];
- rwlock->rw_useqaddr = &rwlock->rw_seq[2];
- rwlock->rw_seq[0]= PTHRW_RW_INIT;
+ rwlock->rw_lcntaddr = &rwlock->rw_seq[1];
+ rwlock->rw_seqaddr = &rwlock->rw_seq[2];
+ rwlock->rw_ucntaddr = &rwlock->rw_seq[3];
+ rwlock->rw_seq[1]= PTHRW_RWLOCK_INIT;
+ rwlock->rw_seq[2]= PTHRW_RWS_INIT;
+ rwlock->rw_seq[3]= 0;
#endif /* __LP64__ */
} else {
rwlock->misalign = 0;
#if defined(__LP64__)
- rwlock->rw_lseqaddr = &rwlock->rw_seq[0];
- rwlock->rw_wcaddr = &rwlock->rw_seq[1];
- rwlock->rw_useqaddr = &rwlock->rw_seq[2];
- rwlock->rw_seq[0]= PTHRW_RW_INIT;
+ rwlock->rw_lcntaddr = &rwlock->rw_seq[0];
+ rwlock->rw_seqaddr = &rwlock->rw_seq[1];
+ rwlock->rw_ucntaddr = &rwlock->rw_seq[2];
+ rwlock->rw_seq[0]= PTHRW_RWLOCK_INIT;
+ rwlock->rw_seq[1]= PTHRW_RWS_INIT;
+ rwlock->rw_seq[2]= 0;
#else /* __LP64__ */
- rwlock->rw_lseqaddr = &rwlock->rw_seq[1];
- rwlock->rw_wcaddr = &rwlock->rw_seq[2];
- rwlock->rw_useqaddr = &rwlock->rw_seq[3];
- rwlock->rw_seq[1]= PTHRW_RW_INIT;
+ rwlock->rw_lcntaddr = &rwlock->rw_seq[0];
+ rwlock->rw_seqaddr = &rwlock->rw_seq[1];
+ rwlock->rw_ucntaddr = &rwlock->rw_seq[2];
+ rwlock->rw_seq[0]= PTHRW_RWLOCK_INIT;
+ rwlock->rw_seq[1]= PTHRW_RWS_INIT;
+ rwlock->rw_seq[2]= 0;
#endif /* __LP64__ */
}
+
+ rwlock->reserv = 0;
+ rwlock->rw_owner = NULL;
+#if defined(__LP64__)
+ memset(rwlock->rfu, 0, PTHRW_RFU_64BIT);
+#else
+ memset(rwlock->rfu, 0, PTHRW_RFU_32BIT);
+#endif
+
rwlock->sig = _PTHREAD_RWLOCK_SIG;
return(0);
}
-int
-_new_pthread_rwlock_rdlock(pthread_rwlock_t * orwlock)
+#if _KSYN_TRACE_
+static void
+set_enable(int val)
{
-#if __DARWIN_UNIX03
- pthread_t self;
-#endif /* __DARWIN_UNIX03 */
- npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- uint32_t lgenval, ugenval, rw_wc, newval, updateval;
- int error = 0, ret;
- uint64_t oldval64, newval64;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+ int mib[6];
+ size_t needed = 0;
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDENABLE;
+ mib[3] = val;
+ mib[4] = 0;
+ mib[5] = 0;
+ /* best effort to stop the trace */
+ (void)sysctl(mib, 4, NULL, &needed, NULL, 0);
+}
+#endif
+
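+/*
+ * modbits() folds the state bits the kernel reported in updateval into the
+ * local lock word: the multiplexed M bit is dropped, the K bit is cleared
+ * when the kernel granted a long read or when neither the kernel nor a
+ * pending writer requires it, and any W/Y/U bits that were parked in the S
+ * word by a downgrade (savebits) are re-applied.
+ */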
+static uint32_t
+modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits)
+{
+ uint32_t lval = lgenval & PTHRW_BIT_MASK;
+ uint32_t uval = updateval & PTHRW_BIT_MASK;
+ uint32_t rval, nlval;
+
+ nlval = (lval | uval) & ~(PTH_RWL_MBIT);
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
- PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
- return(error);
- }
- } else {
- PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
- return(EINVAL);
+ /* reconcile bits on the lock with what kernel needs to set */
+ if ((uval & PTH_RWL_LBIT) != 0)
+ nlval &= ~PTH_RWL_KBIT;
+ else if (((uval & PTH_RWL_KBIT) == 0) && ((lval & PTH_RWL_WBIT) == 0))
+ nlval &= ~PTH_RWL_KBIT;
+
+ if (savebits !=0 ) {
+ if (((savebits & PTH_RWS_WSVBIT) != 0) && ((nlval & PTH_RWL_WBIT) == 0) &&
+ ((nlval & PTH_RWL_EBIT) == 0)) {
+ if ((nlval & PTH_RWL_LBIT) == 0)
+ nlval |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
+ else
+ nlval |= PTH_RWL_WBIT;
+ }
+ if (((savebits & PTH_RWS_YSVBIT) != 0) && ((nlval & PTH_RWL_YBIT) == 0) &&
+ ((nlval & PTH_RWL_EBIT) == 0)) {
+ nlval |= PTH_RWL_YBIT;
+ }
+ if (((savebits & PTH_RWS_USVBIT) != 0) && ((nlval & PTH_RWL_EBIT) == 0)) {
+ if ((nlval & PTH_RWL_LBIT) == 0)
+ nlval |= (PTH_RWL_UBIT | PTH_RWL_KBIT);
+ else
+ nlval |= PTH_RWL_UBIT;
}
}
+ rval = (lgenval & PTHRW_COUNT_MASK) | nlval;
+ return(rval);
+}
+
+
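+/*
+ * Invoked after a successful return from the __psynch_rw_* syscalls: folds
+ * the kernel's updated view of the lock back into the user-space L and S
+ * words with a single 64-bit compare-and-swap, retrying if either word
+ * changed underneath.
+ */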
+__private_extern__ void
+rwlock_action_onreturn(pthread_rwlock_t * orwlock, uint32_t updateval)
+{
+
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ uint32_t lcntval, rw_seq, newval = 0, newsval, lval, uval;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+ uint64_t oldval64, newval64;
+ int setbits = 0;
+ int overlap = 0;
+ uint32_t savebits = 0;
+ int isoverlap = 0;
+ /* TBD: restore U bit */
if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
} else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
-loop:
- lgenval = *lseqaddr;
- ugenval = *useqaddr;
- rw_wc = *wcaddr;
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgenval, newval, rw_wc, 0);
-#endif
-
- if (is_rw_lbit_set(lgenval))
- goto gotlock;
- if(is_rw_ewubit_clear(lgenval))
- goto gotlock;
-
-#if __DARWIN_UNIX03
- if (is_rw_ebit_set(lgenval)) {
- self = pthread_self();
- if(rwlock->rw_owner == self) {
- error = EDEADLK;
- goto out;
- }
+ lcntaddr = rwlock->rw_lcntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
-#endif /* __DARWIN_UNIX03 */
-
- /* mean Lbit is set and R bit not set; block in kernel */
- newval = (lgenval + PTHRW_INC);
-
- oldval64 = (((uint64_t)rw_wc) << 32);
- oldval64 |= lgenval;
-
- newval64 = (((uint64_t)(rw_wc + 1)) << 32);
- newval64 |= newval;
-
- if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
- goto loop;
-
- /* give writers priority over readers */
- PLOCKSTAT_RW_BLOCK(orwlock, READ_LOCK_PLOCKSTAT);
#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lgenval, newval, rw_wc+1, 0);
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_START, updateval, 0, 0, 0, 0);
#endif
-retry:
- updateval = __psynch_rw_rdlock(orwlock, (newval & ~PTHRW_RW_INIT), ugenval, rw_wc, rwlock->rw_flags);
-
- if (updateval == (uint32_t)-1) {
- error = errno;
- } else
- error = 0;
-
- if (error == EINTR)
- goto retry;
-
- OSAtomicDecrement32((volatile int32_t *)wcaddr);
-
-
+ isoverlap = updateval & PTH_RWL_MBIT;
- if (error == 0) {
- if ((updateval & PTHRW_RW_HUNLOCK) != 0) {
- ret = rwlock_unlock_action_onread(orwlock, (updateval & ~PTHRW_RW_HUNLOCK));
- if (ret != 0) {
- LIBC_ABORT("rdlock_unlock handling failed");
- }
+loop:
+ setbits = 0;
+ lcntval = *lcntaddr;
+ rw_seq = *seqaddr;
+ savebits = 0;
+
+ if (isoverlap != 0) {
+ /* overlap return, just increment and inspect bits */
+ setbits = 1;
+ overlap = 1;
+ /* set s word, increment by specified value */
+ newsval = rw_seq + (updateval & PTHRW_COUNT_MASK);
+ if ((newsval & PTHRW_RWS_SAVEMASK) != 0) {
+ savebits = newsval & PTHRW_RWS_SAVEMASK;
+ newsval &= ~PTHRW_RWS_SAVEMASK;
}
- PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
- PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
- return(0);
} else {
- PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
- goto out;
- }
- /* Not reached */
-
-gotlock:
- /* check for max readers */
- ugenval = *useqaddr;
- if (rw_diffgenseq(lgenval, ugenval) >= PTHRW_MAX_READERS) {
- error = EAGAIN;
- goto out;
+ /* normal return */
+ if (is_rws_setunlockinit(rw_seq) != 0) {
+ setbits = 1;
+ /* set s word to passed in value */
+ newsval = (rw_seq & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
+ if ((rw_seq & PTHRW_RWS_SAVEMASK) != 0) {
+ savebits = rw_seq & PTHRW_RWS_SAVEMASK;
+ newsval &= ~PTHRW_RWS_SAVEMASK;
+ }
+ } else {
+ newval = lcntval;
+ newsval = rw_seq;
+ }
}
-
- newval = (lgenval + PTHRW_INC);
+ if (setbits != 0) {
+ newval = modbits(lcntval, updateval, savebits);
#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, newval, 0);
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, rw_seq, newsval, 0xeeeeeeee, updateval, 0);
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, lcntval, newval, 0xeeeeeeee, updateval, 0);
#endif
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
- PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, 0, 0, 0);
-#endif
- return(0);
- } else
- goto loop;
-out:
- PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
+ goto loop;
+ /* Check for consistency */
+ lval = lcntval & PTHRW_BIT_MASK;
+ uval = updateval & PTHRW_BIT_MASK;
+ }
+
#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_END, rw_seq, newsval, 0xffffffff, 0, 0);
#endif
- return(error);
+ return;
}
-
-int
-_new_pthread_rwlock_tryrdlock(pthread_rwlock_t * orwlock)
+/* returns the distance between two generation counts; the result stays in count units (not bit shifted down) */
+__private_extern__ int
+rw_diffgenseq(uint32_t x, uint32_t y)
{
- uint32_t lgenval, newval, ugenval;
- int error = 0;
- npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
-
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- /* check for static initialization */
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
- PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
- return(error);
- }
- } else {
- PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
- return(EINVAL);
- }
- }
-
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ uint32_t lx = (x & PTHRW_COUNT_MASK);
+	uint32_t ly = (y & PTHRW_COUNT_MASK);
+
+ if (lx > ly) {
+ return(lx-ly);
} else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
+ return((PTHRW_MAX_READERS - y) + lx + PTHRW_INC);
}
-loop:
- lgenval = *lseqaddr;
- if (is_rw_lbit_set(lgenval))
- goto gotlock;
- if (is_rw_ewubit_clear(lgenval))
- goto gotlock;
-
-
- error = EBUSY;
- goto out;
-
-gotlock:
- ugenval = *useqaddr;
- if (rw_diffgenseq(lgenval, ugenval) >= PTHRW_MAX_READERS) {
- error = EAGAIN;
- goto out;
- }
-
- newval = (lgenval + PTHRW_INC);
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
- PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
- return(0);
- } else
- goto loop;
-out:
- PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
- return(error);
}
#ifdef NOTYET
-/*****************************************************************************/
-/* TBD need to add towards MAX_READERS */
+/********************************************************** */
+static int pthread_rwlock_upgrade_internal(pthread_rwlock_t * orwlock, int trylock);
+
int
-_new_pthread_rwlock_longrdlock_np(pthread_rwlock_t * orwlock)
+pthread_rwlock_longrdlock_np(pthread_rwlock_t * orwlock)
{
pthread_t self;
- uint32_t lgenval, ugenval, rw_wc, newval, updateval;
- int error = 0, ret;
+ uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
+ int error = 0, retry_count = 0;
npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
uint64_t oldval64, newval64;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+ uint64_t myid = 0;
if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ LOCK(rwlock->lock);
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
return(error);
}
- } else {
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
+ UNLOCK(rwlock->lock);
PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
return(EINVAL);
}
+ UNLOCK(rwlock->lock);
}
if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
} else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
loop:
+ lcntval = *lcntaddr;
+ ucntval = *ucntaddr;
+ rw_seq = *seqaddr;
- lgenval = *lseqaddr;
- ugenval = *useqaddr;
- rw_wc = *wcaddr;
-
- if (is_rw_ewuybit_clear(lgenval))
+ if (can_rwl_longreadinuser(lcntval))
goto gotlock;
-
- /* if w bit is set ensure there is no deadlock */
- if (is_rw_ebit_set(lgenval)) {
+
+#if __DARWIN_UNIX03
+ if (is_rwl_ebit_set(lcntval)) {
self = pthread_self();
if(rwlock->rw_owner == self) {
error = EDEADLK;
goto out;
}
}
+#endif /* __DARWIN_UNIX03 */
- newval = (lgenval + PTHRW_INC);
+ /* need to block in kernel */
+ newval = (lcntval + PTHRW_INC);
+
+ newsval = rw_seq;
+ if (is_rws_setseq(rw_seq)) {
+ newsval &= PTHRW_SW_Reset_BIT_MASK;
+ newsval |= (newval & PTHRW_COUNT_MASK);
+ }
+
/* update lock seq and block in kernel */
- oldval64 = (((uint64_t)rw_wc) << 32);
- oldval64 |= lgenval;
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
- newval64 = (((uint64_t)(rw_wc + 1)) << 32);
+ newval64 = (((uint64_t)(newsval)) << 32);
newval64 |= newval;
- if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
goto loop;
kblock:
- updateval = __psynch_rw_longrdlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
+ updateval = __psynch_rw_longrdlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
if (updateval == (uint32_t)-1) {
error = errno;
	} else
		error = 0;

	if (error == EINTR)
goto kblock;
- OSAtomicDecrement32((volatile int32_t *)wcaddr);
if (error == 0) {
-
- if ((updateval & PTHRW_RW_HUNLOCK) != 0) {
- ret = rwlock_unlock_action_onread(orwlock, (updateval & ~PTHRW_RW_HUNLOCK));
- if (ret != 0) {
- LIBC_ABORT("rdlock_unlock handling failed");
- }
- }
-
- error = FALSE;
- while (error == FALSE) {
- lgenval = *lseqaddr;
- newval = lgenval | PTHRW_LBIT;
- error = OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr);
+ rwlock_action_onreturn(orwlock, updateval);
+ if ( is_rwl_lbit_clear(updateval)) {
+#if _KSYN_TRACE_
+ set_enable(2);
+#endif /* _KSYN_TRACE_ */
+ (void)pthread_threadid_np(pthread_self(), &myid);
+			LIBC_ABORT("longrdlock from kernel without Lbit %x: tid %x\n", updateval, (uint32_t)myid);
+			/* kernel cannot wakeup without granting L bit */
}
-
goto successout;
- } else
+ } else {
+#if _KSYN_TRACE_
+ set_enable(2);
+#endif /* _KSYN_TRACE_ */
+ (void)pthread_threadid_np(pthread_self(), &myid);
+	LIBC_ABORT("longrdlock from kernel with unknown error %x: tid %x\n", updateval, (uint32_t)myid);
goto out;
- goto successout;
+ }
gotlock:
- newval = ((lgenval + PTHRW_INC)| PTHRW_LBIT);
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
- goto loop;
+ if (rw_diffgenseq(lcntval, ucntval) >= PTHRW_MAX_READERS) {
+ /* since ucntval may be newer, just redo */
+ retry_count++;
+ if (retry_count > 1024) {
+
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_TOOMANY | DBG_FUNC_NONE, (uint32_t)rwlock, 0XEEEEEEEE, lcntval, ucntval, 0);
+#endif
+ error = EAGAIN;
+ goto out;
+ } else {
+ sched_yield();
+ goto loop;
+ }
+ }
+
+ /* Need to update L and S word */
+ newval = (lcntval + PTHRW_INC) | PTH_RWL_LBIT;
+ newsval = (rw_seq + PTHRW_INC);
+
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
+ goto loop;
+
successout:
PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
return(0);
out:
	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
return(error);
}
-/**************************************************************/
-#endif /* NOTYET */
int
-_new_pthread_rwlock_trywrlock(pthread_rwlock_t * orwlock)
+pthread_rwlock_yieldwrlock_np(pthread_rwlock_t * orwlock)
{
+ uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
int error = 0;
- uint32_t lgenval, newval;
#if __DARWIN_UNIX03
pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+ uint64_t oldval64, newval64;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+ uint64_t myid = 0;
if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- /* check for static initialization */
+ LOCK(rwlock->lock);
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
return(error);
}
- } else {
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
+ UNLOCK(rwlock->lock);
PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
return(EINVAL);
}
+ UNLOCK(rwlock->lock);
}
-
+
if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
} else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
- lgenval = PTHRW_RW_INIT;
- newval = PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT;
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
-#if __DARWIN_UNIX03
- rwlock->rw_owner = self;
-#endif /* __DARWIN_UNIX03 */
- PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
- return(0);
- }
- PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EBUSY);
- return(EBUSY);
-}
+loop:
+ lcntval = *lcntaddr;
+ ucntval = *ucntaddr;
+ rw_seq = *seqaddr;
-int
-_new_pthread_rwlock_wrlock(pthread_rwlock_t * orwlock)
-{
- uint32_t lgenval, newval, ugenval, updateval, rw_wc;
- int error = 0;
-#if __DARWIN_UNIX03
- pthread_t self = pthread_self();
-#endif /* __DARWIN_UNIX03 */
- npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- uint64_t oldval64, newval64;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
-
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- /* check for static initialization */
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
- PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
- return(error);
- }
- } else {
- PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
- return(EINVAL);
- }
- }
-
-
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
- } else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
-
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
-#endif
-loop:
- lgenval = *lseqaddr;
- ugenval = *useqaddr;
- rw_wc = *wcaddr;
-
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lgenval, ugenval, rw_wc, 0);
-#endif
#if __DARWIN_UNIX03
- if (is_rw_ebit_set(lgenval)) {
- if(rwlock->rw_owner == self) {
+ if (is_rwl_ebit_set(lcntval)) {
+ if (rwlock->rw_owner == self) {
error = EDEADLK;
goto out;
}
}
#endif /* __DARWIN_UNIX03 */
- if (lgenval == PTHRW_RW_INIT) {
- newval = ( PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT);
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+ if (lcntval == PTHRW_RWL_INIT) {
+ /* if we can acquire set L and S word */
+ lcntval = PTHRW_RWL_INIT;
+ newval = PTHRW_RWL_INIT | PTHRW_INC | PTH_RWL_KBIT| PTH_RWL_EBIT;
+ newsval = rw_seq + PTHRW_INC;
+
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) == TRUE) {
goto gotit;
- }
+ } else
+ goto loop;
}
- newval = (lgenval + PTHRW_INC) | PTHRW_WBIT | PTHRW_SHADOW_W;
+ newval = (lcntval + PTHRW_INC)| PTH_RWL_YBIT;
+
+ newsval = rw_seq;
+ if (is_rws_setseq(rw_seq)) {
+ newsval &= PTHRW_SW_Reset_BIT_MASK;
+ newsval |= (newval & PTHRW_COUNT_MASK);
+ }
- /* update lock seq and block in kernel */
- oldval64 = (((uint64_t)rw_wc) << 32);
- oldval64 |= lgenval;
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
- newval64 = (((uint64_t)(rw_wc + 1)) << 32);
+ newval64 = (((uint64_t)(newsval)) << 32);
newval64 |= newval;
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, newval, 0);
-#endif
- if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
goto loop;
-
-retry:
+
PLOCKSTAT_RW_BLOCK(orwlock, WRITE_LOCK_PLOCKSTAT);
-retry1:
- updateval = __psynch_rw_wrlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
+retry:
+ updateval = __psynch_rw_yieldwrlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
if (updateval == (uint32_t)-1) {
error = errno;
} else
error = 0;
- if (error == EINTR) {
- goto retry1;
- }
+ if (error == EINTR)
+ goto retry;
+
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x33333333, newval, updateval, 0);
-#endif
PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
if (error != 0) {
- OSAtomicDecrement32((volatile int32_t *)wcaddr);
- goto out;
- }
-
- if (is_rw_ebit_clear(updateval)) {
- /* kernel cannot wakeup without granting E bit */
- abort();
+#if _KSYN_TRACE_
+ set_enable(2);
+#endif /* _KSYN_TRACE_ */
+ (void)pthread_threadid_np(pthread_self(), &myid);
+ LIBC_ABORT("yieldwrlock from kernel with unknown error %x: tid %x\n", updateval, (uint32_t)myid);
}
- error = rwlock_exclusive_lockreturn(orwlock, updateval);
- if (error == EAGAIN)
- goto retry;
- OSAtomicDecrement32((volatile int32_t *)wcaddr);
+out:
if (error == 0) {
gotit:
+ rwlock_action_onreturn(orwlock, updateval);
+ if ( is_rwl_ebit_clear(updateval)) {
+#if _KSYN_TRACE_
+ set_enable(2);
+#endif /* _KSYN_TRACE_ */
+ (void)pthread_threadid_np(pthread_self(), &myid);
+ LIBC_ABORT("yieldwrlock from kernel without EBit %x: tid %x\n", updateval, (uint32_t)myid);
+ }
#if __DARWIN_UNIX03
rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
-#endif
return(0);
- }
-out:
- PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
-#endif
- return(error);
+ } else {
+ PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+ return(error);
+ }
}
-
-
-#ifdef NOTYET
-/*****************************************************************************/
int
-_new_pthread_rwlock_yieldwrlock_np(pthread_rwlock_t * orwlock)
+pthread_rwlock_downgrade_np(pthread_rwlock_t * orwlock)
{
- uint32_t lgenval, newval, ugenval, updateval, rw_wc;
- int error = 0;
+ uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
+ int error = 0, haswbit = 0, hasubit = 0, hasybit = 0;
#if __DARWIN_UNIX03
pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
uint64_t oldval64, newval64;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+ uint64_t myid = 0;
if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- /* check for static initialization */
+ LOCK(rwlock->lock);
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
- PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
return(error);
}
- } else {
- PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
return(EINVAL);
}
+ UNLOCK(rwlock->lock);
}
-
-
if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
} else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
+ }
+
+loop:
+ lcntval = *lcntaddr;
+ ucntval = *ucntaddr;
+ rw_seq = *seqaddr;
+
+
+ /* if not holding exclusive lock, return */
+ if ((is_rwl_ebit_set(lcntval )== 0) || (rwlock->rw_owner != self)) {
+ return(EINVAL);
}
- lgenval = *lseqaddr;
- ugenval = *useqaddr;
- rw_wc = *wcaddr;
+	/* no other waiters; can the downgrade be granted entirely in user space? */
+ if ((lcntval & PTHRW_COUNT_MASK) == (ucntval + PTHRW_INC)) {
+#if 0
+ /* should have no write waiters pending */
+ if (is_rwl_wbit_set(lcntval) != 0) {
+#if _KSYN_TRACE_
+ set_enable(2);
+#endif /* _KSYN_TRACE_ */
+ (void)pthread_threadid_np(pthread_self(), &myid);
+ LIBC_ABORT("downgrade in user mode but W bit set %x: tid %x\n", lcntval, (uint32_t)myid);
+ }
+#endif
+ /* preserve count and remove ke bits */
+ newval = lcntval & ~(PTH_RWL_EBIT | PTH_RWL_KBIT);
+ /* if we can acquire set L and S word */
+ newsval = rw_seq;
+
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) == TRUE) {
#if __DARWIN_UNIX03
- if (is_rw_ebit_set(lgenval)) {
- if (rwlock->rw_owner == self) {
- error = EDEADLK;
- goto out;
+ rwlock->rw_owner = (pthread_t)0;
+#endif /* __DARWIN_UNIX03 */
+ return(0);
+ } else
+ goto loop;
+ } else {
+
+ haswbit = lcntval & PTH_RWL_WBIT;
+ hasubit = lcntval & PTH_RWL_UBIT;
+ hasybit = lcntval & PTH_RWL_YBIT;
+
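+		/*
+		 * The L word is reset below, so any pending writer/upgrade/yield
+		 * bits are parked in the S word (WSV/USV/YSV) and restored by
+		 * modbits() when the next kernel call returns.
+		 */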
+ /* reset all bits and set k */
+ newval = (lcntval & PTHRW_COUNT_MASK) | PTH_RWL_KBIT;
+ /* set I bit on S word */
+ newsval = rw_seq | PTH_RWS_IBIT;
+ if (haswbit != 0)
+ newsval |= PTH_RWS_WSVBIT;
+ if (hasubit != 0)
+ newsval |= PTH_RWS_USVBIT;
+ if (hasybit != 0)
+ newsval |= PTH_RWS_YSVBIT;
+
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
+ goto loop;
+
+#if __DARWIN_UNIX03
+ rwlock->rw_owner = 0;
+#endif /* __DARWIN_UNIX03 */
+
+retry:
+ updateval = __psynch_rw_downgrade(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
+ if (updateval == (uint32_t)-1) {
+ error = errno;
+ } else
+ error = 0;
+
+ /* TBD: what to do with the error, EINTR ?? */
+ if (error == EINTR)
+ goto retry;
+
+ if (error == 0) {
+ rwlock_action_onreturn(orwlock, updateval);
+ return(0);
+ } else {
+#if _KSYN_TRACE_
+ set_enable(1);
+#endif /* _KSYN_TRACE_ */
+ (void)pthread_threadid_np(pthread_self(), &myid);
+ LIBC_ABORT("downgrade from kernel with unknown error %x with tid %x\n", updateval, (uint32_t)myid);
}
+ /* Not reached */
}
+ return(EINVAL);
+}
+
+int
+pthread_rwlock_upgrade_np(pthread_rwlock_t * orwlock)
+{
+ return(pthread_rwlock_upgrade_internal(orwlock, 0));
+}
+
+int
+pthread_rwlock_tryupgrade_np(pthread_rwlock_t *orwlock)
+{
+ return(pthread_rwlock_upgrade_internal(orwlock, 1));
+}
+
+static int
+pthread_rwlock_upgrade_internal(pthread_rwlock_t * orwlock, int trylock)
+{
+ uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
+ int error = 0, flags ;
+#if __DARWIN_UNIX03
+ pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ uint64_t oldval64, newval64;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+ uint64_t myid = 0;
- if (lgenval == PTHRW_RW_INIT) {
- newval = PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT;
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
- goto gotit;
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ /* check for static initialization */
+ LOCK(rwlock->lock);
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
+ return(error);
+ }
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
+ UNLOCK(rwlock->lock);
+ return(EINVAL);
}
+ UNLOCK(rwlock->lock);
+ }
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
+ } else {
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
+ }
+
+loop:
+ lcntval = *lcntaddr;
+ ucntval = *ucntaddr;
+ rw_seq = *seqaddr;
+
+ if (is_rwl_eubit_set(lcntval) !=0) {
+ return(EBUSY);
}
- newval = (lgenval + PTHRW_INC);
- if ((lgenval & PTHRW_WBIT) == 0)
- newval |= PTHRW_YBIT;
+ /* set U and K bit and go to kernel */
+ newval = (lcntval | (PTH_RWL_UBIT | PTH_RWL_KBIT));
+ newsval = rw_seq;
+#if 0
+ if (is_rws_setseq(rw_seq)) {
+ newsval &= PTHRW_SW_Reset_BIT_MASK;
+ newsval |= (newval & PTHRW_COUNT_MASK);
+ }
+#endif
+
+ /* update lock seq and block in kernel */
- oldval64 = (((uint64_t)rw_wc) << 32);
- oldval64 |= lgenval;
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
- newval64 = (((uint64_t)(rw_wc + 1)) << 32);
+ newval64 = (((uint64_t)(newsval)) << 32);
newval64 |= newval;
- if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
- PLOCKSTAT_RW_BLOCK(orwlock, WRITE_LOCK_PLOCKSTAT);
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
+ goto loop;
+ flags = rwlock->rw_flags;
+ if (trylock != 0) {
+ flags |= _PTHREAD_RWLOCK_UPGRADE_TRY;
+ }
retry:
- updateval = __psynch_rw_yieldwrlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
+	updateval = __psynch_rw_upgrade(orwlock, newval, ucntval, newsval, flags);
if (updateval == (uint32_t)-1) {
error = errno;
} else
error = 0;
-
+
if (error == EINTR)
goto retry;
-
-
- PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
- if (error != 0) {
- OSAtomicDecrement32((volatile int32_t *)wcaddr);
- goto out;
- }
-
- if (is_rw_ebit_clear(updateval)) {
- /* kernel cannot wakeup without granting E bit */
- abort();
- }
-
- error = rwlock_exclusive_lockreturn(orwlock, updateval);
- if (error == EAGAIN)
- goto retry;
-
- OSAtomicDecrement32((volatile int32_t *)wcaddr);
+
+
if (error == 0) {
- gotit:
+ rwlock_action_onreturn(orwlock, updateval);
+ if ( is_rwl_ebit_clear(updateval)) {
+#if _KSYN_TRACE_
+ set_enable(2);
+#endif /* _KSYN_TRACE_ */
+ (void)pthread_threadid_np(pthread_self(), &myid);
+ LIBC_ABORT("upgrade from kernel without EBit %x: tid %x\n", updateval, (uint32_t)myid);
+ }
#if __DARWIN_UNIX03
rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
- PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
return(0);
} else {
- PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+ if (trylock != 0) {
+ return (EBUSY);
+ }
}
- return(error);
-out:
- PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
- return(error);
+
+ return(error);
}
-/**************************************************************/
-#endif /* NOTYET */
-int
-_new_pthread_rwlock_unlock(pthread_rwlock_t * orwlock)
+/* Returns true if the rwlock is held for reading by any thread or held for writing by the current thread */
+int
+pthread_rwlock_held_np(pthread_rwlock_t * orwlock)
{
- uint32_t lgenval, ugenval, rw_wc, newval, nlval, ulval;
+ uint32_t lcntval, ucntval, rw_seq;
int error = 0;
- int wrlock = 0, kern_trans;
- uint32_t updateval, bits, newbits;
- uint32_t isupgrade = 0;
npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- int retry_count = 0, retry_count1 = 0;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
- pthread_t self = NULL;
- uint64_t threadid = 0;
- int ubitchanged = 0, initbitset = 0, num;
-
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+
if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- /* check for static initialization */
+ LOCK(rwlock->lock);
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
- PLOCKSTAT_RW_ERROR(orwlock, wrlock, error);
- return(error);
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
+ return(0);
}
- } else {
- PLOCKSTAT_RW_ERROR(orwlock, wrlock, EINVAL);
- return(EINVAL);
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
+ UNLOCK(rwlock->lock);
+ return(-1);
}
+ UNLOCK(rwlock->lock);
}
if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
} else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
+
+ lcntval = *lcntaddr;
+ ucntval = *ucntaddr;
+ rw_seq = *seqaddr;
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
-#endif
-loop:
- lgenval = *lseqaddr;
- ugenval = *useqaddr;
- rw_wc = *wcaddr;
+ if ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK))
+ return(0);
+ return(1);
+}
-loop1:
- if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
- retry_count++;
- sched_yield();
- if (retry_count < 1024)
- goto loop;
- error = EINVAL;
- goto out;
- }
- retry_count = 0;
-
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, ugenval, 0);
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, rw_wc, 0, 0);
-#endif
- if (is_rw_ebit_set(lgenval)) {
- wrlock = 1;
-#if __DARWIN_UNIX03
- rwlock->rw_owner = (pthread_t)0;
-#endif /* __DARWIN_UNIX03 */
- }
+/* Returns true if the rwlock is held for reading by any thread */
+int
+pthread_rwlock_rdheld_np(pthread_rwlock_t * orwlock)
+{
+ uint32_t lcntval, ucntval, rw_seq;
+ int error = 0;
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
- /* last unlock ? */
- if((lgenval & PTHRW_COUNT_MASK) == (ugenval + PTHRW_INC)) {
- if (OSAtomicCompareAndSwap32(ugenval, 0, (volatile int32_t *)useqaddr) != TRUE) {
- goto loop;
- }
- if (OSAtomicCompareAndSwap32(lgenval, PTHRW_RW_INIT, (volatile int32_t *)lseqaddr) != TRUE) {
- if (OSAtomicCompareAndSwap32(0, ugenval, (volatile int32_t *)useqaddr) != TRUE) {
-lp1:
- ulval = *useqaddr;
- nlval = ugenval+ulval;
- if (OSAtomicCompareAndSwap32(ulval, nlval, (volatile int32_t *)useqaddr) != TRUE)
- goto lp1;
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ LOCK(rwlock->lock);
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
+ return(0);
}
-
- goto loop;
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
+ UNLOCK(rwlock->lock);
+ return(-1);
}
-
- goto succout;
- }
-
- /* do we need kernel trans? */
-
-lp11:
- nlval = lgenval & PTHRW_COUNT_MASK;
- if (ubitchanged == 0)
- ulval = (ugenval + PTHRW_INC) & PTHRW_COUNT_MASK;
- else
- ulval = ugenval & PTHRW_COUNT_MASK;
-
- num = rw_diffgenseq(nlval, ulval);
- kern_trans = ( num == (rw_wc << PTHRW_COUNT_SHIFT));
- /* if three more waiters than needed for kernel tras*/
- if ((ubitchanged ==0) && (kern_trans == 0) && (num < (rw_wc << PTHRW_COUNT_SHIFT))) {
- retry_count1++;
- sched_yield();
- if (retry_count1 < 1024)
- goto loop;
- }
- retry_count1 = 0;
-
- if (ubitchanged == 0) {
- if (OSAtomicCompareAndSwap32(ugenval, ugenval+PTHRW_INC, (volatile int32_t *)useqaddr) != TRUE)
- goto loop;
- ubitchanged = 1;
+ UNLOCK(rwlock->lock);
}
- if (kern_trans == 0) {
- goto succout;
- }
-
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 1, ugenval+PTHRW_INC, 0);
-#endif
- initbitset = 0;
- bits = lgenval & PTHRW_BIT_MASK;
- newbits = bits;
- /* if this is first unlock to kernel, notify kernel of init status */
- if ((bits & PTHRW_RW_INIT) != 0) {
- /* reset the initbit if present */
- newbits &= ~PTHRW_RW_INIT;
- initbitset = PTHRW_RW_INIT;
- }
- if (((bits & PTHRW_EBIT) != 0) && ((bits & PTHRW_WBIT) == 0)) {
- /* reset E bit is no U bit is set */
- newbits &= ~PTHRW_EBIT;
- }
- /* clear shadow bit, as W is going to be sent to kernel */
- if ((bits & PTHRW_WBIT) != 0) {
- newbits &= ~PTHRW_SHADOW_W;
- }
-
- /* reset L bit */
- if (bits & PTHRW_LBIT)
- newbits &= ~PTHRW_LBIT;
- if (bits & PTHRW_UBIT) {
- /* reset U and set E bit */
- newbits &= ~PTHRW_LBIT;
- newbits |= PTHRW_EBIT;
- isupgrade = PTHRW_UBIT;
- }
-
- /* updates bits on the L */
- newval = (lgenval & PTHRW_COUNT_MASK) | newbits;
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE) {
- /* reread the value */
- lgenval = *lseqaddr;
- ugenval = *useqaddr;
- rw_wc = *wcaddr;
- /* since lgen changed check for trans again */
- goto lp11;
- }
-
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 2, newval, 0);
-#endif
-
- /* send upgrade bit to kernel */
- newval |= (isupgrade | initbitset);
- updateval = __psynch_rw_unlock(orwlock, newval, ugenval+PTHRW_INC, rw_wc, rwlock->rw_flags);
- if (updateval == (uint32_t)-1) {
- error = errno;
- } else
- error = 0;
-
- if(error != 0) {
- /* not sure what is the scenario */
- if(error != EINTR)
- goto out;
- }
-
- /*
- * If the unlock is spurious return. Also if the
- * exclusive lock is being granted, let that thread
- * manage the status bits, otherwise stale bits exclusive
- * bit can be set, if that thread had already unlocked.
- */
- if ((updateval & (PTHRW_RW_SPURIOUS | PTHRW_EBIT)) != 0) {
- goto succout;
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
+ } else {
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
-lp2:
- lgenval = *lseqaddr;
-
+ lcntval = *lcntaddr;
+ ucntval = *ucntaddr;
+ rw_seq = *seqaddr;
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 3, lgenval, 0);
-#endif
- /* if the kernel antcipated seq and one on the lock are same, set the one from kernel */
- if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
- if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
- goto lp2;
- goto succout;
- }
-
- /* state bits are same? */
- if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
- /* nothing to do */
- goto succout;
+ if ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK))
+ return(0);
+
+ if (is_rwl_ebit_set(lcntval) !=0) {
+ return(0);
}
-
- newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);
-
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 4, newval, 0);
-#endif
- /* high bits are state on the lock; lowbits are one kernel need to set */
- switch (newval) {
- /* W States */
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
- //goto ktrans;
- }
- break;
-
-
- /* L states */
- case ((PTHRW_LBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
-
- /* Y states */
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
- //goto ktrans;
- }
- break;
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
- //goto ktrans;
- }
- break;
-
- /* YU states */
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
- //goto ktrans;
- }
- break;
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
- //goto ktrans;
- }
- break;
-
- /* E states */
- case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
-
- /* WE states */
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
-
- /* WL states */
- case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
-
- default:
- /* illegal states */
- self = pthread_self();
- threadid = self->thread_id;
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
-#endif
- LIBC_ABORT("incorect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
-
- };
-
- if (error != 0)
- goto lp2;
-succout:
- PLOCKSTAT_RW_RELEASE(orwlock, wrlock);
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
-#endif
- return(0);
-out:
- PLOCKSTAT_RW_ERROR(orwlock, wrlock, error);
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
-#endif
- return(error);
+ return(1);
}
-#ifdef NOTYET
-/*****************************************************************************/
-int
-_new_pthread_rwlock_downgrade_np(pthread_rwlock_t * orwlock)
+/* Returns true if the rwlock is held for writing by the current thread */
+int
+pthread_rwlock_wrheld_np(pthread_rwlock_t * orwlock)
{
- uint32_t lgenval, newval, ugenval, rw_wc;
- int error = 0;
+ uint32_t lcntval, ucntval, rw_seq;
pthread_t self = pthread_self();
npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
-
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+ int error = 0;
if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- /* check for static initialization */
+ LOCK(rwlock->lock);
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
- return(error);
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
+ return(0);
}
- } else {
- return(EINVAL);
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
+ UNLOCK(rwlock->lock);
+ return(-1);
}
+ UNLOCK(rwlock->lock);
}
+
if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
} else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
-
-loop:
- lgenval = *lseqaddr;
- ugenval = *useqaddr;
- rw_wc = *wcaddr;
-
- if ((is_rw_ebit_set(lgenval )) && (rwlock->rw_owner != self)) {
- return(EINVAL);
+
+ lcntval = *lcntaddr;
+ ucntval = *ucntaddr;
+ rw_seq = *seqaddr;
+
+ if ((is_rwl_ebit_set(lcntval)) && (rwlock->rw_owner == self)) {
+ return(1);
}
-
- if ((lgenval & PTHRW_COUNT_MASK) != ugenval) {
-
- newval = lgenval & ~PTHRW_EBIT;
-
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
+ return(0);
+}
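+
+/*
+ * Illustrative use of the introspection helpers above (they are compiled
+ * only when NOTYET is defined, so treat this as a sketch rather than a
+ * shipping interface):
+ *
+ *	if (pthread_rwlock_wrheld_np(&rwl) == 1)
+ *		... the calling thread already holds the write lock ...
+ *	else if (pthread_rwlock_rdheld_np(&rwl) == 1)
+ *		... some thread holds it for reading ...
+ */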
+/******************************************************/
+#endif /* NOTYET */
+
+
+#endif /* !BUILDING_VARIANT ] */
+
+int
+pthread_rwlock_destroy(pthread_rwlock_t *orwlock)
+{
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
#if __DARWIN_UNIX03
- rwlock->rw_owner = 0;
+ uint32_t rw_lcnt, rw_ucnt;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
#endif /* __DARWIN_UNIX03 */
- if (rw_wc != 0) {
- error = __psynch_rw_downgrade(orwlock, newval, ugenval, rw_wc, rwlock->rw_flags);
-
- }
- return(0);
+
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG && rwlock->sig != _PTHREAD_RWLOCK_SIG_init)
+ return(EINVAL);
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
+#if __DARWIN_UNIX03
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
} else {
- goto loop;
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
- }
- return(EINVAL);
+
+ rw_lcnt = *lcntaddr;
+ rw_ucnt = *ucntaddr;
+
+ if((rw_lcnt & PTHRW_COUNT_MASK) != rw_ucnt)
+ return(EBUSY);
+
+#endif /* __DARWIN_UNIX03 */
+ //bzero(rwlock, sizeof(npthread_rwlock_t));
+ rwlock->sig = _PTHREAD_NO_SIG;
+ return(0);
+ } else if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ rwlock->sig = _PTHREAD_NO_SIG;
+ return(0);
+ } else
+ return(EINVAL);
}
int
-_new_pthread_rwlock_upgrade_np(pthread_rwlock_t * orwlock)
+pthread_rwlock_init(pthread_rwlock_t * orwlock, const pthread_rwlockattr_t *attr)
{
- uint32_t lgenval, newval, ugenval, ulval, updateval, rw_wc;
- int error = 0, kern_trans;
- pthread_t self = pthread_self();
npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- uint64_t oldval64, newval64;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
-
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- /* check for static initialization */
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
- return(error);
- }
- } else {
- return(EINVAL);
- }
- }
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
- } else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
-loop:
- lgenval = *lseqaddr;
- ugenval = *useqaddr;
- rw_wc = *wcaddr;
+#if __DARWIN_UNIX03
+ uint32_t rw_lcnt, rw_ucnt;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+#endif /* __DARWIN_UNIX03 */
- if (is_rw_uebit_set(lgenval)) {
+#if __DARWIN_UNIX03
+ if (attr && (attr->sig != _PTHREAD_RWLOCK_ATTR_SIG)) {
return(EINVAL);
-
}
- if ((lgenval & PTHRW_COUNT_MASK) == ugenval)
- return(EINVAL);
-
- if (lgenval > ugenval)
- ulval = (lgenval & PTHRW_COUNT_MASK) - (ugenval & PTHRW_COUNT_MASK);
- else
- ulval = (ugenval & PTHRW_COUNT_MASK) - (lgenval & PTHRW_COUNT_MASK);
-
-
- newval = lgenval | PTHRW_UBIT;
-
- kern_trans = 1;
- if (rw_wc != 0) {
- if (ulval == ((rw_wc - 1) << PTHRW_COUNT_SHIFT))
- kern_trans = 0;
- } else if (ulval == 1)
- kern_trans = 0;
-
- if (kern_trans == 0) {
- newval = ((lgenval | PTHRW_EBIT) & ~PTHRW_LBIT);
- } else {
- newval = lgenval | PTHRW_UBIT;
- }
- if (kern_trans == 0) {
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
- goto loop;
-
- } else {
- newval = (lgenval + PTHRW_INC);
-
- oldval64 = (((uint64_t)rw_wc) << 32);
- oldval64 |= lgenval;
-
- newval64 = (((uint64_t)(rw_wc + 1)) << 32);
- newval64 |= newval;
-
- if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
- goto loop;
- /* kern_trans == 1 */
- retry:
- updateval = __psynch_rw_upgrade(orwlock, newval, ugenval, rw_wc+1, rwlock->rw_flags);
- if (updateval == (uint32_t)-1) {
- error = errno;
- } else
- error = 0;
-
- if (error == EINTR)
- goto retry;
-
- if (error != 0) {
- OSAtomicDecrement32((volatile int32_t *)wcaddr);
- goto out;
- }
-
- if (is_rw_ebit_set(updateval)) {
- /* kernel cannot wakeup without granting E bit */
- abort();
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
+ } else {
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
- error = rwlock_exclusive_lockreturn(orwlock, updateval);
- if (error == EAGAIN)
- goto retry;
+ rw_lcnt = *lcntaddr;
+ rw_ucnt = *ucntaddr;
- OSAtomicDecrement32((volatile int32_t *)wcaddr);
+ if ((rw_lcnt & PTHRW_COUNT_MASK) != rw_ucnt)
+ return(EBUSY);
}
- if (error == 0) {
- rwlock->rw_owner = self;
- PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
- return(0);
- }
+#endif
+ LOCK_INIT(rwlock->lock);
+ return(__pthread_rwlock_init(orwlock, attr));
-out:
- return(error);
}
int
-pthread_rwlock_tryupgrade_np(pthread_rwlock_t *orwlock)
+pthread_rwlock_rdlock(pthread_rwlock_t * orwlock)
{
- pthread_t self = pthread_self();
- uint32_t lgenval, newval, ugenval, ulval, rw_wc;
- int error = 0, kern_trans;
+#if __DARWIN_UNIX03
+ pthread_t self;
+#endif /* __DARWIN_UNIX03 */
npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+ uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
+ int error = 0, retry_count = 0;
+ uint64_t oldval64, newval64;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+ uint64_t myid = 0;
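+ /* a statically initialized rwlock is set up lazily on first use, under its spinlock */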
if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ LOCK(rwlock->lock);
if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
return(error);
}
- } else {
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
return(EINVAL);
}
+ UNLOCK(rwlock->lock);
}
+
if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
} else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
+#endif
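+ /*
+  * Snapshot L (lock count + state bits), U (unlock count) and S (the
+  * kernel sync sequence).  L and S are adjacent words, so they can be
+  * updated together below with a single 64-bit compare-and-swap.
+  */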
loop:
- lgenval = *lseqaddr;
- ugenval = *useqaddr;
- rw_wc = *wcaddr;
+ lcntval = *lcntaddr;
+ ucntval = *ucntaddr;
+ rw_seq = *seqaddr;
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lcntval, (ucntval | 0xee), rw_seq, 0);
+#endif
- if (is_rw_uebit_set(lgenval)) {
- return(EBUSY);
+ /* if the L bit is set, or the U and K bits are clear, acquire the lock in userland */
+ if (can_rwl_readinuser(lcntval))
+ goto gotlock;
+
+#if __DARWIN_UNIX03
+ if (is_rwl_ebit_set(lcntval)) {
+ self = pthread_self();
+ if (rwlock->rw_owner == self) {
+ error = EDEADLK;
+ goto out;
+ }
}
+#endif /* __DARWIN_UNIX03 */
- if ((lgenval & PTHRW_COUNT_MASK) == ugenval)
- return(EINVAL);
+
+ /* need to block in the kernel, so remove the R bit */
+ newval = (lcntval + PTHRW_INC) & PTH_RWLOCK_RESET_RBIT;
- if (lgenval > ugenval)
- ulval = (lgenval & PTHRW_COUNT_MASK) - (ugenval & PTHRW_COUNT_MASK);
- else
- ulval = (ugenval & PTHRW_COUNT_MASK) - (lgenval & PTHRW_COUNT_MASK);
+ newsval = rw_seq;
+ if (is_rws_setseq(rw_seq)) {
+ newsval &= PTHRW_SW_Reset_BIT_MASK;
+ newsval |= (newval & PTHRW_COUNT_MASK);
+ }
+
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
- newval = lgenval | PTHRW_UBIT;
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
+ goto loop;
+
+ /* give writers priority over readers */
+ PLOCKSTAT_RW_BLOCK(orwlock, READ_LOCK_PLOCKSTAT);
+
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lcntval, newval, newsval, 0);
+#endif
+
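+ /*
+  * Block in the kernel.  On success the kernel returns an updated L
+  * value that is merged into the lock via rwlock_action_onreturn();
+  * an interrupted wait (EINTR) is simply retried.
+  */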
+retry:
+ updateval = __psynch_rw_rdlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
- kern_trans = 1;
- if (rw_wc != 0) {
- /* there is only one reader thread */
- if (ulval == (rw_wc - 1))
- kern_trans = 0;
- } else if (ulval == 1)
- kern_trans = 0;
+ if (updateval == (uint32_t)-1) {
+ error = errno;
+ } else
+ error = 0;
- if (kern_trans == 0) {
- newval = (lgenval | PTHRW_EBIT) & ~PTHRW_LBIT;
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
- goto loop;
-
- rwlock->rw_owner = self;
- PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
- return(0);
- }
- return(EBUSY);
-}
+ if (error == EINTR)
+ goto retry;
-/* Returns true if the rwlock is held for reading by any thread or held for writing by the current thread */
-int
-pthread_rwlock_held_np(pthread_rwlock_t * orwlock)
-{
- npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- uint32_t lgenval, ugenval;
- int error = 0;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
-
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
- return(0);
- }
- } else {
- return(-1);
- }
- }
-
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
- } else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
-
- lgenval = *lseqaddr;
- ugenval = *useqaddr;
-
- if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK))
+ if (error == 0) {
+ rwlock_action_onreturn(orwlock, updateval);
+ PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
+ PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
return(0);
-
- return(1);
-}
-
-/* Returns true if the rwlock is held for reading by any thread */
-int
-pthread_rwlock_rdheld_np(pthread_rwlock_t * orwlock)
-{
- npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- uint32_t lgenval;
- int error = 0;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
-
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
- return(0);
- }
- } else {
- return(-1);
- }
- }
-
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
} else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
-
- lgenval = *lseqaddr;
-
- if (is_rw_ebit_set(lgenval)) {
- return(0);
+ PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
+#if _KSYN_TRACE_
+ set_enable(1);
+#endif /* _KSYN_TRACE_ */
+ (void)pthread_threadid_np(pthread_self(), &myid);
+ LIBC_ABORT("rdlock from kernel with unknown error %x with tid %x\n", updateval, (uint32_t)myid);
+ goto out;
}
- return(0);
-}
-
-/* Returns true if the rwlock is held for writing by the current thread */
-int
-pthread_rwlock_wrheld_np(pthread_rwlock_t * orwlock)
-{
- npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- pthread_t self;
- uint32_t lgenval;
- int error = 0;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+ /* Not reached */
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
- return(0);
- }
+gotlock:
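+ /* the difference between the L and U counts is the number of outstanding lock holders */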
+ if (rw_diffgenseq(lcntval, ucntval) >= PTHRW_MAX_READERS) {
+ /* since ucntval may be newer, just redo */
+ retry_count++;
+ if (retry_count > 1024) {
+
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_TOOMANY | DBG_FUNC_NONE, (uint32_t)rwlock, 0XEEEEEEEE, lcntval, ucntval, 0);
+#endif
+ error = EAGAIN;
+ goto out;
} else {
- return(-1);
+ sched_yield();
+ goto loop;
}
}
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
- } else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
-
- self = pthread_self();
-
- lgenval = *lseqaddr;
- if ((is_rw_ebit_set(lgenval)) && (rwlock->rw_owner == self)) {
- return(1);
- }
- return(0);
-}
-/**************************************************************/
-#endif /* NOTYET */
-
-static int
-rwlock_unlock_action_onread(pthread_rwlock_t * orwlock, uint32_t updateval)
-{
- npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- int error = 0;
- uint32_t lgenval, newval;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
- pthread_t self;
- uint64_t threadid;
-
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
- } else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
-
- lgenval = *lseqaddr;
+ /* Need to update L (remove R bit) and S word */
+ newval = (lcntval + PTHRW_INC) & PTH_RWLOCK_RESET_RBIT;
+ newsval = (rw_seq + PTHRW_INC);
-lp2:
- lgenval = *lseqaddr;
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
+ goto loop;
#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 3, lgenval, 0);
-#endif
- /* if the kernel antcipated seq and one on the lock are same, set the one from kernel */
- if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
- if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
- goto lp2;
- goto succout;
- }
-
- /* state bits are same? */
- if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
- /* nothing to do */
- goto succout;
- }
-
- newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);
-
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 4, newval, 0);
-#endif
- /* high bits are state on the lock; lowbits are one kernel need to set */
- switch (newval) {
- /* W States */
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
- //goto ktrans;
- }
- break;
-
-
- /* L states */
- case ((PTHRW_LBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
-
- /* Y states */
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
- //goto ktrans;
- }
- break;
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
- //goto ktrans;
- }
- break;
-
- /* YU states */
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
- //goto ktrans;
- }
- break;
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
- //goto ktrans;
- }
- break;
-
- /* E states */
- case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
-
- /* WE states */
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action2(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
-
- /* WL states */
- case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
-
- default:
- /* illegal states */
- self = pthread_self();
- threadid = self->thread_id;
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lcntval, newval, 0);
#endif
- LIBC_ABORT("incorect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
- };
-
- if (error != 0)
- goto lp2;
-succout:
+ PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, 0, 0, 0);
#endif
return(0);
-}
-
-
-static uint32_t
-modbits(uint32_t lgenval, uint32_t updateval)
-{
- uint32_t lval = lgenval & PTHRW_BIT_MASK;
- uint32_t uval = updateval & PTHRW_BIT_MASK;
- uint32_t rval, nlval;
-
- nlval = (lval | uval);
- if ((uval & PTHRW_EBIT) == 0)
- nlval &= ~PTHRW_EBIT;
- if ((nlval & (PTHRW_WBIT | PTHRW_YBIT)) == (PTHRW_WBIT | PTHRW_YBIT))
- nlval &= ~PTHRW_YBIT;
- /* no new writers and kernel resets w bit, reset W bit on the lock */
- if (((nlval & (PTHRW_WBIT | PTHRW_SHADOW_W)) == PTHRW_WBIT) && ((updateval & PTHRW_WBIT) == 0))
- nlval &= ~PTHRW_WBIT;
-
- rval = (lgenval & PTHRW_COUNT_MASK) | nlval;
- return(rval);
-}
-
-static int
-rwlock_unlock_action1(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
-{
- npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- int error = 0;
- uint32_t newval;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
-
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
- } else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
-
- newval = modbits(lgenval, updateval);
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
- error = EINVAL;
+out:
+ PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif
return(error);
}
-static int
-rwlock_unlock_action2(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
+int
+pthread_rwlock_tryrdlock(pthread_rwlock_t * orwlock)
{
npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- uint32_t newval;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
+ uint32_t lcntval, ucntval, rw_seq, newval, newsval;
+ int error = 0, retry_count = 0;
+ uint64_t oldval64, newval64;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
- } else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
-
- newval = modbits(lgenval, updateval);
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
- /* roundtrip kernel */
-
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT2 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
-#endif
- (void) __psynch_rw_unlock2(orwlock, lgenval, *useqaddr, *wcaddr, rwlock->rw_flags);
- return(0);
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ LOCK(rwlock->lock);
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
+ return(error);
+ }
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
+ return(EINVAL);
+ }
+ UNLOCK(rwlock->lock);
}
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT2 | DBG_FUNC_NONE, 0xffffffff, 0, 0, 0, 0);
-#endif
-
- return(EINVAL);
-}
-
-/* This is used when an exclusive write lock of any kind is being granted. For unlock thread, it needs to try to set the bit, if not move on */
-static int
-rwlock_unlock_action_k(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
-{
- npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- uint32_t newval;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
} else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
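+
+ /*
+  * Only the userland fast path is attempted here; if the lock cannot be
+  * taken without blocking, return EBUSY rather than entering the kernel.
+  */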
- newval = modbits(lgenval, updateval);
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, lgenval, updateval, newval, 0, 0);
-#endif
- /* try to set, if not not a prolem as the thread taking exclusive will take care of the discrepency */
-
- if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, 0x55555555, lgenval, newval, 0, 0);
-#endif
-
- } else {
+loop:
+ lcntval = *lcntaddr;
+ ucntval = *ucntaddr;
+ rw_seq = *seqaddr;
#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, 0xAAAAAAAA, lgenval, newval, 0, 0);
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_TRYRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lcntval, ucntval, rw_seq, 0);
#endif
- }
-
- return(0);
-}
-
-static int
-rwlock_exclusive_lockreturn(pthread_rwlock_t * orwlock, uint32_t updateval)
-{
- npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
- uint32_t lgenval, newval;
- volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
- pthread_t self;
- uint64_t threadid;
-
- int error = 0;
-
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
- } else {
- lseqaddr = rwlock->rw_lseqaddr;
- useqaddr = rwlock->rw_useqaddr;
- wcaddr = rwlock->rw_wcaddr;
- }
-
-lp2:
- lgenval = *lseqaddr;
-
- /* if the kernel antcipated seq and one on the lock are same, set the one from kernel */
- if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
- if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
- goto lp2;
- goto out;
- }
-
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, lgenval, updateval, 1, 0, 0);
-#endif
- /* state bits are same? */
- if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
- /* nothing to do */
- goto out;
- }
-
+ /* if the L bit is set, or the U and K bits are clear, acquire the lock in userland */
+ if (can_rwl_readinuser(lcntval))
+ goto gotlock;
- newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);
+ error = EBUSY;
+ goto out;
+gotlock:
+ if (rw_diffgenseq(lcntval, ucntval) >= PTHRW_MAX_READERS) {
+ /* since ucntval may be newer, just redo */
+ retry_count++;
+ if (retry_count > 1024) {
+
#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, newval, 0, 2, 0, 0);
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_TOOMANY | DBG_FUNC_NONE, (uint32_t)rwlock, 0XEEEEEEEE, lcntval, ucntval, 0);
#endif
- /* high bits are state on the lock; lowbits are one kernel need to set */
- switch (newval) {
- /* W States */
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
error = EAGAIN;
- }
- break;
-
-
- /* All L states illegal here */
-
- /* Y states */
- case (PTHRW_YBIT << PTHRW_COUNT_SHIFT) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = EAGAIN;
- }
- break;
- case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
- error = EAGAIN;
- }
- break;
-
- /* YU states */
- case ((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = EAGAIN;
- }
- break;
-
- case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
- error = EAGAIN;
- }
- break;
-
- /* E states */
- case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
-
- /* WE states */
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
- case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
- error = rwlock_unlock_action1(orwlock, lgenval, updateval);
- }
- break;
-
- /* All WL states are illegal*/
-
- default:
- /* illegal states */
- self = pthread_self();
- threadid = self->thread_id;
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
-#endif
- LIBC_ABORT("rwlock_exclusive_lockreturn: incorect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
- };
-
- if (error == EINVAL)
- goto lp2;
-out:
-#if _KSYN_TRACE_
- (void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, error, 0, 0xffffffff, 0, 0);
-#endif
- return(error);
-}
-
-/* returns are not bit shifted */
-static int
-rw_diffgenseq(uint32_t x, uint32_t y)
-{
- uint32_t lx = (x & PTHRW_COUNT_MASK);
- uint32_t ly = (y &PTHRW_COUNT_MASK);
-
- if (lx > ly) {
- return(lx-ly);
- } else {
- return((PTHRW_MAX_READERS - y) + lx + PTHRW_INC);
- }
-
-}
-
-#endif /* i386 || x86_64 ] */
-
-
-#endif /* !BUILDING_VARIANT ] */
-
-int
-pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
-{
-#if defined(__i386__) || defined(__x86_64__) || defined(__DARWIN_UNIX03)
- int ret;
-#endif /* __i386__ || __x86_64__ */
-
-
-#if defined(__i386__) || defined(__x86_64__)
- if ((usenew_impl != 0)) {
- return(_new_pthread_rwlock_destroy(rwlock));
- }
-#endif /* __i386__ || __x86_64__ */
-
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- return(EINVAL);
- }
-#if defined(__i386__) || defined(__x86_64__)
- else if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- ret = _new_pthread_rwlock_destroy(rwlock);
- return(ret);
- }
-#endif /* __i386__ || __x86_64__ */
- else {
-#if __DARWIN_UNIX03
- /* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0)
- return(ret);
-
- if (rwlock->state != 0) {
- pthread_mutex_unlock(&rwlock->lock);
- return(EBUSY);
- }
- pthread_mutex_unlock(&rwlock->lock);
-#endif /* __DARWIN_UNIX03 */
-
- pthread_mutex_destroy(&rwlock->lock);
- pthread_cond_destroy(&rwlock->read_signal);
- pthread_cond_destroy(&rwlock->write_signal);
- rwlock->sig = _PTHREAD_NO_SIG;
- return(0);
- }
-}
-
-int
-pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
-{
- int ret;
-
-#if defined(__i386__) || defined(__x86_64__)
- if ((usenew_impl != 0)) {
- return(_new_pthread_rwlock_init(rwlock, attr));
- }
-#endif /* __i386__ || __x86_64__ */
-
-#if __DARWIN_UNIX03
- if (attr && (attr->sig != _PTHREAD_RWLOCK_ATTR_SIG)) {
- return(EINVAL);
- }
-#endif /* __DARWIN_UNIX03 */
-
-#if defined(__i386__) || defined(__x86_64__)
- if ((attr != NULL) && (attr->pshared == PTHREAD_PROCESS_SHARED)) {
- ret = _new_pthread_rwlock_init(rwlock, attr);
- return(ret);
- }
-#endif /* __i386__ || __x86_64__ */
-
-#if __DARWIN_UNIX03
- /* if already inited check whether it is in use, then return EBUSY */
- if ((rwlock->sig == _PTHREAD_RWLOCK_SIG) && (rwlock->state !=0 )) {
- return(EBUSY);
- }
-#endif /* __DARWIN_UNIX03 */
-
- /* initialize the lock */
- if ((ret = pthread_mutex_init(&rwlock->lock, NULL)) != 0)
- return(ret);
- else {
- /* initialize the read condition signal */
- ret = pthread_cond_init(&rwlock->read_signal, NULL);
-
- if (ret != 0) {
- pthread_mutex_destroy(&rwlock->lock);
- return(ret);
+ goto out;
} else {
- /* initialize the write condition signal */
- ret = pthread_cond_init(&rwlock->write_signal, NULL);
-
- if (ret != 0) {
- pthread_cond_destroy(&rwlock->read_signal);
- pthread_mutex_destroy(&rwlock->lock);
- return(ret);
- } else {
- /* success */
- rwlock->state = 0;
- rwlock->owner = (pthread_t)0;
- rwlock->blocked_writers = 0;
- if (attr)
- rwlock->pshared = attr->pshared;
- else
- rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
-
- rwlock->sig = _PTHREAD_RWLOCK_SIG;
- return(0);
- }
- }
- }
-}
-
-int
-pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
-{
- int ret;
-#if __DARWIN_UNIX03
- pthread_t self = pthread_self();
-#endif
-
-#if defined(__i386__) || defined(__x86_64__)
- if ((usenew_impl != 0)) {
- return(_new_pthread_rwlock_rdlock(rwlock));
- }
-#endif /* __i386__ || __x86_64__ */
-
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
- PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
- return(ret);
- }
- }
-
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, EINVAL);
- return(EINVAL);
- }
-#if defined(__i386__) || defined(__x86_64__)
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- ret = _new_pthread_rwlock_rdlock(rwlock);
- return(ret);
- }
-#endif /* __i386__ || __x86_64__ */
- /* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
- PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
- return(ret);
- }
-
-#if __DARWIN_UNIX03
- if ((rwlock->state < 0) && (rwlock->owner == self)) {
- pthread_mutex_unlock(&rwlock->lock);
- PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, EDEADLK);
- return(EDEADLK);
- }
-#endif /* __DARWIN_UNIX03 */
-
-#if __DARWIN_UNIX03
- while (rwlock->blocked_writers || ((rwlock->state < 0) && (rwlock->owner != self)))
-#else /* __DARWIN_UNIX03 */
- while (rwlock->blocked_writers || rwlock->state < 0)
-
-#endif /* __DARWIN_UNIX03 */
- {
- /* give writers priority over readers */
- PLOCKSTAT_RW_BLOCK(rwlock, READ_LOCK_PLOCKSTAT);
- ret = pthread_cond_wait(&rwlock->read_signal, &rwlock->lock);
-
- if (ret != 0) {
- /* can't do a whole lot if this fails */
- pthread_mutex_unlock(&rwlock->lock);
- PLOCKSTAT_RW_BLOCKED(rwlock, READ_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
- PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
- return(ret);
+ sched_yield();
+ goto loop;
}
-
- PLOCKSTAT_RW_BLOCKED(rwlock, READ_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
}
+
+ /* Need to update L (remove R bit) and S word */
+ newval = (lcntval + PTHRW_INC) & PTH_RWLOCK_RESET_RBIT;
+ newsval = (rw_seq + PTHRW_INC);
- /* check lock count */
- if (rwlock->state == MAX_READ_LOCKS) {
- ret = EAGAIN;
- PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
- }
- else {
- ++rwlock->state; /* indicate we are locked for reading */
- PLOCKSTAT_RW_ACQUIRE(rwlock, READ_LOCK_PLOCKSTAT);
- }
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
+ goto loop;
- /*
- * Something is really wrong if this call fails. Returning
- * error won't do because we've already obtained the read
- * lock. Decrementing 'state' is no good because we probably
- * don't have the monitor lock.
- */
- pthread_mutex_unlock(&rwlock->lock);
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_TRYRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lcntval, newval, 0);
+#endif
+
+ PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
+ return(0);
- return(ret);
+out:
+ PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_TRYRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, error, 0, 0);
+#endif
+ return(error);
}
int
-pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
+pthread_rwlock_trywrlock(pthread_rwlock_t * orwlock)
{
- int ret;
-
-#if defined(__i386__) || defined(__x86_64__)
- if ((usenew_impl != 0)) {
- return(_new_pthread_rwlock_tryrdlock(rwlock));
- }
-#endif /* __i386__ || __x86_64__ */
-
- /* check for static initialization */
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
- PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
- return(ret);
- }
- }
-
+#if __DARWIN_UNIX03
+ pthread_t self = pthread_self();
+#endif /* __DARWIN_UNIX03 */
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ uint32_t lcntval, rw_seq, newval, newsval;
+ int error = 0, gotlock = 0;
+ uint64_t oldval64, newval64;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+
if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, EINVAL);
- return(EINVAL);
+ LOCK(rwlock->lock);
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+ return(error);
+ }
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
+ return(EINVAL);
+ }
+ UNLOCK(rwlock->lock);
}
-#if defined(__i386__) || defined(__x86_64__)
+
if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- ret = _new_pthread_rwlock_tryrdlock(rwlock);
- return(ret);
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
+ } else {
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
-#endif /* __i386__ || __x86_64__ */
- /* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
- PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
- return(ret);
- }
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_TRYWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
+#endif
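+ /*
+  * A writer can take the lock in userland only when the R bit is set,
+  * i.e. the lock is in its reset state with no holders or waiters;
+  * otherwise report EBUSY without a kernel transition.
+  */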
+loop:
+ lcntval = *lcntaddr;
+ rw_seq = *seqaddr;
+
+ /* can we acquire in userland? */
+ if ((lcntval & PTH_RWL_RBIT) != 0) {
+ newval = ((lcntval + PTHRW_INC) & PTHRW_COUNT_MASK) | PTH_RWL_IBIT | PTH_RWL_KBIT | PTH_RWL_EBIT;
+ newsval = rw_seq + PTHRW_INC;
+ gotlock = 1;
+ } else
+ gotlock = 0;
- /* give writers priority over readers */
- if (rwlock->blocked_writers || rwlock->state < 0) {
- ret = EBUSY;
- PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
- }
- else if (rwlock->state == MAX_READ_LOCKS) {
- ret = EAGAIN; /* too many read locks acquired */
- PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
- }
- else {
- ++rwlock->state; /* indicate we are locked for reading */
- PLOCKSTAT_RW_ACQUIRE(rwlock, READ_LOCK_PLOCKSTAT);
- }
- /* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&rwlock->lock);
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+
+ if (gotlock != 0) {
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
+ } else
+ newval64 = oldval64;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE) {
+ goto loop;
+ }
+ if (gotlock == 1) {
+#if __DARWIN_UNIX03
+ rwlock->rw_owner = self;
+#endif /* __DARWIN_UNIX03 */
+ PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_TRYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, 0, 0);
+#endif
+ return(0);
+ } else {
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_TRYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, EBUSY, 0, 0);
+#endif
- return(ret);
+ PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EBUSY);
+ return(EBUSY);
+ }
}
int
-pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
+pthread_rwlock_wrlock(pthread_rwlock_t * orwlock)
{
- int ret;
#if __DARWIN_UNIX03
pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
-
-#if defined(__i386__) || defined(__x86_64__)
- if ((usenew_impl != 0)) {
- return(_new_pthread_rwlock_trywrlock(rwlock));
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
+ int error = 0, gotlock = 0;
+ uint64_t oldval64, newval64;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+ uint64_t myid = 0;
+
+ if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ LOCK(rwlock->lock);
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+ return(error);
+ }
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
+ return(EINVAL);
+ }
+ UNLOCK(rwlock->lock);
+ }
+
+ if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
+ } else {
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
-#endif /* __i386__ || __x86_64__ */
+
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
+#endif
+loop:
+ lcntval = *lcntaddr;
+ ucntval = *ucntaddr;
+ rw_seq = *seqaddr;
+
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lcntval, ucntval, rw_seq, 0);
+#endif
- /* check for static initialization */
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
- PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
- return(ret);
+#if __DARWIN_UNIX03
+ if (is_rwl_ebit_set(lcntval)) {
+ if (rwlock->rw_owner == self) {
+ error = EDEADLK;
+ goto out;
}
}
+#endif /* __DARWIN_UNIX03 */
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
- return(EINVAL);
- }
+
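+ /*
+  * Two cases: if the R (reset) bit is set the lock is free, so take it
+  * exclusively right here (E/K bits); otherwise record a pending writer
+  * (W bit) and fall through to block in the kernel.
+  */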
+ if ((lcntval & PTH_RWL_RBIT) != 0) {
+ /* lock is in its restart state, so a writer can acquire it directly */
+ newval = ((lcntval + PTHRW_INC) & PTHRW_COUNT_MASK) | PTH_RWL_IBIT | PTH_RWL_KBIT | PTH_RWL_EBIT;
-#if defined(__i386__) || defined(__x86_64__)
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- ret = _new_pthread_rwlock_trywrlock(rwlock);
- return(ret);
- }
-#endif /* __i386__ || __x86_64__ */
+ newsval = rw_seq + PTHRW_INC;
+ gotlock = 1;
+
+ } else {
+ if (is_rwl_lbit_set(lcntval))
+ newval = (lcntval + PTHRW_INC) | PTH_RWL_WBIT;
+ else
+ newval = (lcntval + PTHRW_INC) | PTH_RWL_KBIT | PTH_RWL_WBIT;
- /* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
- PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
- return(ret);
+ newsval = rw_seq;
+ if (is_rws_setseq(rw_seq)) {
+ newsval &= PTHRW_SW_Reset_BIT_MASK;
+ newsval |= (newval & PTHRW_COUNT_MASK);
+ }
+ gotlock = 0;
}
+ /* update lock seq */
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
+
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lcntval, newval, 0);
+#endif
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
+ goto loop;
+
+ /* lock acquired in userland itself? */
+ if (gotlock != 0)
+ goto gotit;
+
+ /* unable to acquire in userland, transition to kernel */
+
+ PLOCKSTAT_RW_BLOCK(orwlock, WRITE_LOCK_PLOCKSTAT);
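+ /*
+  * Block in the kernel until the write lock is granted; the returned
+  * value carries the new L state and is merged back in via
+  * rwlock_action_onreturn().  EINTR just retries the wait.
+  */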
+retry:
+ updateval = __psynch_rw_wrlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
+ if (updateval == (uint32_t)-1) {
+ error = errno;
+ } else
+ error = 0;
+
+ if (error == EINTR) {
+ goto retry;
+ }
- if (rwlock->state != 0) {
- ret = EBUSY;
- PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
+ if (error != 0) {
+#if _KSYN_TRACE_
+ set_enable(2);
+#endif /* _KSYN_TRACE_ */
+ (void)pthread_threadid_np(pthread_self(), &myid);
+ LIBC_ABORT("wrlock from kernel with unknown error %x: tid %x\n", updateval, (uint32_t)myid);
}
- else {
- /* indicate we are locked for writing */
- rwlock->state = -1;
+
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x33333333, newval, updateval, 0);
+#endif
+ PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
+ if (error == 0) {
+ rwlock_action_onreturn(orwlock, updateval);
+gotit:
#if __DARWIN_UNIX03
- rwlock->owner = self;
+ rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
- PLOCKSTAT_RW_ACQUIRE(rwlock, WRITE_LOCK_PLOCKSTAT);
- }
-
- /* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&rwlock->lock);
-
- return(ret);
+ PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
+#endif
+ return(0);
+ }
+#if __DARWIN_UNIX03
+out:
+#endif /* __DARWIN_UNIX03 */
+ PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
+#endif
+ return(error);
}
+
int
-pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
+pthread_rwlock_unlock(pthread_rwlock_t * orwlock)
{
- int ret;
- int writer = (rwlock < 0) ? 1:0;
-
-#if defined(__i386__) || defined(__x86_64__)
- if ((usenew_impl != 0)) {
- return(_new_pthread_rwlock_unlock(rwlock));
- }
-#endif /* __i386__ || __x86_64__ */
-
+ npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
+ uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval, ulval;
+ int error = 0, wrlock = 0, haswbit = 0, hasubit = 0, hasybit = 0;
+ uint64_t oldval64, newval64;
+ volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
+ uint64_t myid = 0;
+
if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- PLOCKSTAT_RW_ERROR(rwlock, writer, EINVAL);
- return(EINVAL);
+ LOCK(rwlock->lock);
+ if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
+ if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, wrlock, error);
+ return(error);
+ }
+ } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
+ UNLOCK(rwlock->lock);
+ PLOCKSTAT_RW_ERROR(orwlock, wrlock, EINVAL);
+ return(EINVAL);
+ }
+ UNLOCK(rwlock->lock);
}
-
-#if defined(__i386__) || defined(__x86_64__)
+
if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- ret = _new_pthread_rwlock_unlock(rwlock);
- return(ret);
+ RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
+ } else {
+ lcntaddr = rwlock->rw_lcntaddr;
+ ucntaddr = rwlock->rw_ucntaddr;
+ seqaddr = rwlock->rw_seqaddr;
}
-#endif /* __i386__ || __x86_64__ */
+
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
+#endif
+loop:
+ lcntval = *lcntaddr;
+ ucntval = *ucntaddr;
+ rw_seq = *seqaddr;
+
+
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x51515151, lcntval, ucntval, 0);
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x51515151, rw_seq, 0, 0);
+#endif
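+
+ /*
+  * Unlock path: bump the U (unlock) count; if this was the last holder
+  * (L == U), reinitialize L and S to their free values.  Otherwise, if
+  * waiters are pending and the unlock count has caught up with the S
+  * sequence, hand the wakeup off to the kernel via __psynch_rw_unlock().
+  */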
+ /* check for spurious unlocks */
+ if ((lcntval & PTH_RWL_RBIT) != 0) {
+ newval = lcntval;
+ newsval = rw_seq;
- /* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
- PLOCKSTAT_RW_ERROR(rwlock, writer, ret);
- return(ret);
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) == TRUE) {
+ /* spurious unlock, return */
+ error = EINVAL;
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x1a1b1c1d, lcntval, ucntval, 0);
+#endif
+ goto succout;
+ } else
+ goto loop;
}
- if (rwlock->state > 0) {
- if (--rwlock->state == 0 && rwlock->blocked_writers)
- ret = pthread_cond_signal(&rwlock->write_signal);
- } else if (rwlock->state < 0) {
- rwlock->state = 0;
+ if (is_rwl_ebit_set(lcntval)) {
+ wrlock = 1;
#if __DARWIN_UNIX03
- rwlock->owner = (pthread_t)0;
+ rwlock->rw_owner = (pthread_t)0;
#endif /* __DARWIN_UNIX03 */
+ }
+
+ /* update U */
+
+ ulval = (ucntval + PTHRW_INC);
- if (rwlock->blocked_writers)
- ret = pthread_cond_signal(&rwlock->write_signal);
- else
- ret = pthread_cond_broadcast(&rwlock->read_signal);
- } else
- ret = EINVAL;
+ if (OSAtomicCompareAndSwap32(ucntval, ulval, (volatile int32_t *)ucntaddr) != TRUE)
+ goto loop;
+
+lp11:
+ /* just validate that the L and S values are still current */
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
- if (ret == 0) {
- PLOCKSTAT_RW_RELEASE(rwlock, writer);
- } else {
- PLOCKSTAT_RW_ERROR(rwlock, writer, ret);
+ newval64 = oldval64;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE) {
+ lcntval = *lcntaddr;
+ rw_seq = *seqaddr;
+ goto lp11;
}
- /* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&rwlock->lock);
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xd1d2d3d4, lcntval, rw_seq, 0);
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xd1d2d3d4, ulval, 0, 0);
+#endif
- return(ret);
-}
+ /* last unlock; note that U has already been updated above */
+ if ((lcntval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
-int
-pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
-{
- int ret;
-#if __DARWIN_UNIX03
- pthread_t self = pthread_self();
-#endif /* __DARWIN_UNIX03 */
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xbbbbbbbb, lcntval, ucntval, 0);
+#endif
+ /* Set L with R and init bits and set S to L */
+ newval = (lcntval & PTHRW_COUNT_MASK)| PTHRW_RWLOCK_INIT;
+ newsval = (lcntval & PTHRW_COUNT_MASK)| PTHRW_RWS_INIT;
-#if defined(__i386__) || defined(__x86_64__)
- if ((usenew_impl != 0)) {
- return(_new_pthread_rwlock_wrlock(rwlock));
- }
-#endif /* __i386__ || __x86_64__ */
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
- /* check for static initialization */
- if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
- if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
- PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
- return(ret);
- }
- }
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
- if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
- PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
- return(EINVAL);
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE) {
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xcccccccc, 0, 0, 0);
+#endif
+ lcntval = *lcntaddr;
+ rw_seq = *seqaddr;
+ goto lp11;
+ }
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xdddddddd, lcntval, ucntval, 0);
+#endif
+ goto succout;
}
-#if defined(__i386__) || defined(__x86_64__)
- if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
- ret = _new_pthread_rwlock_wrlock(rwlock);
- return(ret);
+ /* if the lock is not held exclusive and no writer/yield/kernel handoff is pending, skip the kernel */
+ if ((lcntval & (PTH_RWL_EBIT | PTH_RWL_WBIT | PTH_RWL_YBIT | PTH_RWL_KBIT)) == 0) {
+ goto succout;
}
-#endif /* __i386__ || __x86_64__ */
+ /* kernel transition needed? */
+ /* U+1 == S? */
+ if ((ulval + PTHRW_INC) != (rw_seq & PTHRW_COUNT_MASK)) {
+ if ((lcntval & PTH_RWL_UBIT) != 0) {
+ /* if the U bit is set, check whether U + 2 == S instead */
+ if ((ulval + PTHRW_INC + PTHRW_INC) != (rw_seq & PTHRW_COUNT_MASK))
+ goto succout;
+ } else
+ goto succout;
+ }
+
+ haswbit = lcntval & PTH_RWL_WBIT;
+ hasubit = lcntval & PTH_RWL_UBIT;
+ hasybit = lcntval & PTH_RWL_YBIT;
+
+ /* clear all state bits in L and set the K bit */
+ newval = (lcntval & PTHRW_COUNT_MASK) | PTH_RWL_KBIT;
+ /* set I bit on S word */
+ newsval = rw_seq | PTH_RWS_IBIT;
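+ /*
+  * Mirror whichever of the W/U/Y waiter bits were set into the S word
+  * (the *SVBIT flags), presumably so the kernel can tell which kinds of
+  * waiters were pending at handoff.
+  */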
+ if (haswbit != 0)
+ newsval |= PTH_RWS_WSVBIT;
+ if (hasubit != 0)
+ newsval |= PTH_RWS_USVBIT;
+ if (hasybit != 0)
+ newsval |= PTH_RWS_YSVBIT;
+
+ oldval64 = (((uint64_t)rw_seq) << 32);
+ oldval64 |= lcntval;
+
+ newval64 = (((uint64_t)newsval) << 32);
+ newval64 |= newval;
+
+ if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
+ goto lp11;
- /* grab the monitor lock */
- if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
- PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
- return(ret);
- }
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555511, 1, ulval, 0);
+#endif
+ updateval = __psynch_rw_unlock(orwlock, lcntval, ulval, newsval, rwlock->rw_flags);
+ if (updateval == (uint32_t)-1) {
+ error = errno;
+ } else
+ error = 0;
+
+ if (error != 0) {
-#if __DARWIN_UNIX03
- if ((rwlock->state < 0) && (rwlock->owner == self)) {
- pthread_mutex_unlock(&rwlock->lock);
- PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, EDEADLK);
- return(EDEADLK);
- }
-#endif /* __DARWIN_UNIX03 */
- while (rwlock->state != 0) {
- ++rwlock->blocked_writers;
-
- PLOCKSTAT_RW_BLOCK(rwlock, WRITE_LOCK_PLOCKSTAT);
- ret = pthread_cond_wait(&rwlock->write_signal, &rwlock->lock);
-
- if (ret != 0) {
- --rwlock->blocked_writers;
- pthread_mutex_unlock(&rwlock->lock);
- PLOCKSTAT_RW_BLOCKED(rwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
- PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
- return(ret);
+ /* any error other than EINTR is unexpected here */
+ if (error != EINTR) {
+#if _KSYN_TRACE_
+ set_enable(4);
+#endif /* _KSYN_TRACE_ */
+ (void)pthread_threadid_np(pthread_self(), &myid);
+ LIBC_ABORT("rwunlock from kernel with unknown error %x: tid %x\n", error, (uint32_t)myid);
+ goto succout;
}
-
- PLOCKSTAT_RW_BLOCKED(rwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
-
- --rwlock->blocked_writers;
+ error = 0;
}
+
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555522, 3, lcntval, 0);
+#endif
- /* indicate we are locked for writing */
- rwlock->state = -1;
-#if __DARWIN_UNIX03
- rwlock->owner = self;
-#endif /* __DARWIN_UNIX03 */
- PLOCKSTAT_RW_ACQUIRE(rwlock, WRITE_LOCK_PLOCKSTAT);
-
- /* see the comment on this in pthread_rwlock_rdlock */
- pthread_mutex_unlock(&rwlock->lock);
-
- return(ret);
+succout:
+ PLOCKSTAT_RW_RELEASE(orwlock, wrlock);
+#if _KSYN_TRACE_
+ if (__pthread_lock_debug != 0)
+ (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
+#endif
+ return(0);
}