#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */
-extern int __gettimeofday(struct timeval *, struct timezone *);
-extern void _pthread_testcancel(pthread_t thread, int isconforming);
-
-PTHREAD_NOEXPORT
-int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
- const struct timespec *abstime, int isRelative, int isconforming);
-
-PTHREAD_ALWAYS_INLINE
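+// Packed per-condvar state for the ulock flavor: a 32-bit sequence number
+// (the word ulock waits on) plus 16-bit counts of blocked waiters and of
+// pending signals, all updated as a single atomic 64-bit value.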
+typedef union {
+ uint64_t val;
+ struct {
+ uint32_t seq;
+ uint16_t waiters;
+ uint16_t signal;
+ };
+} pthread_ulock_cond_state_u;
+
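+// One waiter, expressed as an increment of the packed state: a 1 shifted to
+// the bit offset of the 'waiters' field.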
+#define _PTHREAD_COND_WAITERS_INC \
+ (1ull << (offsetof(pthread_ulock_cond_state_u, waiters) * CHAR_BIT))
+
+OS_ALWAYS_INLINE
static inline void
-COND_GETSEQ_ADDR(_pthread_cond *cond,
+COND_GETSEQ_ADDR(pthread_cond_t *cond,
volatile uint64_t **c_lsseqaddr,
volatile uint32_t **c_lseqcnt,
volatile uint32_t **c_useqcnt,
*c_lsseqaddr = (volatile uint64_t *)*c_lseqcnt;
}
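+
+// The ulock state overlays the first 8-byte-aligned word of c_seq; the
+// misalign flag computed in _pthread_cond_init() selects that word.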
+OS_ALWAYS_INLINE
+static inline pthread_ulock_cond_state_u *
+_pthread_ulock_cond_state(pthread_cond_t *cond)
+{
+ return (pthread_ulock_cond_state_u *)&cond->c_seq[cond->misalign];
+}
+
#ifndef BUILDING_VARIANT /* [ */
-static void _pthread_cond_cleanup(void *arg);
-static void _pthread_cond_updateval(_pthread_cond * cond, int error,
- uint32_t updateval);
+static void _pthread_psynch_cond_cleanup(void *arg);
+static void _pthread_cond_updateval(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ int error, uint32_t updateval);
+
+static int
+_pthread_ulock_cond_wait_complete(pthread_ulock_cond_state_u *state,
+ pthread_mutex_t *mutex, int rc);
+static void
+_pthread_ulock_cond_cleanup(void *arg);
int
{
int res = EINVAL;
if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
-#if __DARWIN_UNIX03
- if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED)
-#else /* __DARWIN_UNIX03 */
- if (pshared == PTHREAD_PROCESS_PRIVATE)
-#endif /* __DARWIN_UNIX03 */
- {
+ if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
attr->pshared = pshared;
res = 0;
}
pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex,
const struct timespec *abstime)
{
- return _pthread_cond_wait(cond, mutex, abstime, 1, 0);
+ return _pthread_cond_wait(cond, mutex, abstime, 1,
+ PTHREAD_CONFORM_UNIX03_NOCANCEL);
}
#endif /* !BUILDING_VARIANT ] */
-PTHREAD_ALWAYS_INLINE
+OS_ALWAYS_INLINE
static inline int
-_pthread_cond_init(_pthread_cond *cond, const pthread_condattr_t *attr, int conforming)
+_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr,
+ uint32_t sig)
{
volatile uint64_t *c_lsseqaddr;
volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
cond->c_seq[2] = 0;
cond->unused = 0;
+ // TODO: PTHREAD_STRICT candidate
cond->misalign = (((uintptr_t)&cond->c_seq[0]) & 0x7) != 0;
COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
*c_sseqcnt = PTH_RWS_CV_CBIT; // set Sword to 0c
- if (conforming) {
- if (attr) {
- cond->pshared = attr->pshared;
- } else {
- cond->pshared = _PTHREAD_DEFAULT_PSHARED;
- }
+ if (attr) {
+ cond->pshared = attr->pshared;
} else {
cond->pshared = _PTHREAD_DEFAULT_PSHARED;
}
- long sig = _PTHREAD_COND_SIG;
-
// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
// For binary compatibility reasons we cannot require natural alignment of
// the 64bit 'sig' long value in the struct. rdar://problem/21610439
- uint32_t *sig32_ptr = (uint32_t*)&cond->sig;
- uint32_t *sig32_val = (uint32_t*)&sig;
- *(sig32_ptr + 1) = *(sig32_val + 1);
- os_atomic_store(sig32_ptr, *sig32_val, release);
-#else
- os_atomic_store2o(cond, sig, sig, release);
+ cond->sig._pad = 0;
#endif
+ os_atomic_store(&cond->sig.val, sig, release);
return 0;
}
#ifndef BUILDING_VARIANT /* [ */
-PTHREAD_NOINLINE
+OS_ALWAYS_INLINE
static int
-_pthread_cond_check_init_slow(_pthread_cond *cond, bool *inited)
+_pthread_cond_check_signature(pthread_cond_t *cond, uint32_t sig_current,
+ uint32_t *sig_inout)
{
- int res = EINVAL;
- if (cond->sig == _PTHREAD_COND_SIG_init) {
- _PTHREAD_LOCK(cond->lock);
- if (cond->sig == _PTHREAD_COND_SIG_init) {
- res = _pthread_cond_init(cond, NULL, 0);
- if (inited) {
- *inited = true;
- }
- } else if (cond->sig == _PTHREAD_COND_SIG) {
- res = 0;
+ int res = 0;
+ switch (sig_current) {
+ case _PTHREAD_COND_SIG_init:
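+		// callers resolve SIG_init (in _pthread_cond_check_init and its
+		// slow path) before ever calling here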
+ __builtin_unreachable();
+ break;
+ case _PTHREAD_COND_SIG_pristine:
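+		// a waiter advances a pristine condvar to the flavor it needs;
+		// signalers pass pristine and leave the state alone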
+ if (*sig_inout != _PTHREAD_COND_SIG_pristine) {
+ os_atomic_store(&cond->sig.val, *sig_inout, relaxed);
}
- _PTHREAD_UNLOCK(cond->lock);
- } else if (cond->sig == _PTHREAD_COND_SIG) {
- res = 0;
+ break;
+ case _PTHREAD_COND_SIG_psynch:
+ case _PTHREAD_COND_SIG_ulock:
+ if (*sig_inout == _PTHREAD_COND_SIG_pristine) {
+ *sig_inout = sig_current;
+ } else if (*sig_inout != sig_current) {
+			PTHREAD_INTERNAL_CRASH(0, "Mixed ulock and psynch condvar use");
+ }
+ break;
+ default:
+ // TODO: PTHREAD_STRICT candidate
+ res = EINVAL;
+ break;
}
+
return res;
}
-PTHREAD_ALWAYS_INLINE
-static inline int
-_pthread_cond_check_init(_pthread_cond *cond, bool *inited)
+OS_NOINLINE
+static int
+_pthread_cond_check_init_slow(pthread_cond_t *cond, uint32_t *sig_inout)
{
- int res = 0;
- if (cond->sig != _PTHREAD_COND_SIG) {
- return _pthread_cond_check_init_slow(cond, inited);
+ int res;
+ _pthread_lock_lock(&cond->lock);
+
+ uint32_t sig_current = os_atomic_load(&cond->sig.val, relaxed);
+ if (sig_current == _PTHREAD_COND_SIG_init) {
+ res = _pthread_cond_init(cond, NULL, *sig_inout);
+ } else {
+ res = _pthread_cond_check_signature(cond, sig_current, sig_inout);
}
+
+ _pthread_lock_unlock(&cond->lock);
return res;
}
+/*
+ * These routines maintain the signature of the condition variable, which
+ * encodes a small state machine:
+ * - a statically initialized condvar begins with SIG_init
+ * - explicit initialization via _cond_init() and implicit initialization
+ *   transition to SIG_pristine: there have been no waiters yet, so we don't
+ *   know which kind of mutex the condvar will be used with
+ * - the first _cond_wait() transitions to one of SIG_psynch or SIG_ulock
+ * according to the mutex being waited on
+ *
+ * On entry, *sig_inout is the furthest state we can transition to given the
+ * calling context. On exit, it is the actual state we observed, after any
+ * possible advancement.
+ */
+OS_ALWAYS_INLINE
+static inline int
+_pthread_cond_check_init(pthread_cond_t *cond, uint32_t *sig_inout)
+{
+ uint32_t sig_current = os_atomic_load(&cond->sig.val, relaxed);
+ if (sig_current == _PTHREAD_COND_SIG_init) {
+ return _pthread_cond_check_init_slow(cond, sig_inout);
+ } else {
+ return _pthread_cond_check_signature(cond, sig_current, sig_inout);
+ }
+}
+
PTHREAD_NOEXPORT_VARIANT
int
-pthread_cond_destroy(pthread_cond_t *ocond)
+pthread_cond_destroy(pthread_cond_t *cond)
{
- _pthread_cond *cond = (_pthread_cond *)ocond;
int res = EINVAL;
- if (cond->sig == _PTHREAD_COND_SIG) {
- _PTHREAD_LOCK(cond->lock);
+ uint32_t sig = os_atomic_load(&cond->sig.val, relaxed);
+ switch (sig) {
+ case _PTHREAD_COND_SIG_psynch:
+ _pthread_lock_lock(&cond->lock);
uint64_t oldval64, newval64;
uint32_t lcntval, ucntval, scntval;
flags |= _PTHREAD_MTX_OPT_PSHARED;
}
- cond->sig = _PTHREAD_NO_SIG;
+ os_atomic_store(&cond->sig.val, _PTHREAD_NO_SIG, relaxed);
res = 0;
- _PTHREAD_UNLOCK(cond->lock);
+ _pthread_lock_unlock(&cond->lock);
if (needclearpre) {
(void)__psynch_cvclrprepost(cond, lcntval, ucntval, scntval, 0, lcntval, flags);
}
- } else if (cond->sig == _PTHREAD_COND_SIG_init) {
+ break;
+ case _PTHREAD_COND_SIG_init:
// Compatibility for misbehaving applications that attempt to
// destroy a statically initialized condition variable.
- cond->sig = _PTHREAD_NO_SIG;
+ //
+ // fall through
+ case _PTHREAD_COND_SIG_pristine:
+ case _PTHREAD_COND_SIG_ulock:
+ os_atomic_store(&cond->sig.val, _PTHREAD_NO_SIG, relaxed);
res = 0;
+ break;
+ default:
+ // TODO: PTHREAD_STRICT candidate
+ break;
}
return res;
}
-PTHREAD_ALWAYS_INLINE
+OS_ALWAYS_INLINE
static inline int
-_pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
+_pthread_psynch_cond_signal(pthread_cond_t *cond, bool broadcast,
+ mach_port_t thread)
{
- int res;
- _pthread_cond *cond = (_pthread_cond *)ocond;
-
uint32_t updateval;
uint32_t diffgen;
uint32_t ulval;
int retry_count = 0, uretry_count = 0;
int ucountreset = 0;
- bool inited = false;
- res = _pthread_cond_check_init(cond, &inited);
- if (res != 0 || inited == true) {
- return res;
- }
-
COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
bool retry;
if (broadcast) {
// pass old U val so kernel will know the diffgen
uint64_t cvudgen = ((uint64_t)ucntval << 32) | diffgen;
- updateval = __psynch_cvbroad(ocond, cvlsgen, cvudgen, flags, NULL, 0, 0);
+ updateval = __psynch_cvbroad(cond, cvlsgen, cvudgen, flags, NULL, 0, 0);
} else {
- updateval = __psynch_cvsignal(ocond, cvlsgen, ucntval, thread, NULL, 0, 0, flags);
+ updateval = __psynch_cvsignal(cond, cvlsgen, ucntval, thread, NULL, 0, 0, flags);
}
if (updateval != (uint32_t)-1 && updateval != 0) {
- _pthread_cond_updateval(cond, 0, updateval);
+ _pthread_cond_updateval(cond, NULL, 0, updateval);
+ }
+
+ return 0;
+}
+
+OS_ALWAYS_INLINE
+static inline int
+_pthread_ulock_cond_signal(pthread_cond_t *cond, bool broadcast,
+ mach_port_t thread)
+{
+ pthread_ulock_cond_state_u *state = _pthread_ulock_cond_state(cond);
+
+ pthread_ulock_cond_state_u oldstate, newstate;
+ // release to pair with acquire after wait
+ os_atomic_rmw_loop(&state->val, oldstate.val, newstate.val, release, {
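+		// no waiters, or every waiter already has a signal pending:
+		// nothing to do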
+ if (!oldstate.waiters || oldstate.waiters == oldstate.signal) {
+ os_atomic_rmw_loop_give_up(return 0);
+ }
+
+ newstate = (pthread_ulock_cond_state_u){
+ .seq = oldstate.seq + 1,
+ .waiters = oldstate.waiters,
+ .signal = broadcast ? oldstate.waiters :
+ MIN(oldstate.signal + 1, oldstate.waiters),
+ };
+ });
+
+ PTHREAD_TRACE(ulcond_signal, cond, oldstate.val, newstate.val, broadcast);
+
+	// Priority hole: if we're preempted here, nobody else can wake the
+	// waiter whose signal we took responsibility for when we incremented
+	// the signal count.
+
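+	// Only enter the kernel if this call signaled at least one new waiter,
+	// i.e. the signal count wasn't already saturated at the waiter count.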
+ if (oldstate.signal < oldstate.waiters) {
+ uint32_t wake_op = UL_COMPARE_AND_WAIT | ULF_NO_ERRNO;
+ if (broadcast) {
+ wake_op |= ULF_WAKE_ALL;
+ } else if (thread) {
+ wake_op |= ULF_WAKE_THREAD;
+ }
+
+ for (;;) {
+ int rc = __ulock_wake(wake_op, &state->seq, thread);
+ if (rc < 0) {
+ switch (-rc) {
+ case EINTR:
+ continue;
+ case ENOENT:
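+					// nobody was blocked in the kernel; any
+					// remaining waiter will notice the seq
+					// change before it can block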
+ break;
+ case EALREADY:
+ if (!thread) {
+ PTHREAD_INTERNAL_CRASH(0, "EALREADY from ulock_wake");
+ }
+ // Compatibility with psynch: promote to broadcast
+ return pthread_cond_broadcast(cond);
+ default:
+ PTHREAD_INTERNAL_CRASH(-rc, "ulock_wake failure");
+ }
+ }
+ break;
+ }
}
return 0;
}
+OS_ALWAYS_INLINE
+static inline int
+_pthread_cond_signal(pthread_cond_t *cond, bool broadcast, mach_port_t thread)
+{
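+	// Signaling never chooses a flavor, so pass SIG_pristine as the
+	// furthest reachable state. If the condvar is still pristine it has
+	// never had a waiter, and there is nothing to wake.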
+ uint32_t sig = _PTHREAD_COND_SIG_pristine;
+ int res = _pthread_cond_check_init(cond, &sig);
+ if (res != 0 || sig == _PTHREAD_COND_SIG_pristine) {
+ return res;
+ }
+
+ switch (sig) {
+ case _PTHREAD_COND_SIG_psynch:
+ return _pthread_psynch_cond_signal(cond, broadcast, thread);
+ case _PTHREAD_COND_SIG_ulock:
+ return _pthread_ulock_cond_signal(cond, broadcast, thread);
+ default:
+ PTHREAD_INTERNAL_CRASH(sig, "impossible cond signature");
+ }
+}
+
/*
* Signal a condition variable, waking up all threads waiting for it.
*/
PTHREAD_NOEXPORT_VARIANT
int
-pthread_cond_broadcast(pthread_cond_t *ocond)
+pthread_cond_broadcast(pthread_cond_t *cond)
{
- return _pthread_cond_signal(ocond, true, MACH_PORT_NULL);
+ return _pthread_cond_signal(cond, true, MACH_PORT_NULL);
}
/*
*/
PTHREAD_NOEXPORT_VARIANT
int
-pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
+pthread_cond_signal_thread_np(pthread_cond_t *cond, pthread_t thread)
{
mach_port_t mp = MACH_PORT_NULL;
if (thread) {
mp = pthread_mach_thread_np((_Nonnull pthread_t)thread);
}
- return _pthread_cond_signal(ocond, false, mp);
+ return _pthread_cond_signal(cond, false, mp);
}
/*
*/
PTHREAD_NOEXPORT_VARIANT
int
-pthread_cond_signal(pthread_cond_t *ocond)
+pthread_cond_signal(pthread_cond_t *cond)
{
- return _pthread_cond_signal(ocond, false, MACH_PORT_NULL);
+ return _pthread_cond_signal(cond, false, MACH_PORT_NULL);
}
-/*
- * Manage a list of condition variables associated with a mutex
- */
-
-/*
- * Suspend waiting for a condition variable.
- * Note: we have to keep a list of condition variables which are using
- * this same mutex variable so we can detect invalid 'destroy' sequences.
- * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
- * remaining conforming behavior..
- */
-PTHREAD_NOEXPORT PTHREAD_NOINLINE
-int
-_pthread_cond_wait(pthread_cond_t *ocond,
- pthread_mutex_t *omutex,
- const struct timespec *abstime,
- int isRelative,
- int isconforming)
+static int
+_pthread_psynch_cond_wait(pthread_cond_t *cond,
+ pthread_mutex_t *mutex,
+ const struct timespec *then,
+ pthread_conformance_t conforming)
{
- int res;
- _pthread_cond *cond = (_pthread_cond *)ocond;
- _pthread_mutex *mutex = (_pthread_mutex *)omutex;
- struct timespec then = { 0, 0 };
uint32_t mtxgen, mtxugen, flags=0, updateval;
uint32_t lcntval, ucntval, scntval;
uint32_t nlval, ulval, savebits;
uint64_t oldval64, newval64, mugen, cvlsgen;
uint32_t *npmtx = NULL;
- res = _pthread_cond_check_init(cond, NULL);
- if (res != 0) {
- return res;
- }
-
- if (isconforming) {
- if (!_pthread_mutex_check_signature(mutex) &&
- !_pthread_mutex_check_signature_init(mutex)) {
- return EINVAL;
- }
- if (isconforming > 0) {
- _pthread_testcancel(pthread_self(), 1);
- }
- }
-
- /* send relative time to kernel */
- if (abstime) {
- if (isRelative == 0) {
- struct timespec now;
- struct timeval tv;
- __gettimeofday(&tv, NULL);
- TIMEVAL_TO_TIMESPEC(&tv, &now);
-
- /* Compute relative time to sleep */
- then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
- then.tv_sec = abstime->tv_sec - now.tv_sec;
- if (then.tv_nsec < 0) {
- then.tv_nsec += NSEC_PER_SEC;
- then.tv_sec--;
- }
- if (then.tv_sec < 0 || (then.tv_sec == 0 && then.tv_nsec == 0)) {
- return ETIMEDOUT;
- }
- if (isconforming &&
- (abstime->tv_sec < 0 ||
- abstime->tv_nsec < 0 ||
- abstime->tv_nsec >= NSEC_PER_SEC)) {
- return EINVAL;
- }
- } else {
- then.tv_sec = abstime->tv_sec;
- then.tv_nsec = abstime->tv_nsec;
- if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
- return ETIMEDOUT;
- }
- }
- if (isconforming && (then.tv_sec < 0 || then.tv_nsec < 0)) {
- return EINVAL;
- }
- if (then.tv_nsec >= NSEC_PER_SEC) {
- return EINVAL;
- }
- }
-
- if (cond->busy != NULL && cond->busy != mutex) {
- return EINVAL;
- }
-
COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
do {
cond->busy = mutex;
- res = _pthread_mutex_droplock(mutex, &flags, &npmtx, &mtxgen, &mtxugen);
+ int res = _pthread_mutex_droplock(mutex, &flags, &npmtx, &mtxgen, &mtxugen);
/* TBD: cases are for normal (non owner for recursive mutex; error checking)*/
if (res != 0) {
cvlsgen = ((uint64_t)(ulval | savebits)<< 32) | nlval;
// SUSv3 requires pthread_cond_wait to be a cancellation point
- if (isconforming) {
- pthread_cleanup_push(_pthread_cond_cleanup, (void *)cond);
- updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
- _pthread_testcancel(pthread_self(), isconforming);
+ if (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE) {
+ pthread_cleanup_push(_pthread_psynch_cond_cleanup, (void *)cond);
+ updateval = __psynch_cvwait(cond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)(then->tv_sec), (int32_t)(then->tv_nsec));
+ pthread_testcancel();
pthread_cleanup_pop(0);
} else {
- updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
+ updateval = __psynch_cvwait(cond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)(then->tv_sec), (int32_t)(then->tv_nsec));
}
if (updateval == (uint32_t)-1) {
int err = errno;
switch (err & 0xff) {
- case ETIMEDOUT:
- res = ETIMEDOUT;
- break;
- case EINTR:
- // spurious wakeup (unless canceled)
- res = 0;
- break;
- default:
- res = EINVAL;
- break;
+ case ETIMEDOUT:
+ res = ETIMEDOUT;
+ break;
+ case EINTR:
+ // spurious wakeup (unless canceled)
+ res = 0;
+ break;
+ default:
+ res = EINVAL;
+ break;
}
// add unlock ref to show one less waiter
- _pthread_cond_updateval(cond, err, 0);
+ _pthread_cond_updateval(cond, mutex, err, 0);
} else if (updateval != 0) {
// Successful wait
// The return due to prepost and might have bit states
// update S and return for prepo if needed
- _pthread_cond_updateval(cond, 0, updateval);
+ _pthread_cond_updateval(cond, mutex, 0, updateval);
}
- pthread_mutex_lock(omutex);
+ pthread_mutex_lock(mutex);
return res;
}
+struct pthread_ulock_cond_cancel_ctx_s {
+ pthread_cond_t *cond;
+ pthread_mutex_t *mutex;
+};
+
+static int
+_pthread_ulock_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *then, pthread_conformance_t conforming)
+{
+ bool cancelable = (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE);
+
+ uint64_t timeout_ns = 0;
+ if (then->tv_sec || then->tv_nsec) {
+ // psynch compatibility: cast and bitwise-truncate tv_nsec
+ uint64_t fraction_ns = ((uint32_t)then->tv_nsec) & 0x3fffffff;
+ if (os_mul_and_add_overflow(then->tv_sec, NSEC_PER_SEC, fraction_ns,
+ &timeout_ns)) {
+ // saturate (can't wait longer than 584 years...)
+ timeout_ns = UINT64_MAX;
+ }
+ }
+
+ pthread_ulock_cond_state_u *state = _pthread_ulock_cond_state(cond);
+
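+	// Register as a waiter and capture the sequence number before dropping
+	// the mutex: a signal that lands between the unlock and the wait bumps
+	// seq, so the ulock_wait below returns instead of blocking.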
+ pthread_ulock_cond_state_u origstate = {
+ .val = os_atomic_add(&state->val, _PTHREAD_COND_WAITERS_INC, relaxed)
+ };
+
+ int rc = _pthread_mutex_ulock_unlock(mutex);
+ if (rc) {
+ return _pthread_ulock_cond_wait_complete(state, NULL, rc);
+ }
+
+ PTHREAD_TRACE(ulcond_wait, cond, origstate.val, timeout_ns, 0);
+
+ do {
+ const uint32_t wait_op = UL_COMPARE_AND_WAIT | ULF_NO_ERRNO;
+ if (cancelable) {
+ struct pthread_ulock_cond_cancel_ctx_s ctx = {
+ .cond = cond,
+ .mutex = mutex,
+ };
+ pthread_cleanup_push(_pthread_ulock_cond_cleanup, &ctx);
+ rc = __ulock_wait2(wait_op | ULF_WAIT_CANCEL_POINT, &state->seq,
+ origstate.seq, timeout_ns, 0);
+ pthread_testcancel();
+ pthread_cleanup_pop(0);
+ } else {
+ rc = __ulock_wait2(wait_op, &state->seq, origstate.seq, timeout_ns, 0);
+ }
+ if (rc < 0) {
+ switch (-rc) {
+ case EFAULT:
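+				// the wait faulted on the state word; retry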
+ continue;
+ case EINTR:
+ // "These functions shall not return an error code of [EINTR]."
+ // => promote to spurious wake-up
+ rc = 0;
+ goto out;
+ case ETIMEDOUT:
+ rc = ETIMEDOUT;
+ goto out;
+ default:
+ PTHREAD_INTERNAL_CRASH(-rc, "ulock_wait failure");
+ }
+ } else {
+ // XXX for now don't care about other waiters
+ rc = 0;
+ }
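+		// if seq hasn't moved, no signal was consumed: the wake was
+		// spurious, so wait again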
+ } while (os_atomic_load(&state->seq, relaxed) == origstate.seq);
+
+out:
+ return _pthread_ulock_cond_wait_complete(state, mutex, rc);
+}
+
+static int
+_pthread_ulock_cond_wait_complete(pthread_ulock_cond_state_u *state,
+ pthread_mutex_t *mutex, int rc)
+{
+ if (mutex) {
+ // XXX Check this return value? Historically we haven't, but if rc == 0
+ // we could promote the return value to this one.
+ _pthread_mutex_ulock_lock(mutex, false);
+ }
+
+ pthread_ulock_cond_state_u oldstate, newstate;
+ // acquire to pair with release upon signal
+ os_atomic_rmw_loop(&state->val, oldstate.val, newstate.val, acquire, {
+ newstate = (pthread_ulock_cond_state_u){
+ .seq = oldstate.seq,
+ .waiters = oldstate.waiters - 1,
+ .signal = oldstate.signal ? oldstate.signal - 1 : 0,
+ };
+ });
+
+ return rc;
+}
+
+/*
+ * Suspend waiting for a condition variable.
+ * If conformance is not cancelable, we skip the pthread_testcancel(),
+ * but keep the remaining conforming behavior.
+ */
+PTHREAD_NOEXPORT OS_NOINLINE
+int
+_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime, int isRelative,
+ pthread_conformance_t conforming)
+{
+ int res;
+ struct timespec then = { 0, 0 };
+ bool timeout_elapsed = false;
+
+ if (!_pthread_mutex_check_signature(mutex) &&
+ !_pthread_mutex_check_signature_init(mutex)) {
+ return EINVAL;
+ }
+
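+	// The first waiter binds the condvar to the mutex's flavor: ulock
+	// mutexes pair with ulock condvars, psynch with psynch.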
+ bool ulock = _pthread_mutex_uses_ulock(mutex);
+ uint32_t sig = ulock ? _PTHREAD_COND_SIG_ulock : _PTHREAD_COND_SIG_psynch;
+ res = _pthread_cond_check_init(cond, &sig);
+ if (res != 0) {
+ return res;
+ }
+
+ if (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE) {
+ pthread_testcancel();
+ }
+
+ /* send relative time to kernel */
+ if (abstime) {
+ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= NSEC_PER_SEC) {
+ // TODO: PTHREAD_STRICT candidate
+ return EINVAL;
+ }
+
+ if (isRelative == 0) {
+ struct timespec now;
+ struct timeval tv;
+ __gettimeofday(&tv, NULL);
+ TIMEVAL_TO_TIMESPEC(&tv, &now);
+
+ if ((abstime->tv_sec == now.tv_sec) ?
+ (abstime->tv_nsec <= now.tv_nsec) :
+ (abstime->tv_sec < now.tv_sec)) {
+ timeout_elapsed = true;
+ } else {
+ /* Compute relative time to sleep */
+ then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
+ then.tv_sec = abstime->tv_sec - now.tv_sec;
+ if (then.tv_nsec < 0) {
+ then.tv_nsec += NSEC_PER_SEC;
+ then.tv_sec--;
+ }
+ }
+ } else {
+ then.tv_sec = abstime->tv_sec;
+ then.tv_nsec = abstime->tv_nsec;
+ if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
+ timeout_elapsed = true;
+ }
+ }
+ }
+
+ if (!ulock && cond->busy != NULL && cond->busy != mutex) {
+ // TODO: PTHREAD_STRICT candidate
+ return EINVAL;
+ }
+
+ /*
+ * If timeout is known to have elapsed, we still need to unlock and
+ * relock the mutex to allow other waiters to get in line and
+ * modify the condition state.
+ */
+ if (timeout_elapsed) {
+ res = pthread_mutex_unlock(mutex);
+ if (res != 0) {
+ return res;
+ }
+ res = pthread_mutex_lock(mutex);
+ if (res != 0) {
+ return res;
+ }
+
+ return ETIMEDOUT;
+ }
+
+ if (ulock) {
+ return _pthread_ulock_cond_wait(cond, mutex, &then, conforming);
+ } else {
+ return _pthread_psynch_cond_wait(cond, mutex, &then, conforming);
+ }
+}
+
static void
-_pthread_cond_cleanup(void *arg)
+_pthread_ulock_cond_cleanup(void *arg)
{
- _pthread_cond *cond = (_pthread_cond *)arg;
- pthread_mutex_t *mutex;
+ struct pthread_ulock_cond_cancel_ctx_s *ctx = arg;
+ pthread_ulock_cond_state_u *state = _pthread_ulock_cond_state(ctx->cond);
+
+ (void)_pthread_ulock_cond_wait_complete(state, ctx->mutex, 0);
+
+ // "A thread that has been unblocked because it has been canceled while
+ // blocked in a call to pthread_cond_timedwait() or pthread_cond_wait()
+ // shall not consume any condition signal that may be directed concurrently
+ // at the condition variable if there are other threads blocked on the
+ // condition variable."
+ //
+ // Since we have no way to know if we've eaten somebody else's signal, just
+ // signal again pessimistically.
+ pthread_cond_signal(ctx->cond);
+}
-// 4597450: begin
+static void
+_pthread_psynch_cond_cleanup(void *arg)
+{
+ pthread_cond_t *cond = (pthread_cond_t *)arg;
pthread_t thread = pthread_self();
- int thcanceled = 0;
-
- _PTHREAD_LOCK(thread->lock);
- thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
- _PTHREAD_UNLOCK(thread->lock);
+ pthread_mutex_t *mutex;
- if (thcanceled == 0) {
+// 4597450: begin
+ if (!thread->canceled) {
return;
}
-
// 4597450: end
- mutex = (pthread_mutex_t *)cond->busy;
+
+ mutex = cond->busy;
// add unlock ref to show one less waiter
- _pthread_cond_updateval(cond, thread->cancel_error, 0);
+ _pthread_cond_updateval(cond, mutex, thread->cancel_error, 0);
/*
** Can't do anything if this fails -- we're on the way out
}
}
-#define ECVCERORR 256
-#define ECVPERORR 512
-
static void
-_pthread_cond_updateval(_pthread_cond *cond, int error, uint32_t updateval)
+_pthread_cond_updateval(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ int error, uint32_t updateval)
{
int needclearpre;
if (error != 0) {
updateval = PTHRW_INC;
- if ((error & ECVCERORR) != 0) {
+ if (error & ECVCLEARED) {
updateval |= PTH_RWS_CV_CBIT;
}
- if ((error & ECVPERORR) != 0) {
+ if (error & ECVPREPOST) {
updateval |= PTH_RWS_CV_PBIT;
}
}
oldval64 = (((uint64_t)scntval) << 32);
oldval64 |= lcntval;
- if (diffgen <= 0) {
+ PTHREAD_TRACE(psynch_cvar_updateval | DBG_FUNC_START, cond, oldval64,
+ updateval, 0);
+
+ if (diffgen <= 0 && !is_rws_pbit_set(updateval)) {
/* TBD: Assert, should not be the case */
/* validate it is spurious and return */
newval64 = oldval64;
}
} while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));
+ PTHREAD_TRACE(psynch_cvar_updateval | DBG_FUNC_END, cond, newval64,
+ (uint64_t)diffgen << 32 | needclearpre, 0);
+
if (diffgen > 0) {
// if L == S, then reset associated mutex
if ((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
cond->busy = NULL;
}
+ }
- if (needclearpre != 0) {
- uint32_t flags = 0;
- if (cond->pshared == PTHREAD_PROCESS_SHARED) {
- flags |= _PTHREAD_MTX_OPT_PSHARED;
- }
- (void)__psynch_cvclrprepost(cond, lcntval, ucntval, nsval, 0, lcntval, flags);
+ if (needclearpre) {
+ uint32_t flags = 0;
+ if (cond->pshared == PTHREAD_PROCESS_SHARED) {
+ flags |= _PTHREAD_MTX_OPT_PSHARED;
}
+ (void)__psynch_cvclrprepost(cond, lcntval, ucntval, nsval, 0, lcntval, flags);
}
}
PTHREAD_NOEXPORT_VARIANT
int
-pthread_cond_init(pthread_cond_t *ocond, const pthread_condattr_t *attr)
+pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
{
- int conforming;
-
-#if __DARWIN_UNIX03
- conforming = 1;
-#else /* __DARWIN_UNIX03 */
- conforming = 0;
-#endif /* __DARWIN_UNIX03 */
-
- _pthread_cond *cond = (_pthread_cond *)ocond;
- _PTHREAD_LOCK_INIT(cond->lock);
- return _pthread_cond_init(cond, attr, conforming);
+ _pthread_lock_init(&cond->lock);
+ return _pthread_cond_init(cond, attr, _PTHREAD_COND_SIG_pristine);
}