+ pthread_ulock_cond_state_u origstate = {
+ .val = os_atomic_add(&state->val, _PTHREAD_COND_WAITERS_INC, relaxed)
+ };
+
+ int rc = _pthread_mutex_ulock_unlock(mutex);
+ if (rc) {
+ return _pthread_ulock_cond_wait_complete(state, NULL, rc);
+ }
+
+ PTHREAD_TRACE(ulcond_wait, cond, origstate.val, timeout_ns, 0);
+
+ do {
+ const uint32_t wait_op = UL_COMPARE_AND_WAIT | ULF_NO_ERRNO;
+ if (cancelable) {
+ struct pthread_ulock_cond_cancel_ctx_s ctx = {
+ .cond = cond,
+ .mutex = mutex,
+ };
+ pthread_cleanup_push(_pthread_ulock_cond_cleanup, &ctx);
+ rc = __ulock_wait2(wait_op | ULF_WAIT_CANCEL_POINT, &state->seq,
+ origstate.seq, timeout_ns, 0);
+ pthread_testcancel();
+ pthread_cleanup_pop(0);
+ } else {
+ rc = __ulock_wait2(wait_op, &state->seq, origstate.seq, timeout_ns, 0);
+ }
+ if (rc < 0) {
+ switch (-rc) {
+ case EFAULT:
+ continue;
+ case EINTR:
+ // "These functions shall not return an error code of [EINTR]."
+ // => promote to spurious wake-up
+ rc = 0;
+ goto out;
+ case ETIMEDOUT:
+ rc = ETIMEDOUT;
+ goto out;
+ default:
+ PTHREAD_INTERNAL_CRASH(-rc, "ulock_wait failure");
+ }
+ } else {
+ // XXX for now don't care about other waiters
+ rc = 0;
+ }
+ } while (os_atomic_load(&state->seq, relaxed) == origstate.seq);
+
+out:
+ return _pthread_ulock_cond_wait_complete(state, mutex, rc);
+}
+
+/*
+ * Finish a ulock-based condvar wait: re-acquire the associated mutex (when
+ * one is supplied) and drop this thread's contribution to the waiter/signal
+ * accounting packed into the condvar state word.  Returns rc unchanged so
+ * wait paths can funnel their result through this helper.
+ */
+static int
+_pthread_ulock_cond_wait_complete(pthread_ulock_cond_state_u *state,
+		pthread_mutex_t *mutex, int rc)
+{
+	// mutex may be NULL: the waiter passes NULL on the path where unlocking
+	// the mutex failed before the wait ever began, so there is nothing to
+	// re-take here.
+	if (mutex) {
+		// XXX Check this return value? Historically we haven't, but if rc == 0
+		// we could promote the return value to this one.
+		_pthread_mutex_ulock_lock(mutex, false);
+	}
+
+	pthread_ulock_cond_state_u oldstate, newstate;
+	// acquire to pair with release upon signal
+	os_atomic_rmw_loop(&state->val, oldstate.val, newstate.val, acquire, {
+		// Leave the sequence number alone; remove ourselves from the waiter
+		// count and consume one pending signal if any, clamping at zero so a
+		// spurious/timed-out wakeup never underflows the signal field.
+		newstate = (pthread_ulock_cond_state_u){
+			.seq = oldstate.seq,
+			.waiters = oldstate.waiters - 1,
+			.signal = oldstate.signal ? oldstate.signal - 1 : 0,
+		};
+	});
+
+	return rc;
+}
+
+/*
+ * Suspend waiting for a condition variable.
+ * If conformance is not cancelable, we skip the pthread_testcancel(),
+ * but keep the remaining conforming behavior.
+ */
+PTHREAD_NOEXPORT OS_NOINLINE
+int
+_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
+ const struct timespec *abstime, int isRelative,
+ pthread_conformance_t conforming)
+{
+ int res;
+ struct timespec then = { 0, 0 };
+ bool timeout_elapsed = false;
+
+ if (!_pthread_mutex_check_signature(mutex) &&
+ !_pthread_mutex_check_signature_init(mutex)) {
+ return EINVAL;
+ }
+
+ bool ulock = _pthread_mutex_uses_ulock(mutex);
+ uint32_t sig = ulock ? _PTHREAD_COND_SIG_ulock : _PTHREAD_COND_SIG_psynch;
+ res = _pthread_cond_check_init(cond, &sig);
+ if (res != 0) {
+ return res;
+ }
+
+ if (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE) {
+ pthread_testcancel();