#endif /* PLOCKSTAT */
extern int __gettimeofday(struct timeval *, struct timezone *);
-extern void _pthread_testcancel(pthread_t thread, int isconforming);
PTHREAD_NOEXPORT
int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
#ifndef BUILDING_VARIANT /* [ */
static void _pthread_cond_cleanup(void *arg);
-static void _pthread_cond_updateval(_pthread_cond * cond, int error,
- uint32_t updateval);
+static void _pthread_cond_updateval(_pthread_cond *cond, _pthread_mutex *mutex,
+ int error, uint32_t updateval);
int
}
if (updateval != (uint32_t)-1 && updateval != 0) {
- _pthread_cond_updateval(cond, 0, updateval);
+ _pthread_cond_updateval(cond, NULL, 0, updateval);
}
return 0;
* Suspend waiting for a condition variable.
* Note: we have to keep a list of condition variables which are using
* this same mutex variable so we can detect invalid 'destroy' sequences.
- * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
- * remaining conforming behavior..
+ * If conformance is not cancelable, we skip the _pthread_testcancel(),
+ * but keep the remaining conforming behavior.
*/
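As a point of reference for the conforming behavior described above, a minimal caller-side sketch of the predicate loop this wait path serves; the `ready`, `lock`, `cond`, and `wait_until_ready` names are illustrative and not part of the library or of this change:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int ready; /* predicate guarded by `lock` */

    void
    wait_until_ready(void)
    {
        pthread_mutex_lock(&lock);
        while (!ready) {
            /* May wake spuriously, and in the conforming case acts as a
             * cancellation point, so the predicate is rechecked each time. */
            pthread_cond_wait(&cond, &lock);
        }
        pthread_mutex_unlock(&lock);
    }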
PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
pthread_mutex_t *omutex,
const struct timespec *abstime,
int isRelative,
- int isconforming)
+ int conforming)
{
int res;
_pthread_cond *cond = (_pthread_cond *)ocond;
volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
uint64_t oldval64, newval64, mugen, cvlsgen;
uint32_t *npmtx = NULL;
+ int timeout_elapsed = 0;
res = _pthread_cond_check_init(cond, NULL);
if (res != 0) {
return res;
}
- if (isconforming) {
+ if (conforming) {
if (!_pthread_mutex_check_signature(mutex) &&
!_pthread_mutex_check_signature_init(mutex)) {
return EINVAL;
}
- if (isconforming > 0) {
- _pthread_testcancel(pthread_self(), 1);
+ if (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE) {
+ _pthread_testcancel(conforming);
}
}
/* send relative time to kernel */
if (abstime) {
+ if (abstime->tv_nsec < 0 || abstime->tv_nsec >= NSEC_PER_SEC) {
+ return EINVAL;
+ }
+
if (isRelative == 0) {
struct timespec now;
struct timeval tv;
__gettimeofday(&tv, NULL);
TIMEVAL_TO_TIMESPEC(&tv, &now);
- /* Compute relative time to sleep */
- then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
- then.tv_sec = abstime->tv_sec - now.tv_sec;
- if (then.tv_nsec < 0) {
- then.tv_nsec += NSEC_PER_SEC;
- then.tv_sec--;
- }
- if (then.tv_sec < 0 || (then.tv_sec == 0 && then.tv_nsec == 0)) {
- return ETIMEDOUT;
- }
- if (isconforming &&
- (abstime->tv_sec < 0 ||
- abstime->tv_nsec < 0 ||
- abstime->tv_nsec >= NSEC_PER_SEC)) {
- return EINVAL;
+ if ((abstime->tv_sec == now.tv_sec) ?
+ (abstime->tv_nsec <= now.tv_nsec) :
+ (abstime->tv_sec < now.tv_sec)) {
+ timeout_elapsed = 1;
+ } else {
+ /* Compute relative time to sleep */
+ then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
+ then.tv_sec = abstime->tv_sec - now.tv_sec;
+ if (then.tv_nsec < 0) {
+ then.tv_nsec += NSEC_PER_SEC;
+ then.tv_sec--;
+ }
}
} else {
then.tv_sec = abstime->tv_sec;
then.tv_nsec = abstime->tv_nsec;
if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
- return ETIMEDOUT;
+ timeout_elapsed = 1;
}
}
- if (isconforming && (then.tv_sec < 0 || then.tv_nsec < 0)) {
- return EINVAL;
- }
- if (then.tv_nsec >= NSEC_PER_SEC) {
- return EINVAL;
- }
}
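Worked example of the conversion above: a deadline of {tv_sec = 5, tv_nsec = 200000000} measured against a now of {tv_sec = 3, tv_nsec = 900000000} borrows one second and yields a relative {tv_sec = 1, tv_nsec = 300000000}. A standalone sketch of the same arithmetic, where `reltime_until` is an illustrative name rather than a libpthread routine:

    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    /* Fill *rel with (deadline - now); return -1 if the deadline has passed. */
    static int
    reltime_until(const struct timespec *deadline, const struct timespec *now,
            struct timespec *rel)
    {
        if ((deadline->tv_sec == now->tv_sec) ?
                (deadline->tv_nsec <= now->tv_nsec) :
                (deadline->tv_sec < now->tv_sec)) {
            return -1;
        }
        rel->tv_nsec = deadline->tv_nsec - now->tv_nsec;
        rel->tv_sec = deadline->tv_sec - now->tv_sec;
        if (rel->tv_nsec < 0) {
            /* borrow one second */
            rel->tv_nsec += NSEC_PER_SEC;
            rel->tv_sec--;
        }
        return 0;
    }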
if (cond->busy != NULL && cond->busy != mutex) {
return EINVAL;
}
+ /*
+ * If timeout is known to have elapsed, we still need to unlock and
+ * relock the mutex to allow other waiters to get in line and
+ * modify the condition state.
+ */
+ if (timeout_elapsed) {
+ res = pthread_mutex_unlock(omutex);
+ if (res != 0) {
+ return res;
+ }
+ res = pthread_mutex_lock(omutex);
+ if (res != 0) {
+ return res;
+ }
+ return ETIMEDOUT;
+ }
+
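From the caller's side, the effect of the block above is that ETIMEDOUT is only ever reported with the mutex re-acquired, so one final predicate check cannot race with signalers. A minimal sketch under that assumption, with illustrative names:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <time.h>

    /* Wait for *flagp (guarded by mu) to become nonzero, up to `deadline`. */
    static bool
    wait_with_deadline(pthread_mutex_t *mu, pthread_cond_t *cv, int *flagp,
            const struct timespec *deadline)
    {
        bool ok = true;
        pthread_mutex_lock(mu);
        while (!*flagp) {
            int rc = pthread_cond_timedwait(cv, mu, deadline);
            if (rc == ETIMEDOUT) {
                /* Mutex is held again here, even when the deadline had
                 * already passed on entry, so this check cannot race. */
                ok = (*flagp != 0);
                break;
            } else if (rc != 0) {
                ok = false;
                break;
            }
        }
        pthread_mutex_unlock(mu);
        return ok;
    }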
COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
do {
cvlsgen = ((uint64_t)(ulval | savebits) << 32) | nlval;
// SUSv3 requires pthread_cond_wait to be a cancellation point
- if (isconforming) {
+ if (conforming) {
pthread_cleanup_push(_pthread_cond_cleanup, (void *)cond);
updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
- _pthread_testcancel(pthread_self(), isconforming);
+ _pthread_testcancel(conforming);
pthread_cleanup_pop(0);
} else {
updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
}
// add unlock ref to show one less waiter
- _pthread_cond_updateval(cond, err, 0);
+ _pthread_cond_updateval(cond, mutex, err, 0);
} else if (updateval != 0) {
// Successful wait
// The return may be due to a prepost and might carry bit states
// update S and return for the prepost if needed
- _pthread_cond_updateval(cond, 0, updateval);
+ _pthread_cond_updateval(cond, mutex, 0, updateval);
}
pthread_mutex_lock(omutex);
_pthread_cond_cleanup(void *arg)
{
_pthread_cond *cond = (_pthread_cond *)arg;
+ pthread_t thread = pthread_self();
pthread_mutex_t *mutex;
// 4597450: begin
- pthread_t thread = pthread_self();
- int thcanceled = 0;
-
- _PTHREAD_LOCK(thread->lock);
- thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
- _PTHREAD_UNLOCK(thread->lock);
-
- if (thcanceled == 0) {
+ if (!thread->canceled) {
return;
}
-
// 4597450: end
+
mutex = (pthread_mutex_t *)cond->busy;
// add unlock ref to show one less waiter
- _pthread_cond_updateval(cond, thread->cancel_error, 0);
+ _pthread_cond_updateval(cond, (_pthread_mutex *)mutex,
+ thread->cancel_error, 0);
/*
** Can't do anything if this fails -- we're on the way out
}
}
-#define ECVCERORR 256
-#define ECVPERORR 512
-
static void
-_pthread_cond_updateval(_pthread_cond *cond, int error, uint32_t updateval)
+_pthread_cond_updateval(_pthread_cond *cond, _pthread_mutex *mutex,
+ int error, uint32_t updateval)
{
int needclearpre;
if (error != 0) {
updateval = PTHRW_INC;
- if ((error & ECVCERORR) != 0) {
+ if (error & ECVCLEARED) {
updateval |= PTH_RWS_CV_CBIT;
}
- if ((error & ECVPERORR) != 0) {
+ if (error & ECVPREPOST) {
updateval |= PTH_RWS_CV_PBIT;
}
}
oldval64 = (((uint64_t)scntval) << 32);
oldval64 |= lcntval;
- if (diffgen <= 0) {
+ PTHREAD_TRACE(psynch_cvar_updateval | DBG_FUNC_START, cond, oldval64,
+ updateval, 0);
+
+ if (diffgen <= 0 && !is_rws_pbit_set(updateval)) {
/* TBD: Assert, should not be the case */
/* validate it is spurious and return */
newval64 = oldval64;
}
} while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));
+ PTHREAD_TRACE(psynch_cvar_updateval | DBG_FUNC_END, cond, newval64,
+ (uint64_t)diffgen << 32 | needclearpre, 0);
+
if (diffgen > 0) {
// if L == S, then reset associated mutex
if ((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
cond->busy = NULL;
}
+ }
- if (needclearpre != 0) {
- uint32_t flags = 0;
- if (cond->pshared == PTHREAD_PROCESS_SHARED) {
- flags |= _PTHREAD_MTX_OPT_PSHARED;
- }
- (void)__psynch_cvclrprepost(cond, lcntval, ucntval, nsval, 0, lcntval, flags);
+ if (needclearpre) {
+ uint32_t flags = 0;
+ if (cond->pshared == PTHREAD_PROCESS_SHARED) {
+ flags |= _PTHREAD_MTX_OPT_PSHARED;
}
+ (void)__psynch_cvclrprepost(cond, lcntval, ucntval, nsval, 0, lcntval, flags);
}
}