/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */

#include <sys/time.h> /* For struct timespec and getclock(). */

#if PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */
} pthread_ulock_cond_state_u;

#define _PTHREAD_COND_WAITERS_INC \
		(1ull << (offsetof(pthread_ulock_cond_state_u, waiters) * CHAR_BIT))
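/*
 * Illustrative note (added commentary, not from the original source): the
 * ulock condvar packs seq/waiters/signal into one 64-bit word, so a plain
 * 64-bit atomic add of _PTHREAD_COND_WAITERS_INC increments only the
 * 'waiters' subfield. For example, if offsetof(pthread_ulock_cond_state_u,
 * waiters) is 4, the constant is 1ull << 32, and
 *
 *	os_atomic_add(&state->val, _PTHREAD_COND_WAITERS_INC, relaxed);
 *
 * adds one to 'waiters' without disturbing 'seq' (assuming 'waiters' never
 * overflows into the neighbouring field).
 */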
COND_GETSEQ_ADDR(pthread_cond_t *cond,
		volatile uint64_t **c_lsseqaddr,
		volatile uint32_t **c_lseqcnt,
		volatile uint32_t **c_useqcnt,
		volatile uint32_t **c_sseqcnt)
{
	if (cond->misalign) {
		*c_lseqcnt = &cond->c_seq[1];
		*c_sseqcnt = &cond->c_seq[2];
		*c_useqcnt = &cond->c_seq[0];
	} else {
		*c_lseqcnt = &cond->c_seq[0];
		*c_sseqcnt = &cond->c_seq[1];
		*c_useqcnt = &cond->c_seq[2];
	}
	*c_lsseqaddr = (volatile uint64_t *)*c_lseqcnt;
}
static inline pthread_ulock_cond_state_u *
_pthread_ulock_cond_state(pthread_cond_t *cond)
{
	return (pthread_ulock_cond_state_u *)&cond->c_seq[cond->misalign];
}
#ifndef BUILDING_VARIANT /* [ */

static void _pthread_psynch_cond_cleanup(void *arg);
static void _pthread_cond_updateval(pthread_cond_t *cond, pthread_mutex_t *mutex,
		int error, uint32_t updateval);

static int
_pthread_ulock_cond_wait_complete(pthread_ulock_cond_state_u *state,
		pthread_mutex_t *mutex, int rc);
static void
_pthread_ulock_cond_cleanup(void *arg);
int
pthread_condattr_init(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_COND_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}
int
pthread_condattr_destroy(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}
int
pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared)
{
	if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
		*pshared = (int)attr->pshared;
int
pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
	if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
		if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
			attr->pshared = pshared;
int
pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex,
		const struct timespec *abstime)
{
	return _pthread_cond_wait(cond, mutex, abstime, 1,
			PTHREAD_CONFORM_UNIX03_NOCANCEL);
}

#endif /* !BUILDING_VARIANT ] */
_pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr,
		uint32_t sig)
{
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	// TODO: PTHREAD_STRICT candidate
	cond->misalign = (((uintptr_t)&cond->c_seq[0]) & 0x7) != 0;
	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
	*c_sseqcnt = PTH_RWS_CV_CBIT; // set Sword to 0c

	if (attr) {
		cond->pshared = attr->pshared;
	} else {
		cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	}

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	os_atomic_store(&cond->sig.val, sig, release);
#ifndef BUILDING_VARIANT /* [ */
_pthread_cond_check_signature(pthread_cond_t *cond, uint32_t sig_current,
		uint32_t *sig_inout)
{
	switch (sig_current) {
	case _PTHREAD_COND_SIG_init:
		__builtin_unreachable();
	case _PTHREAD_COND_SIG_pristine:
		if (*sig_inout != _PTHREAD_COND_SIG_pristine) {
			os_atomic_store(&cond->sig.val, *sig_inout, relaxed);
		}
		break;
	case _PTHREAD_COND_SIG_psynch:
	case _PTHREAD_COND_SIG_ulock:
		if (*sig_inout == _PTHREAD_COND_SIG_pristine) {
			*sig_inout = sig_current;
		} else if (*sig_inout != sig_current) {
			PTHREAD_INTERNAL_CRASH(0, "Mixed ulock and psych condvar use");
		}
		break;
	default:
		// TODO: PTHREAD_STRICT candidate
_pthread_cond_check_init_slow(pthread_cond_t *cond, uint32_t *sig_inout)
{
	_pthread_lock_lock(&cond->lock);

	uint32_t sig_current = os_atomic_load(&cond->sig.val, relaxed);
	if (sig_current == _PTHREAD_COND_SIG_init) {
		res = _pthread_cond_init(cond, NULL, *sig_inout);
	} else {
		res = _pthread_cond_check_signature(cond, sig_current, sig_inout);
	}

	_pthread_lock_unlock(&cond->lock);
/*
 * These routines maintain the signature of the condition variable, which
 * encodes a small state machine:
 * - a statically initialized condvar begins with SIG_init
 * - explicit initialization via _cond_init() and implicit initialization
 *   transition to SIG_pristine, as there have been no waiters so we don't know
 *   what kind of mutex we'll be used with
 * - the first _cond_wait() transitions to one of SIG_psynch or SIG_ulock
 *   according to the mutex being waited on
 *
 * On entry, *sig_inout is the furthest state we can transition to given the
 * calling context. On exit, it is the actual state we observed, after any
 * possible advancement.
 */
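/*
 * Illustrative sequence (added commentary, not from the original source),
 * assuming 'm' is a locked pthread_mutex_t for which
 * _pthread_mutex_uses_ulock() returns true:
 *
 *	pthread_cond_t cv = PTHREAD_COND_INITIALIZER;  // sig == SIG_init
 *	pthread_cond_signal(&cv);                      // implicit init, sig -> SIG_pristine
 *	pthread_cond_wait(&cv, &m);                    // first wait, sig -> SIG_ulock
 *
 * Waiting on the same condvar later with a psynch-backed mutex would then hit
 * the "Mixed ulock and psych condvar use" crash in _pthread_cond_check_signature.
 */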
_pthread_cond_check_init(pthread_cond_t *cond, uint32_t *sig_inout)
{
	uint32_t sig_current = os_atomic_load(&cond->sig.val, relaxed);
	if (sig_current == _PTHREAD_COND_SIG_init) {
		return _pthread_cond_check_init_slow(cond, sig_inout);
	}
	return _pthread_cond_check_signature(cond, sig_current, sig_inout);
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_cond_destroy(pthread_cond_t *cond)
{
	uint32_t sig = os_atomic_load(&cond->sig.val, relaxed);
	switch (sig) {
	case _PTHREAD_COND_SIG_psynch:
		_pthread_lock_lock(&cond->lock);

		uint64_t oldval64, newval64;
		uint32_t lcntval, ucntval, scntval;
		volatile uint64_t *c_lsseqaddr;
		volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

		COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

		do {
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;

			// validate it is not busy
			if ((lcntval & PTHRW_COUNT_MASK) != (scntval & PTHRW_COUNT_MASK)) {

			oldval64 = (((uint64_t)scntval) << 32);
		} while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));

		// <rdar://problem/13782056> Need to clear preposts.
		bool needclearpre = ((scntval & PTH_RWS_CV_PBIT) != 0);
		if (needclearpre && cond->pshared == PTHREAD_PROCESS_SHARED) {
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		}

		os_atomic_store(&cond->sig.val, _PTHREAD_NO_SIG, relaxed);

		_pthread_lock_unlock(&cond->lock);

		(void)__psynch_cvclrprepost(cond, lcntval, ucntval, scntval, 0, lcntval, flags);
		break;
	case _PTHREAD_COND_SIG_init:
		// Compatibility for misbehaving applications that attempt to
		// destroy a statically initialized condition variable.
	case _PTHREAD_COND_SIG_pristine:
	case _PTHREAD_COND_SIG_ulock:
		os_atomic_store(&cond->sig.val, _PTHREAD_NO_SIG, relaxed);
		break;
	default:
		// TODO: PTHREAD_STRICT candidate
_pthread_psynch_cond_signal(pthread_cond_t *cond, bool broadcast,
		mach_port_t thread)
{
	uint64_t oldval64, newval64;
	uint32_t lcntval, ucntval, scntval;
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	int retry_count = 0, uretry_count = 0;

	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	do {
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
				(thread == MACH_PORT_NULL && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
			/* validate it is spurious and return */
			oldval64 = (((uint64_t)scntval) << 32);

			if (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst)) {

		/* validate to eliminate spurious values, race snapshots */
		if (is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			if (retry_count > 8192) {
		} else if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			if (uretry_count > 8192) {
				/*
				 * U value if not used for a while can go out of sync
				 * set this to S value and try one more time.
				 */
				if (ucountreset != 0) {
				} else if (os_atomic_cmpxchg(c_useqcnt, ucntval, (scntval & PTHRW_COUNT_MASK), seq_cst)) {
					/* now the U is reset to S value */

		if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
			/* If U < S, set U = S+diff due to intr's TO, etc */
			ulval = (scntval & PTHRW_COUNT_MASK);
		} else {
			/* If U >= S, set U = U+diff due to intr's TO, etc */
			ulval = (ucntval & PTHRW_COUNT_MASK);
		}
		diffgen = diff_genseq(lcntval, ulval);

		ulval = (lcntval & PTHRW_COUNT_MASK);
	} while (retry || !os_atomic_cmpxchg(c_useqcnt, ucntval, ulval, seq_cst));

	if (cond->pshared == PTHREAD_PROCESS_SHARED) {
		flags |= _PTHREAD_MTX_OPT_PSHARED;
	}

	uint64_t cvlsgen = ((uint64_t)scntval << 32) | lcntval;

	if (broadcast) {
		// pass old U val so kernel will know the diffgen
		uint64_t cvudgen = ((uint64_t)ucntval << 32) | diffgen;
		updateval = __psynch_cvbroad(cond, cvlsgen, cvudgen, flags, NULL, 0, 0);
	} else {
		updateval = __psynch_cvsignal(cond, cvlsgen, ucntval, thread, NULL, 0, 0, flags);
	}

	if (updateval != (uint32_t)-1 && updateval != 0) {
		_pthread_cond_updateval(cond, NULL, 0, updateval);
	}
_pthread_ulock_cond_signal(pthread_cond_t *cond, bool broadcast,
		mach_port_t thread)
{
	pthread_ulock_cond_state_u *state = _pthread_ulock_cond_state(cond);

	pthread_ulock_cond_state_u oldstate, newstate;
	// release to pair with acquire after wait
	os_atomic_rmw_loop(&state->val, oldstate.val, newstate.val, release, {
		if (!oldstate.waiters || oldstate.waiters == oldstate.signal) {
			os_atomic_rmw_loop_give_up(return 0);
		}

		newstate = (pthread_ulock_cond_state_u){
			.seq = oldstate.seq + 1,
			.waiters = oldstate.waiters,
			.signal = broadcast ? oldstate.waiters :
					MIN(oldstate.signal + 1, oldstate.waiters),
		};
	});

	PTHREAD_TRACE(ulcond_signal, cond, oldstate.val, newstate.val, broadcast);

	// Priority hole: if we're pre-empted here, nobody else can signal the
	// waiter we took responsibility for signaling by incrementing the signal
	// count.
	if (oldstate.signal < oldstate.waiters) {
		uint32_t wake_op = UL_COMPARE_AND_WAIT | ULF_NO_ERRNO;
		if (broadcast) {
			wake_op |= ULF_WAKE_ALL;
		} else if (thread) {
			wake_op |= ULF_WAKE_THREAD;
		}

		int rc = __ulock_wake(wake_op, &state->seq, thread);
			PTHREAD_INTERNAL_CRASH(0, "EALREADY from ulock_wake");
			// Compatibility with psynch: promote to broadcast
			return pthread_cond_broadcast(cond);
			PTHREAD_INTERNAL_CRASH(-rc, "ulock_wake failure");
_pthread_cond_signal(pthread_cond_t *cond, bool broadcast, mach_port_t thread)
{
	uint32_t sig = _PTHREAD_COND_SIG_pristine;
	int res = _pthread_cond_check_init(cond, &sig);
	if (res != 0 || sig == _PTHREAD_COND_SIG_pristine) {
		return res;
	}

	switch (sig) {
	case _PTHREAD_COND_SIG_psynch:
		return _pthread_psynch_cond_signal(cond, broadcast, thread);
	case _PTHREAD_COND_SIG_ulock:
		return _pthread_ulock_cond_signal(cond, broadcast, thread);
	default:
		PTHREAD_INTERNAL_CRASH(sig, "impossible cond signature");
	}
}
/*
 * Signal a condition variable, waking up all threads waiting for it.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_cond_broadcast(pthread_cond_t *cond)
{
	return _pthread_cond_signal(cond, true, MACH_PORT_NULL);
}
/*
 * Signal a condition variable, waking a specified thread.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_cond_signal_thread_np(pthread_cond_t *cond, pthread_t thread)
{
	mach_port_t mp = MACH_PORT_NULL;
	if (thread) {
		mp = pthread_mach_thread_np((_Nonnull pthread_t)thread);
	}
	return _pthread_cond_signal(cond, false, mp);
}
/*
 * Signal a condition variable, waking only one thread.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_cond_signal(pthread_cond_t *cond)
{
	return _pthread_cond_signal(cond, false, MACH_PORT_NULL);
}
_pthread_psynch_cond_wait(pthread_cond_t *cond,
		pthread_mutex_t *mutex,
		const struct timespec *then,
		pthread_conformance_t conforming)
{
	uint32_t mtxgen, mtxugen, flags = 0, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval, savebits;
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen;
	uint32_t *npmtx = NULL;

	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	do {
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		oldval64 = (((uint64_t)scntval) << 32);

		/* remove c and p bits on S word */
		savebits = scntval & PTH_RWS_CV_BITSALL;
		ulval = (scntval & PTHRW_COUNT_MASK);
		nlval = lcntval + PTHRW_INC;
		newval64 = (((uint64_t)ulval) << 32);
	} while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));

	int res = _pthread_mutex_droplock(mutex, &flags, &npmtx, &mtxgen, &mtxugen);

	/* TBD: cases are for normal (non owner for recursive mutex; error checking) */
	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) == 0) {
	mugen = ((uint64_t)mtxugen << 32) | mtxgen;

	flags &= ~_PTHREAD_MTX_OPT_MUTEX; /* reset the mutex bit as this is cvar */

	cvlsgen = ((uint64_t)(ulval | savebits) << 32) | nlval;

	// SUSv3 requires pthread_cond_wait to be a cancellation point
	if (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE) {
		pthread_cleanup_push(_pthread_psynch_cond_cleanup, (void *)cond);
		updateval = __psynch_cvwait(cond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx,
				mugen, flags, (int64_t)(then->tv_sec), (int32_t)(then->tv_nsec));
		pthread_testcancel();
		pthread_cleanup_pop(0);
	} else {
		updateval = __psynch_cvwait(cond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx,
				mugen, flags, (int64_t)(then->tv_sec), (int32_t)(then->tv_nsec));
	}

	if (updateval == (uint32_t)-1) {
		switch (err & 0xff) {
			// spurious wakeup (unless canceled)

		// add unlock ref to show one less waiter
		_pthread_cond_updateval(cond, mutex, err, 0);
	} else if (updateval != 0) {
		// The return is due to prepost and might have bit states
		// update S and return for prepost if needed
		_pthread_cond_updateval(cond, mutex, 0, updateval);
	}

	pthread_mutex_lock(mutex);
struct pthread_ulock_cond_cancel_ctx_s {
	pthread_cond_t *cond;
	pthread_mutex_t *mutex;
};
_pthread_ulock_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		const struct timespec *then, pthread_conformance_t conforming)
{
	bool cancelable = (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE);

	uint64_t timeout_ns = 0;
	if (then->tv_sec || then->tv_nsec) {
		// psynch compatibility: cast and bitwise-truncate tv_nsec
		uint64_t fraction_ns = ((uint32_t)then->tv_nsec) & 0x3fffffff;
		if (os_mul_and_add_overflow(then->tv_sec, NSEC_PER_SEC, fraction_ns,
				&timeout_ns)) {
			// saturate (can't wait longer than 584 years...)
			timeout_ns = UINT64_MAX;
		}
	}

	pthread_ulock_cond_state_u *state = _pthread_ulock_cond_state(cond);

	pthread_ulock_cond_state_u origstate = {
		.val = os_atomic_add(&state->val, _PTHREAD_COND_WAITERS_INC, relaxed)
	};

	int rc = _pthread_mutex_ulock_unlock(mutex);
	if (rc) {
		return _pthread_ulock_cond_wait_complete(state, NULL, rc);
	}

	PTHREAD_TRACE(ulcond_wait, cond, origstate.val, timeout_ns, 0);

	do {
		const uint32_t wait_op = UL_COMPARE_AND_WAIT | ULF_NO_ERRNO;
		if (cancelable) {
			struct pthread_ulock_cond_cancel_ctx_s ctx = {
				.cond = cond,
				.mutex = mutex,
			};
			pthread_cleanup_push(_pthread_ulock_cond_cleanup, &ctx);
			rc = __ulock_wait2(wait_op | ULF_WAIT_CANCEL_POINT, &state->seq,
					origstate.seq, timeout_ns, 0);
			pthread_testcancel();
			pthread_cleanup_pop(0);
		} else {
			rc = __ulock_wait2(wait_op, &state->seq, origstate.seq, timeout_ns, 0);
		}

			// "These functions shall not return an error code of [EINTR]."
			// => promote to spurious wake-up
			PTHREAD_INTERNAL_CRASH(-rc, "ulock_wait failure");

		// XXX for now don't care about other waiters
	} while (os_atomic_load(&state->seq, relaxed) == origstate.seq);

	return _pthread_ulock_cond_wait_complete(state, mutex, rc);
}
_pthread_ulock_cond_wait_complete(pthread_ulock_cond_state_u *state,
		pthread_mutex_t *mutex, int rc)
{
	if (mutex) {
		// XXX Check this return value? Historically we haven't, but if rc == 0
		// we could promote the return value to this one.
		_pthread_mutex_ulock_lock(mutex, false);
	}

	pthread_ulock_cond_state_u oldstate, newstate;
	// acquire to pair with release upon signal
	os_atomic_rmw_loop(&state->val, oldstate.val, newstate.val, acquire, {
		newstate = (pthread_ulock_cond_state_u){
			.seq = oldstate.seq,
			.waiters = oldstate.waiters - 1,
			.signal = oldstate.signal ? oldstate.signal - 1 : 0,
		};
	});
/*
 * Suspend waiting for a condition variable.
 * If conformance is not cancelable, we skip the pthread_testcancel(),
 * but keep the remaining conforming behavior.
 */
PTHREAD_NOEXPORT OS_NOINLINE
int
_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		const struct timespec *abstime, int isRelative,
		pthread_conformance_t conforming)
{
	int res;
	struct timespec then = { 0, 0 };
	bool timeout_elapsed = false;

	if (!_pthread_mutex_check_signature(mutex) &&
			!_pthread_mutex_check_signature_init(mutex)) {

	bool ulock = _pthread_mutex_uses_ulock(mutex);
	uint32_t sig = ulock ? _PTHREAD_COND_SIG_ulock : _PTHREAD_COND_SIG_psynch;
	res = _pthread_cond_check_init(cond, &sig);

	if (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE) {
		pthread_testcancel();
	}

	/* send relative time to kernel */
	if (abstime->tv_nsec < 0 || abstime->tv_nsec >= NSEC_PER_SEC) {
		// TODO: PTHREAD_STRICT candidate

	if (isRelative == 0) {
		struct timespec now;
		struct timeval tv;
		__gettimeofday(&tv, NULL);
		TIMEVAL_TO_TIMESPEC(&tv, &now);

		if ((abstime->tv_sec == now.tv_sec) ?
				(abstime->tv_nsec <= now.tv_nsec) :
				(abstime->tv_sec < now.tv_sec)) {
			timeout_elapsed = true;
		} else {
			/* Compute relative time to sleep */
			then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
			then.tv_sec = abstime->tv_sec - now.tv_sec;
			if (then.tv_nsec < 0) {
				then.tv_nsec += NSEC_PER_SEC;
	} else {
		then.tv_sec = abstime->tv_sec;
		then.tv_nsec = abstime->tv_nsec;
		if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
			timeout_elapsed = true;

	if (!ulock && cond->busy != NULL && cond->busy != mutex) {
		// TODO: PTHREAD_STRICT candidate

	/*
	 * If timeout is known to have elapsed, we still need to unlock and
	 * relock the mutex to allow other waiters to get in line and
	 * modify the condition state.
	 */
	if (timeout_elapsed) {
		res = pthread_mutex_unlock(mutex);
		res = pthread_mutex_lock(mutex);

	if (ulock) {
		return _pthread_ulock_cond_wait(cond, mutex, &then, conforming);
	} else {
		return _pthread_psynch_cond_wait(cond, mutex, &then, conforming);
	}
}
_pthread_ulock_cond_cleanup(void *arg)
{
	struct pthread_ulock_cond_cancel_ctx_s *ctx = arg;
	pthread_ulock_cond_state_u *state = _pthread_ulock_cond_state(ctx->cond);

	(void)_pthread_ulock_cond_wait_complete(state, ctx->mutex, 0);

	// "A thread that has been unblocked because it has been canceled while
	// blocked in a call to pthread_cond_timedwait() or pthread_cond_wait()
	// shall not consume any condition signal that may be directed concurrently
	// at the condition variable if there are other threads blocked on the
	// condition variable."
	//
	// Since we have no way to know if we've eaten somebody else's signal, just
	// signal again pessimistically.
	pthread_cond_signal(ctx->cond);
}
_pthread_psynch_cond_cleanup(void *arg)
{
	pthread_cond_t *cond = (pthread_cond_t *)arg;
	pthread_t thread = pthread_self();
	pthread_mutex_t *mutex;

	if (!thread->canceled) {
		return;
	}

	// add unlock ref to show one less waiter
	_pthread_cond_updateval(cond, mutex, thread->cancel_error, 0);

	/*
	** Can't do anything if this fails -- we're on the way out
	*/
	(void)pthread_mutex_lock(mutex);
_pthread_cond_updateval(pthread_cond_t *cond, pthread_mutex_t *mutex,
		int error, uint32_t updateval)
{
	uint32_t diffgen, nsval;
	uint64_t oldval64, newval64;
	uint32_t lcntval, ucntval, scntval;
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

		updateval = PTHRW_INC;
		if (error & ECVCLEARED) {
			updateval |= PTH_RWS_CV_CBIT;
		}
		if (error & ECVPREPOST) {
			updateval |= PTH_RWS_CV_PBIT;
		}

	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	do {
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		diffgen = diff_genseq(lcntval, scntval); // pending waiters

		oldval64 = (((uint64_t)scntval) << 32);

		PTHREAD_TRACE(psynch_cvar_updateval | DBG_FUNC_START, cond, oldval64,

		if (diffgen <= 0 && !is_rws_pbit_set(updateval)) {
			/* TBD: Assert, should not be the case */
			/* validate it is spurious and return */

		// update scntval with number of expected returns and bits
		nsval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);

		nsval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

		// if L==S and c&p bits are set, needs clearpre
		if (((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) &&
				((nsval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
			// reset p bit but retain c bit on the sword
			nsval &= PTH_RWS_CV_RESET_PBIT;
		}

		newval64 = (((uint64_t)nsval) << 32);
	} while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));

	PTHREAD_TRACE(psynch_cvar_updateval | DBG_FUNC_END, cond, newval64,
			(uint64_t)diffgen << 32 | needclearpre, 0);

	// if L == S, then reset associated mutex
	if ((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {

		if (cond->pshared == PTHREAD_PROCESS_SHARED) {
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		}
		(void)__psynch_cvclrprepost(cond, lcntval, ucntval, nsval, 0, lcntval, flags);
#endif /* !BUILDING_VARIANT ] */

PTHREAD_NOEXPORT_VARIANT
int
pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
{
	_pthread_lock_init(&cond->lock);
	return _pthread_cond_init(cond, attr, _PTHREAD_COND_SIG_pristine);
}