/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1991, 1992, 1993, 1994, 1995, 1996, 1997
 * by Open Software Foundation, Inc.
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */
53 #include <sys/time.h> /* For struct timespec and getclock(). */
56 #include "plockstat.h"
57 #else /* !PLOCKSTAT */
58 #define PLOCKSTAT_MUTEX_RELEASE(x, y)
59 #endif /* PLOCKSTAT */
61 extern int __gettimeofday(struct timeval
*, struct timezone
*);
64 int _pthread_cond_wait(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
,
65 const struct timespec
*abstime
, int isRelative
, int isconforming
);
/*
 * COND_GETSEQ_ADDR: compute the addresses of the three 32-bit sequence
 * words (L = lockseq, U = unlockseq, S = signalseq) inside cond->c_seq[],
 * plus a single 64-bit address covering the adjacent L/S pair so they can
 * be updated together with one 64-bit atomic cmpxchg.
 *
 * The two assignment groups below pick different c_seq[] slots; in the
 * upstream source the choice is gated on cond->misalign so that the L/S
 * pair lands on an 8-byte-aligned boundary.
 * NOTE(review): this file appears to be a truncated/garbled extraction —
 * the function's return type, braces, and the misalign conditional are
 * missing here. Code below is left byte-identical.
 */
69 COND_GETSEQ_ADDR(_pthread_cond
*cond
,
70 volatile uint64_t **c_lsseqaddr
,
71 volatile uint32_t **c_lseqcnt
,
72 volatile uint32_t **c_useqcnt
,
73 volatile uint32_t **c_sseqcnt
)
/* Misaligned layout: L/S pair starts at c_seq[1]. */
76 *c_lseqcnt
= &cond
->c_seq
[1];
77 *c_sseqcnt
= &cond
->c_seq
[2];
78 *c_useqcnt
= &cond
->c_seq
[0];
/* Aligned layout: L/S pair starts at c_seq[0]. */
80 *c_lseqcnt
= &cond
->c_seq
[0];
81 *c_sseqcnt
= &cond
->c_seq
[1];
82 *c_useqcnt
= &cond
->c_seq
[2];
/* 64-bit alias of the adjacent L and S words for combined atomics. */
84 *c_lsseqaddr
= (volatile uint64_t *)*c_lseqcnt
;
87 #ifndef BUILDING_VARIANT /* [ */
89 static void _pthread_cond_cleanup(void *arg
);
90 static void _pthread_cond_updateval(_pthread_cond
*cond
, _pthread_mutex
*mutex
,
91 int error
, uint32_t updateval
);
/*
 * pthread_condattr_init: initialize a condition-variable attribute object
 * to defaults — valid signature and the default process-shared setting.
 * NOTE(review): return type, braces, and the `return 0;` are missing from
 * this garbled extraction; code left byte-identical.
 */
95 pthread_condattr_init(pthread_condattr_t
*attr
)
/* Mark the attribute object as valid. */
97 attr
->sig
= _PTHREAD_COND_ATTR_SIG
;
/* Default is process-private sharing. */
98 attr
->pshared
= _PTHREAD_DEFAULT_PSHARED
;
/*
 * pthread_condattr_destroy: invalidate a condition-variable attribute
 * object by clearing its signature so later use is detectable.
 * NOTE(review): return type/braces/return value missing in this extraction.
 */
103 pthread_condattr_destroy(pthread_condattr_t
*attr
)
105 attr
->sig
= _PTHREAD_NO_SIG
;
/*
 * pthread_condattr_getpshared: report the process-shared setting of an
 * attribute object via *pshared, after validating the signature.
 * Presumably returns EINVAL on a bad signature — the error path is missing
 * from this garbled extraction; code left byte-identical.
 */
110 pthread_condattr_getpshared(const pthread_condattr_t
*attr
, int *pshared
)
/* Only read the attribute if the object carries a valid signature. */
113 if (attr
->sig
== _PTHREAD_COND_ATTR_SIG
) {
114 *pshared
= (int)attr
->pshared
;
/*
 * pthread_condattr_setpshared: set the process-shared attribute.
 * Under __DARWIN_UNIX03 both PTHREAD_PROCESS_PRIVATE and
 * PTHREAD_PROCESS_SHARED are accepted; otherwise only PRIVATE is.
 * NOTE(review): the opening #if __DARWIN_UNIX03 line and the error paths
 * are missing from this garbled extraction; code left byte-identical.
 */
121 pthread_condattr_setpshared(pthread_condattr_t
*attr
, int pshared
)
/* Validate the attribute object before mutating it. */
124 if (attr
->sig
== _PTHREAD_COND_ATTR_SIG
) {
126 if (pshared
== PTHREAD_PROCESS_PRIVATE
|| pshared
== PTHREAD_PROCESS_SHARED
)
127 #else /* __DARWIN_UNIX03 */
128 if (pshared
== PTHREAD_PROCESS_PRIVATE
)
129 #endif /* __DARWIN_UNIX03 */
131 attr
->pshared
= pshared
;
/*
 * pthread_cond_timedwait_relative_np: non-portable timed wait where
 * `abstime` is interpreted as a RELATIVE timeout. Thin wrapper over
 * _pthread_cond_wait with isRelative=1, isconforming=0.
 */
139 pthread_cond_timedwait_relative_np(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
,
140 const struct timespec
*abstime
)
142 return _pthread_cond_wait(cond
, mutex
, abstime
, 1, 0);
145 #endif /* !BUILDING_VARIANT ] */
/*
 * _pthread_cond_init: initialize the internal condition-variable state —
 * record the 8-byte misalignment of c_seq[], seed the S word with the
 * clear bit, record the pshared policy (from attr when conforming,
 * otherwise the default), and finally publish the signature with
 * release ordering so concurrent readers never observe a signed but
 * half-initialized object.
 * NOTE(review): braces, the conforming/attr conditionals, and several
 * lines (including #else/#endif around the LP64 store) are missing from
 * this garbled extraction; code left byte-identical.
 */
147 PTHREAD_ALWAYS_INLINE
149 _pthread_cond_init(_pthread_cond
*cond
, const pthread_condattr_t
*attr
, int conforming
)
151 volatile uint64_t *c_lsseqaddr
;
152 volatile uint32_t *c_lseqcnt
, *c_useqcnt
, *c_sseqcnt
;
/* Remember whether c_seq[0] is 8-byte aligned; drives seq-word layout. */
160 cond
->misalign
= (((uintptr_t)&cond
->c_seq
[0]) & 0x7) != 0;
161 COND_GETSEQ_ADDR(cond
, &c_lsseqaddr
, &c_lseqcnt
, &c_useqcnt
, &c_sseqcnt
);
162 *c_sseqcnt
= PTH_RWS_CV_CBIT
; // set Sword to 0c
/* pshared comes from the attr when one is supplied (conforming path)... */
166 cond
->pshared
= attr
->pshared
;
/* ...otherwise fall back to the default sharing policy. */
168 cond
->pshared
= _PTHREAD_DEFAULT_PSHARED
;
171 cond
->pshared
= _PTHREAD_DEFAULT_PSHARED
;
174 long sig
= _PTHREAD_COND_SIG
;
176 // Ensure all contents are properly set before setting signature.
177 #if defined(__LP64__)
178 // For binary compatibility reasons we cannot require natural alignment of
179 // the 64bit 'sig' long value in the struct. rdar://problem/21610439
180 uint32_t *sig32_ptr
= (uint32_t*)&cond
->sig
;
181 uint32_t *sig32_val
= (uint32_t*)&sig
;
/* Store the high half plainly, then release-store the low half so the
 * whole signature becomes visible only after the rest of the object. */
182 *(sig32_ptr
+ 1) = *(sig32_val
+ 1);
183 os_atomic_store(sig32_ptr
, *sig32_val
, release
);
/* Non-LP64: a single naturally-aligned release store suffices. */
185 os_atomic_store2o(cond
, sig
, sig
, release
);
191 #ifndef BUILDING_VARIANT /* [ */
/*
 * _pthread_cond_check_init_slow: slow path for lazily initializing a
 * statically-initialized condvar (PTHREAD_COND_INITIALIZER). Takes the
 * object's lock and re-checks the signature (double-checked locking via
 * cond->lock) before calling _pthread_cond_init.
 * NOTE(review): return type, `int res`, the *inited store, and the bodies
 * of the trailing else-if branches are missing from this garbled
 * extraction; code left byte-identical.
 */
195 _pthread_cond_check_init_slow(_pthread_cond
*cond
, bool *inited
)
198 if (cond
->sig
== _PTHREAD_COND_SIG_init
) {
199 _PTHREAD_LOCK(cond
->lock
);
/* Re-check under the lock: another thread may have won the race. */
200 if (cond
->sig
== _PTHREAD_COND_SIG_init
) {
201 res
= _pthread_cond_init(cond
, NULL
, 0);
205 } else if (cond
->sig
== _PTHREAD_COND_SIG
) {
208 _PTHREAD_UNLOCK(cond
->lock
);
209 } else if (cond
->sig
== _PTHREAD_COND_SIG
) {
/*
 * _pthread_cond_check_init: fast-path signature check. If the condvar is
 * not yet carrying the live signature, defer to the locking slow path;
 * otherwise (missing here) return success.
 */
215 PTHREAD_ALWAYS_INLINE
217 _pthread_cond_check_init(_pthread_cond
*cond
, bool *inited
)
220 if (cond
->sig
!= _PTHREAD_COND_SIG
) {
221 return _pthread_cond_check_init_slow(cond
, inited
);
/*
 * pthread_cond_destroy: tear down a condition variable.
 * For a live condvar: lock it, verify no waiters are pending
 * (L == S modulo count mask), atomically clear the L/S sequence words,
 * clear any kernel prepost if the P bit was set, and invalidate the
 * signature. A statically-initialized-but-never-used condvar is simply
 * invalidated for app compatibility.
 * NOTE(review): several lines (the `do {`, newval64 setup, `int flags`
 * declaration, EBUSY path, returns) are missing from this garbled
 * extraction; code left byte-identical.
 */
226 PTHREAD_NOEXPORT_VARIANT
228 pthread_cond_destroy(pthread_cond_t
*ocond
)
230 _pthread_cond
*cond
= (_pthread_cond
*)ocond
;
232 if (cond
->sig
== _PTHREAD_COND_SIG
) {
233 _PTHREAD_LOCK(cond
->lock
);
235 uint64_t oldval64
, newval64
;
236 uint32_t lcntval
, ucntval
, scntval
;
237 volatile uint64_t *c_lsseqaddr
;
238 volatile uint32_t *c_lseqcnt
, *c_useqcnt
, *c_sseqcnt
;
240 COND_GETSEQ_ADDR(cond
, &c_lsseqaddr
, &c_lseqcnt
, &c_useqcnt
, &c_sseqcnt
);
/* Snapshot the three sequence words. */
243 lcntval
= *c_lseqcnt
;
244 ucntval
= *c_useqcnt
;
245 scntval
= *c_sseqcnt
;
247 // validate it is not busy
248 if ((lcntval
& PTHRW_COUNT_MASK
) != (scntval
& PTHRW_COUNT_MASK
)) {
252 oldval64
= (((uint64_t)scntval
) << 32);
/* Retry the 64-bit L/S swap until no concurrent writer interferes. */
255 } while (!os_atomic_cmpxchg(c_lsseqaddr
, oldval64
, newval64
, seq_cst
));
257 // <rdar://problem/13782056> Need to clear preposts.
259 bool needclearpre
= ((scntval
& PTH_RWS_CV_PBIT
) != 0);
260 if (needclearpre
&& cond
->pshared
== PTHREAD_PROCESS_SHARED
) {
261 flags
|= _PTHREAD_MTX_OPT_PSHARED
;
264 cond
->sig
= _PTHREAD_NO_SIG
;
267 _PTHREAD_UNLOCK(cond
->lock
);
/* Tell the kernel to drop any pending prepost for this condvar. */
270 (void)__psynch_cvclrprepost(cond
, lcntval
, ucntval
, scntval
, 0, lcntval
, flags
);
272 } else if (cond
->sig
== _PTHREAD_COND_SIG_init
) {
273 // Compatibility for misbehaving applications that attempt to
274 // destroy a statically initialized condition variable.
275 cond
->sig
= _PTHREAD_NO_SIG
;
/*
 * _pthread_cond_signal: common implementation behind pthread_cond_signal,
 * pthread_cond_broadcast, and pthread_cond_signal_thread_np.
 *
 * Lock-free algorithm: snapshot the L/U/S sequence words; if there are no
 * waiters (or, for an untargeted signal, everything already unlocked) the
 * call is spurious and returns after validating the snapshot with a
 * cmpxchg. Otherwise advance the U (unlock) word to account for the
 * wakeups and ask the kernel, via __psynch_cvbroad (broadcast) or
 * __psynch_cvsignal (single/targeted), to wake waiters; any returned
 * update value is folded back into S by _pthread_cond_updateval.
 * Bounded retry counters (8192) defend against racing snapshots, with a
 * one-shot resync of U from S when U has drifted.
 * NOTE(review): many lines are missing from this garbled extraction
 * (the retry `do {`, several declarations such as res/inited/ulval/
 * diffgen/flags/retry/ucountreset, goto labels, returns, and the
 * broadcast/signal branch structure); code left byte-identical.
 */
281 PTHREAD_ALWAYS_INLINE
283 _pthread_cond_signal(pthread_cond_t
*ocond
, bool broadcast
, mach_port_t thread
)
286 _pthread_cond
*cond
= (_pthread_cond
*)ocond
;
292 uint64_t oldval64
, newval64
;
293 uint32_t lcntval
, ucntval
, scntval
;
294 volatile uint64_t *c_lsseqaddr
;
295 volatile uint32_t *c_lseqcnt
, *c_useqcnt
, *c_sseqcnt
;
297 int retry_count
= 0, uretry_count
= 0;
/* Lazily initialize a statically-initialized condvar; a freshly inited
 * condvar has no waiters, so signaling it is a no-op. */
301 res
= _pthread_cond_check_init(cond
, &inited
);
302 if (res
!= 0 || inited
== true) {
306 COND_GETSEQ_ADDR(cond
, &c_lsseqaddr
, &c_lseqcnt
, &c_useqcnt
, &c_sseqcnt
);
/* Snapshot the sequence words (re-read on each retry). */
312 lcntval
= *c_lseqcnt
;
313 ucntval
= *c_useqcnt
;
314 scntval
= *c_sseqcnt
;
/* No waiters (L==S), or untargeted signal with everything consumed
 * (L==U): treat as spurious after validating the snapshot. */
318 if (((lcntval
& PTHRW_COUNT_MASK
) == (scntval
& PTHRW_COUNT_MASK
)) ||
319 (thread
== MACH_PORT_NULL
&& ((lcntval
& PTHRW_COUNT_MASK
) == (ucntval
& PTHRW_COUNT_MASK
)))) {
320 /* validate it is spurious and return */
321 oldval64
= (((uint64_t)scntval
) << 32);
325 if (!os_atomic_cmpxchg(c_lsseqaddr
, oldval64
, newval64
, seq_cst
)) {
337 /* validate to eliminate spurious values, race snapshots */
338 if (is_seqhigher((scntval
& PTHRW_COUNT_MASK
), (lcntval
& PTHRW_COUNT_MASK
))) {
339 /* since ucntval may be newer, just redo */
341 if (retry_count
> 8192) {
348 } else if (is_seqhigher((ucntval
& PTHRW_COUNT_MASK
), (lcntval
& PTHRW_COUNT_MASK
))) {
349 /* since ucntval may be newer, just redo */
351 if (uretry_count
> 8192) {
353 * U value if not used for a while can go out of sync
354 * set this to S value and try one more time.
356 if (ucountreset
!= 0) {
358 } else if (os_atomic_cmpxchg(c_useqcnt
, ucntval
, (scntval
& PTHRW_COUNT_MASK
), seq_cst
)) {
359 /* now the U is reset to S value */
369 if (is_seqlower(ucntval
& PTHRW_COUNT_MASK
, scntval
& PTHRW_COUNT_MASK
) != 0) {
370 /* If U < S, set U = S+diff due to intr's TO, etc */
371 ulval
= (scntval
& PTHRW_COUNT_MASK
);
373 /* If U >= S, set U = U+diff due to intr's TO, etc */
374 ulval
= (ucntval
& PTHRW_COUNT_MASK
);
/* Broadcast path: wake everything between U and L. */
378 diffgen
= diff_genseq(lcntval
, ulval
);
380 ulval
= (lcntval
& PTHRW_COUNT_MASK
);
/* Publish the new U word; loop back on contention. */
385 } while (retry
|| !os_atomic_cmpxchg(c_useqcnt
, ucntval
, ulval
, seq_cst
));
388 if (cond
->pshared
== PTHREAD_PROCESS_SHARED
) {
389 flags
|= _PTHREAD_MTX_OPT_PSHARED
;
/* Pack S (high) and L (low) for the kernel call. */
392 uint64_t cvlsgen
= ((uint64_t)scntval
<< 32) | lcntval
;
395 // pass old U val so kernel will know the diffgen
396 uint64_t cvudgen
= ((uint64_t)ucntval
<< 32) | diffgen
;
397 updateval
= __psynch_cvbroad(ocond
, cvlsgen
, cvudgen
, flags
, NULL
, 0, 0);
399 updateval
= __psynch_cvsignal(ocond
, cvlsgen
, ucntval
, thread
, NULL
, 0, 0, flags
);
/* Fold any kernel-reported sequence update back into the S word. */
402 if (updateval
!= (uint32_t)-1 && updateval
!= 0) {
403 _pthread_cond_updateval(cond
, NULL
, 0, updateval
);
/*
 * pthread_cond_broadcast: wake every thread waiting on the condvar.
 * Delegates to _pthread_cond_signal with broadcast=true and no target.
 */
410 * Signal a condition variable, waking up all threads waiting for it.
412 PTHREAD_NOEXPORT_VARIANT
414 pthread_cond_broadcast(pthread_cond_t
*ocond
)
416 return _pthread_cond_signal(ocond
, true, MACH_PORT_NULL
);
/*
 * pthread_cond_signal_thread_np: non-portable variant that directs the
 * wakeup at a specific pthread. Translates the pthread_t to its Mach
 * thread port (when non-NULL — the guarding `if (thread)` line is missing
 * from this garbled extraction) and signals with that target.
 */
420 * Signal a condition variable, waking a specified thread.
422 PTHREAD_NOEXPORT_VARIANT
424 pthread_cond_signal_thread_np(pthread_cond_t
*ocond
, pthread_t thread
)
426 mach_port_t mp
= MACH_PORT_NULL
;
428 mp
= pthread_mach_thread_np((_Nonnull pthread_t
)thread
);
430 return _pthread_cond_signal(ocond
, false, mp
);
/*
 * pthread_cond_signal: wake a single (unspecified) waiter.
 * Delegates to _pthread_cond_signal with broadcast=false, no target.
 */
434 * Signal a condition variable, waking only one thread.
436 PTHREAD_NOEXPORT_VARIANT
438 pthread_cond_signal(pthread_cond_t
*ocond
)
440 return _pthread_cond_signal(ocond
, false, MACH_PORT_NULL
);
/*
 * _pthread_cond_wait: core of pthread_cond_wait / pthread_cond_timedwait
 * and the relative-timeout variant.
 *
 * Flow visible in this (truncated) extraction:
 *  1. Lazily init a statically-initialized condvar; validate the mutex
 *     signature and run the cancellation point when conforming-cancelable.
 *  2. Normalize the timeout: reject out-of-range tv_nsec; for absolute
 *     times compute `then` = abstime - now (via __gettimeofday), detecting
 *     an already-elapsed deadline; for relative times copy abstime and
 *     treat {0,0} as elapsed.
 *  3. If the timeout already elapsed, still unlock and relock the mutex
 *     (see comment at original line 529) and return ETIMEDOUT.
 *  4. Otherwise atomically bump the L word (one more waiter), strip the
 *     c/p bits off the S word, drop the mutex via _pthread_mutex_droplock,
 *     and call __psynch_cvwait — wrapped in a cleanup handler plus
 *     _pthread_testcancel on the conforming path, bare otherwise.
 *  5. Decode the kernel result: -1 + errno cases (ETIMEDOUT / EINTR
 *     treated as spurious wakeup unless canceled) feed an "unlock ref"
 *     into _pthread_cond_updateval; a nonzero updateval is a prepost and
 *     updates S. Finally re-acquire the mutex.
 *
 * NOTE(review): many lines are missing (declarations of res/conforming/
 * isRelative/tv/now/err, the do-loop opener, branch braces, returns);
 * code left byte-identical.
 */
444 * Manage a list of condition variables associated with a mutex
448 * Suspend waiting for a condition variable.
449 * Note: we have to keep a list of condition variables which are using
450 * this same mutex variable so we can detect invalid 'destroy' sequences.
451 * If conformance is not cancelable, we skip the _pthread_testcancel(),
452 * but keep the remaining conforming behavior..
454 PTHREAD_NOEXPORT PTHREAD_NOINLINE
456 _pthread_cond_wait(pthread_cond_t
*ocond
,
457 pthread_mutex_t
*omutex
,
458 const struct timespec
*abstime
,
463 _pthread_cond
*cond
= (_pthread_cond
*)ocond
;
464 _pthread_mutex
*mutex
= (_pthread_mutex
*)omutex
;
465 struct timespec then
= { 0, 0 };
466 uint32_t mtxgen
, mtxugen
, flags
=0, updateval
;
467 uint32_t lcntval
, ucntval
, scntval
;
468 uint32_t nlval
, ulval
, savebits
;
469 volatile uint64_t *c_lsseqaddr
;
470 volatile uint32_t *c_lseqcnt
, *c_useqcnt
, *c_sseqcnt
;
471 uint64_t oldval64
, newval64
, mugen
, cvlsgen
;
472 uint32_t *npmtx
= NULL
;
473 int timeout_elapsed
= 0;
475 res
= _pthread_cond_check_init(cond
, NULL
);
/* Reject a mutex that carries neither a live nor a static-init signature. */
481 if (!_pthread_mutex_check_signature(mutex
) &&
482 !_pthread_mutex_check_signature_init(mutex
)) {
485 if (conforming
== PTHREAD_CONFORM_UNIX03_CANCELABLE
) {
486 _pthread_testcancel(conforming
);
490 /* send relative time to kernel */
492 if (abstime
->tv_nsec
< 0 || abstime
->tv_nsec
>= NSEC_PER_SEC
) {
496 if (isRelative
== 0) {
499 __gettimeofday(&tv
, NULL
);
500 TIMEVAL_TO_TIMESPEC(&tv
, &now
);
/* Deadline already passed? */
502 if ((abstime
->tv_sec
== now
.tv_sec
) ?
503 (abstime
->tv_nsec
<= now
.tv_nsec
) :
504 (abstime
->tv_sec
< now
.tv_sec
)) {
507 /* Compute relative time to sleep */
508 then
.tv_nsec
= abstime
->tv_nsec
- now
.tv_nsec
;
509 then
.tv_sec
= abstime
->tv_sec
- now
.tv_sec
;
510 if (then
.tv_nsec
< 0) {
511 then
.tv_nsec
+= NSEC_PER_SEC
;
516 then
.tv_sec
= abstime
->tv_sec
;
517 then
.tv_nsec
= abstime
->tv_nsec
;
518 if ((then
.tv_sec
== 0) && (then
.tv_nsec
== 0)) {
/* Invalid 'destroy' sequence detection: cond already bound to another mutex. */
524 if (cond
->busy
!= NULL
&& cond
->busy
!= mutex
) {
529 * If timeout is known to have elapsed, we still need to unlock and
530 * relock the mutex to allow other waiters to get in line and
531 * modify the condition state.
533 if (timeout_elapsed
) {
534 res
= pthread_mutex_unlock(omutex
);
538 res
= pthread_mutex_lock(omutex
);
545 COND_GETSEQ_ADDR(cond
, &c_lsseqaddr
, &c_lseqcnt
, &c_useqcnt
, &c_sseqcnt
);
/* Snapshot sequence words (re-read each retry). */
548 lcntval
= *c_lseqcnt
;
549 ucntval
= *c_useqcnt
;
550 scntval
= *c_sseqcnt
;
552 oldval64
= (((uint64_t)scntval
) << 32);
555 /* remove c and p bits on S word */
556 savebits
= scntval
& PTH_RWS_CV_BITSALL
;
557 ulval
= (scntval
& PTHRW_COUNT_MASK
);
/* One more waiter: advance L. */
558 nlval
= lcntval
+ PTHRW_INC
;
559 newval64
= (((uint64_t)ulval
) << 32);
561 } while (!os_atomic_cmpxchg(c_lsseqaddr
, oldval64
, newval64
, seq_cst
));
/* Atomically release the mutex, retrieving its generation counts. */
565 res
= _pthread_mutex_droplock(mutex
, &flags
, &npmtx
, &mtxgen
, &mtxugen
);
567 /* TBD: cases are for normal (non owner for recursive mutex; error checking)*/
571 if ((flags
& _PTHREAD_MTX_OPT_NOTIFY
) == 0) {
575 mugen
= ((uint64_t)mtxugen
<< 32) | mtxgen
;
577 flags
&= ~_PTHREAD_MTX_OPT_MUTEX
; /* reset the mutex bit as this is cvar */
579 cvlsgen
= ((uint64_t)(ulval
| savebits
)<< 32) | nlval
;
581 // SUSv3 requires pthread_cond_wait to be a cancellation point
/* Conforming path: cleanup handler ensures waiter bookkeeping on cancel. */
583 pthread_cleanup_push(_pthread_cond_cleanup
, (void *)cond
);
584 updateval
= __psynch_cvwait(ocond
, cvlsgen
, ucntval
, (pthread_mutex_t
*)npmtx
, mugen
, flags
, (int64_t)then
.tv_sec
, (int32_t)then
.tv_nsec
);
585 _pthread_testcancel(conforming
);
586 pthread_cleanup_pop(0);
/* Non-conforming path: same kernel wait, no cancellation point. */
588 updateval
= __psynch_cvwait(ocond
, cvlsgen
, ucntval
, (pthread_mutex_t
*)npmtx
, mugen
, flags
, (int64_t)then
.tv_sec
, (int32_t)then
.tv_nsec
);
591 if (updateval
== (uint32_t)-1) {
593 switch (err
& 0xff) {
598 // spurious wakeup (unless canceled)
606 // add unlock ref to show one less waiter
607 _pthread_cond_updateval(cond
, mutex
, err
, 0);
608 } else if (updateval
!= 0) {
610 // The return due to prepost and might have bit states
611 // update S and return for prepo if needed
612 _pthread_cond_updateval(cond
, mutex
, 0, updateval
);
/* Re-acquire the caller's mutex before returning, per POSIX. */
615 pthread_mutex_lock(omutex
);
/*
 * _pthread_cond_cleanup: cancellation cleanup handler installed around
 * __psynch_cvwait in _pthread_cond_wait. If the thread was actually
 * canceled, credit one "unlock ref" against the condvar (one fewer
 * waiter) and re-acquire the associated mutex so the canceled thread
 * exits the wait holding the lock, as POSIX requires.
 * NOTE(review): several lines (the early-return body, thread->canceled
 * handling, closing braces) are missing from this garbled extraction;
 * code left byte-identical.
 */
621 _pthread_cond_cleanup(void *arg
)
623 _pthread_cond
*cond
= (_pthread_cond
*)arg
;
624 pthread_t thread
= pthread_self();
625 pthread_mutex_t
*mutex
;
/* Only do cancel bookkeeping when the thread really was canceled. */
628 if (!thread
->canceled
) {
633 mutex
= (pthread_mutex_t
*)cond
->busy
;
635 // add unlock ref to show one less waiter
636 _pthread_cond_updateval(cond
, (_pthread_mutex
*)mutex
,
637 thread
->cancel_error
, 0);
640 ** Can't do anything if this fails -- we're on the way out
643 (void)pthread_mutex_lock(mutex
);
/*
 * _pthread_cond_updateval: fold a kernel-reported sequence update (or a
 * waiter-exit "unlock ref" when error != 0) into the condvar's S word.
 *
 * When called with an error, updateval becomes PTHRW_INC plus the C/P
 * bits decoded from the error. The S word is then advanced by the
 * update's count, merging the C/P bits; if that makes L == S with both
 * bits set, the P (prepost) bit is stripped and — after the cmpxchg
 * succeeds — the kernel prepost is cleared via __psynch_cvclrprepost.
 * When L == S the associated mutex is also reset (body missing here).
 * NOTE(review): lines are missing from this garbled extraction
 * (needclearpre/flags declarations, the `do {`, trace arguments, the
 * cond->busy reset, returns); code left byte-identical.
 */
648 _pthread_cond_updateval(_pthread_cond
*cond
, _pthread_mutex
*mutex
,
649 int error
, uint32_t updateval
)
653 uint32_t diffgen
, nsval
;
654 uint64_t oldval64
, newval64
;
655 uint32_t lcntval
, ucntval
, scntval
;
656 volatile uint64_t *c_lsseqaddr
;
657 volatile uint32_t *c_lseqcnt
, *c_useqcnt
, *c_sseqcnt
;
/* Error path: one waiter left; decode clear/prepost bits from the error. */
660 updateval
= PTHRW_INC
;
661 if (error
& ECVCLEARED
) {
662 updateval
|= PTH_RWS_CV_CBIT
;
664 if (error
& ECVPREPOST
) {
665 updateval
|= PTH_RWS_CV_PBIT
;
669 COND_GETSEQ_ADDR(cond
, &c_lsseqaddr
, &c_lseqcnt
, &c_useqcnt
, &c_sseqcnt
);
/* Snapshot sequence words (re-read each retry). */
672 lcntval
= *c_lseqcnt
;
673 ucntval
= *c_useqcnt
;
674 scntval
= *c_sseqcnt
;
678 diffgen
= diff_genseq(lcntval
, scntval
); // pending waiters
680 oldval64
= (((uint64_t)scntval
) << 32);
683 PTHREAD_TRACE(psynch_cvar_updateval
| DBG_FUNC_START
, cond
, oldval64
,
686 if (diffgen
<= 0 && !is_rws_pbit_set(updateval
)) {
687 /* TBD: Assert, should not be the case */
688 /* validate it is spurious and return */
693 // update scntval with number of expected returns and bits
694 nsval
= (scntval
& PTHRW_COUNT_MASK
) + (updateval
& PTHRW_COUNT_MASK
);
696 nsval
|= ((scntval
& PTH_RWS_CV_BITSALL
) | (updateval
& PTH_RWS_CV_BITSALL
));
698 // if L==S and c&p bits are set, needs clearpre
699 if (((nsval
& PTHRW_COUNT_MASK
) == (lcntval
& PTHRW_COUNT_MASK
)) &&
700 ((nsval
& PTH_RWS_CV_BITSALL
) == PTH_RWS_CV_BITSALL
)) {
701 // reset p bit but retain c bit on the sword
702 nsval
&= PTH_RWS_CV_RESET_PBIT
;
706 newval64
= (((uint64_t)nsval
) << 32);
709 } while (!os_atomic_cmpxchg(c_lsseqaddr
, oldval64
, newval64
, seq_cst
));
711 PTHREAD_TRACE(psynch_cvar_updateval
| DBG_FUNC_END
, cond
, newval64
,
712 (uint64_t)diffgen
<< 32 | needclearpre
, 0);
715 // if L == S, then reset associated mutex
716 if ((nsval
& PTHRW_COUNT_MASK
) == (lcntval
& PTHRW_COUNT_MASK
)) {
723 if (cond
->pshared
== PTHREAD_PROCESS_SHARED
) {
724 flags
|= _PTHREAD_MTX_OPT_PSHARED
;
726 (void)__psynch_cvclrprepost(cond
, lcntval
, ucntval
, nsval
, 0, lcntval
, flags
);
730 #endif /* !BUILDING_VARIANT ] */
/*
 * pthread_cond_init: public initializer. Initializes the object's lock,
 * then delegates to _pthread_cond_init with a `conforming` flag that is
 * selected by the __DARWIN_UNIX03 preprocessor conditional (the two
 * assignments to `conforming` are among the lines missing from this
 * garbled extraction); code left byte-identical.
 */
732 PTHREAD_NOEXPORT_VARIANT
734 pthread_cond_init(pthread_cond_t
*ocond
, const pthread_condattr_t
*attr
)
740 #else /* __DARWIN_UNIX03 */
742 #endif /* __DARWIN_UNIX03 */
744 _pthread_cond
*cond
= (_pthread_cond
*)ocond
;
745 _PTHREAD_LOCK_INIT(cond
->lock
);
746 return _pthread_cond_init(cond
, attr
, conforming
);