2 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
48 * POSIX Pthread Library
52 #include <sys/time.h> /* For struct timespec and getclock(). */
56 #include "plockstat.h"
57 #else /* !PLOCKSTAT */
58 #define PLOCKSTAT_MUTEX_RELEASE(x, y)
59 #endif /* PLOCKSTAT */
/*
 * Forward declarations for the condition-variable implementation below.
 * NOTE(review): this extract is fragmented — logical lines are split and the
 * original file's line numbers are fused into the text; tokens are kept verbatim.
 */
/* Internal initializer shared by pthread_cond_init() and lazy init of
 * statically-initialized condvars (see _pthread_cond_check_init below). */
61 __private_extern__
int _pthread_cond_init(_pthread_cond
*, const pthread_condattr_t
*, int);
/* Common wait path for pthread_cond_wait/timedwait variants.
 * isRelative: abstime is a relative timeout rather than an absolute deadline.
 * isconforming: selects UNIX03-conforming behavior (cancellation, EINVAL checks). */
62 __private_extern__
int _pthread_cond_wait(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
, const struct timespec
*abstime
, int isRelative
, int isconforming
);
/* Raw syscall wrapper used to compute relative timeouts from absolute ones. */
64 extern int __gettimeofday(struct timeval
*, struct timezone
*);
66 #ifndef BUILDING_VARIANT
/* Cancellation cleanup handler pushed around __psynch_cvwait (conforming path). */
67 static void _pthread_cond_cleanup(void *arg
);
/* Applies a kernel-reported update (count + C/P bits) to the condvar's S word. */
68 static void _pthread_cond_updateval(_pthread_cond
* cond
, int error
, uint32_t updateval
);
/*
 * COND_GETSEQ_ADDR — hand back pointers to the three 32-bit sequence words
 * packed in cond->c_seq[]: L (lock/waiter sequence), U (unlock sequence),
 * S (signal sequence).
 * NOTE(review): extraction dropped the branch structure here. The two
 * assignment groups below map c_seq[] differently — presumably selected by
 * cond->misalign (set in _pthread_cond_init from the 8-byte alignment of
 * c_seq[0]) so that the 64-bit CAS over the L/S pair lands on an aligned
 * doubleword — confirm against the unfragmented source.
 */
72 COND_GETSEQ_ADDR(_pthread_cond
*cond
,
73 volatile uint32_t **c_lseqcnt
,
74 volatile uint32_t **c_useqcnt
,
75 volatile uint32_t **c_sseqcnt
)
/* Misaligned layout: L/S occupy c_seq[1]/c_seq[2], U is c_seq[0]. */
78 *c_lseqcnt
= &cond
->c_seq
[1];
79 *c_sseqcnt
= &cond
->c_seq
[2];
80 *c_useqcnt
= &cond
->c_seq
[0];
/* Aligned layout: L/S occupy c_seq[0]/c_seq[1], U is c_seq[2]. */
82 *c_lseqcnt
= &cond
->c_seq
[0];
83 *c_sseqcnt
= &cond
->c_seq
[1];
84 *c_useqcnt
= &cond
->c_seq
[2];
88 #ifndef BUILDING_VARIANT /* [ */
91 pthread_condattr_init(pthread_condattr_t
*attr
)
93 attr
->sig
= _PTHREAD_COND_ATTR_SIG
;
94 attr
->pshared
= _PTHREAD_DEFAULT_PSHARED
;
99 pthread_condattr_destroy(pthread_condattr_t
*attr
)
101 attr
->sig
= _PTHREAD_NO_SIG
;
106 pthread_condattr_getpshared(const pthread_condattr_t
*attr
, int *pshared
)
109 if (attr
->sig
== _PTHREAD_COND_ATTR_SIG
) {
110 *pshared
= (int)attr
->pshared
;
117 pthread_condattr_setpshared(pthread_condattr_t
*attr
, int pshared
)
120 if (attr
->sig
== _PTHREAD_COND_ATTR_SIG
) {
122 if (pshared
== PTHREAD_PROCESS_PRIVATE
|| pshared
== PTHREAD_PROCESS_SHARED
)
123 #else /* __DARWIN_UNIX03 */
124 if (pshared
== PTHREAD_PROCESS_PRIVATE
)
125 #endif /* __DARWIN_UNIX03 */
127 attr
->pshared
= pshared
;
/*
 * Internal condvar initializer: lays out the L/U/S sequence words, records
 * the process-shared mode, and publishes the signature last.
 * NOTE(review): extraction dropped the conditionals selecting between the
 * three cond->pshared assignments below — presumably attr->pshared is used
 * when a valid attr is supplied (conforming path) and the default otherwise;
 * confirm against the unfragmented source.
 */
134 __private_extern__
int
135 _pthread_cond_init(_pthread_cond
*cond
, const pthread_condattr_t
*attr
, int conforming
)
137 volatile uint32_t *c_lseqcnt
, *c_useqcnt
, *c_sseqcnt
;
/* Record whether c_seq[0] is 8-byte aligned; COND_GETSEQ_ADDR uses this to
 * pick a layout whose L/S pair can be CAS'd as one aligned 64-bit word. */
145 cond
->misalign
= (((uintptr_t)&cond
->c_seq
[0]) & 0x7) != 0;
146 COND_GETSEQ_ADDR(cond
, &c_lseqcnt
, &c_useqcnt
, &c_sseqcnt
);
147 *c_sseqcnt
= PTH_RWS_CV_CBIT
; // set Sword to 0c
/* pshared taken from the attribute when one is provided... */
151 cond
->pshared
= attr
->pshared
;
/* ...otherwise fall back to the process-private default. */
153 cond
->pshared
= _PTHREAD_DEFAULT_PSHARED
;
156 cond
->pshared
= _PTHREAD_DEFAULT_PSHARED
;
159 // Ensure all contents are properly set before setting signature.
/* Publishing the signature last makes the condvar visible to other threads
 * only after the fields above are in place. */
161 cond
->sig
= _PTHREAD_COND_SIG
;
/*
 * Lazily initialize a statically-initialized condvar on first use.
 * The _PTHREAD_COND_SIG_init test appears twice: once unlocked and once
 * again after (presumably) taking the condvar lock — the locking calls were
 * dropped by extraction; confirm against the unfragmented source. If another
 * thread won the race, *inited reports whether initialization happened here.
 */
167 _pthread_cond_check_init(_pthread_cond
*cond
, bool *inited
)
170 if (cond
->sig
!= _PTHREAD_COND_SIG
) {
/* First (unsynchronized) check for the static-initializer signature. */
172 if (cond
->sig
== _PTHREAD_COND_SIG_init
) {
/* Re-check after acquiring the lock to close the init race. */
174 if (cond
->sig
== _PTHREAD_COND_SIG_init
) {
175 res
= _pthread_cond_init(cond
, NULL
, 0);
/* Another thread completed initialization between the two checks. */
179 } else if (cond
->sig
== _PTHREAD_COND_SIG
) {
/*
 * Destroy a condition variable.
 * Fails (presumably EBUSY — the branch body is missing from this extract)
 * if waiters are pending (L != S), otherwise atomically snapshots/clears the
 * sequence words, invalidates the signature, and asks the kernel to clear
 * any prepost state. Also tolerates destroy on a statically-initialized,
 * never-used condvar for buggy applications.
 */
189 pthread_cond_destroy(pthread_cond_t
*ocond
)
191 _pthread_cond
*cond
= (_pthread_cond
*)ocond
;
193 if (cond
->sig
== _PTHREAD_COND_SIG
) {
196 uint64_t oldval64
, newval64
;
197 uint32_t lcntval
, ucntval
, scntval
;
198 volatile uint32_t *c_lseqcnt
, *c_useqcnt
, *c_sseqcnt
;
200 COND_GETSEQ_ADDR(cond
, &c_lseqcnt
, &c_useqcnt
, &c_sseqcnt
);
/* Snapshot L/U/S; the CAS below retries if they move underneath us. */
203 lcntval
= *c_lseqcnt
;
204 ucntval
= *c_useqcnt
;
205 scntval
= *c_sseqcnt
;
207 // validate it is not busy
/* L != S means threads are still waiting: cannot destroy. */
208 if ((lcntval
& PTHRW_COUNT_MASK
) != (scntval
& PTHRW_COUNT_MASK
)) {
212 oldval64
= (((uint64_t)scntval
) << 32);
/* 64-bit CAS over the adjacent L/S pair (c_lseqcnt is the low word). */
215 } while (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)c_lseqcnt
) != TRUE
);
217 // <rdar://problem/13782056> Need to clear preposts.
219 bool needclearpre
= ((scntval
& PTH_RWS_CV_PBIT
) != 0);
220 if (needclearpre
&& cond
->pshared
== PTHREAD_PROCESS_SHARED
) {
221 flags
|= _PTHREAD_MTX_OPT_PSHARED
;
224 cond
->sig
= _PTHREAD_NO_SIG
;
/* Tell the kernel to drop any prepost recorded for this condvar. */
230 (void)__psynch_cvclrprepost(cond
, lcntval
, ucntval
, scntval
, 0, lcntval
, flags
);
232 } else if (cond
->sig
== _PTHREAD_COND_SIG_init
) {
233 // Compatibility for misbehaving applications that attempt to
234 // destroy a statically initialized condition variable.
235 cond
->sig
= _PTHREAD_NO_SIG
;
/*
 * Common implementation behind pthread_cond_signal/broadcast.
 * broadcast: wake all waiters via __psynch_cvbroad; otherwise wake one
 * (optionally a specific mach thread) via __psynch_cvsignal.
 * Works by advancing the U (unlock) sequence word with a CAS retry loop,
 * then making the syscall with packed L/S and U/diff generation words.
 * NOTE(review): several loop/branch delimiters were dropped by extraction;
 * tokens below are kept verbatim.
 */
242 _pthread_cond_signal(pthread_cond_t
*ocond
, bool broadcast
, mach_port_t thread
)
245 _pthread_cond
*cond
= (_pthread_cond
*)ocond
;
251 uint64_t oldval64
, newval64
;
252 uint32_t lcntval
, ucntval
, scntval
;
253 volatile uint32_t *c_lseqcnt
, *c_useqcnt
, *c_sseqcnt
;
255 int retry_count
= 0, uretry_count
= 0;
/* Lazily initialize a statically-initialized condvar; if we just inited it
 * there cannot be any waiters yet, so the signal is a no-op. */
259 res
= _pthread_cond_check_init(cond
, &inited
);
260 if (res
!= 0 || inited
== true) {
264 COND_GETSEQ_ADDR(cond
, &c_lseqcnt
, &c_useqcnt
, &c_sseqcnt
);
/* Snapshot L/U/S for this attempt. */
270 lcntval
= *c_lseqcnt
;
271 ucntval
= *c_useqcnt
;
272 scntval
= *c_sseqcnt
;
/* No pending waiters (L == S), or nothing left to signal (L == U when no
 * specific thread was requested): nothing to do. */
274 if (((lcntval
& PTHRW_COUNT_MASK
) == (scntval
& PTHRW_COUNT_MASK
)) ||
275 (thread
== MACH_PORT_NULL
&& ((lcntval
& PTHRW_COUNT_MASK
) == (ucntval
& PTHRW_COUNT_MASK
)))) {
276 /* validate it is spurious and return */
277 oldval64
= (((uint64_t)scntval
) << 32);
281 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)c_lseqcnt
) != TRUE
) {
293 /* validate to eliminate spurious values, race snapshots */
/* S raced ahead of our L snapshot: stale read, retry (bounded). */
294 if (is_seqhigher((scntval
& PTHRW_COUNT_MASK
), (lcntval
& PTHRW_COUNT_MASK
))) {
295 /* since ucntval may be newer, just redo */
297 if (retry_count
> 8192) {
304 } else if (is_seqhigher((ucntval
& PTHRW_COUNT_MASK
), (lcntval
& PTHRW_COUNT_MASK
))) {
305 /* since ucntval may be newer, just redo */
307 if (uretry_count
> 8192) {
309 * U value if not used for a while can go out of sync
310 * set this to S value and try one more time.
/* Only attempt the U := S resync once (ucountreset guards re-entry). */
312 if (ucountreset
!= 0) {
314 } else if (OSAtomicCompareAndSwap32Barrier(ucntval
, (scntval
& PTHRW_COUNT_MASK
), (volatile int32_t *)c_useqcnt
) == TRUE
) {
315 /* now the U is reset to S value */
/* Broadcast path: compute the new U value to cover all pending waiters. */
325 if (is_seqlower(ucntval
& PTHRW_COUNT_MASK
, scntval
& PTHRW_COUNT_MASK
) != 0) {
326 /* If U < S, set U = S+diff due to intr's TO, etc */
327 ulval
= (scntval
& PTHRW_COUNT_MASK
);
329 /* If U >= S, set U = U+diff due to intr's TO, etc */
330 ulval
= (ucntval
& PTHRW_COUNT_MASK
);
/* diffgen = number of waiters being released (L - U generation delta). */
334 diffgen
= diff_genseq(lcntval
, ulval
);
336 ulval
= (lcntval
& PTHRW_COUNT_MASK
);
/* Publish the new U; retry the whole snapshot on CAS failure. */
341 } while (retry
|| OSAtomicCompareAndSwap32Barrier(ucntval
, ulval
, (volatile int32_t *)c_useqcnt
) != TRUE
);
344 if (cond
->pshared
== PTHREAD_PROCESS_SHARED
) {
345 flags
|= _PTHREAD_MTX_OPT_PSHARED
;
/* Pack S (high) and L (low) for the kernel. */
348 uint64_t cvlsgen
= ((uint64_t)scntval
<< 32) | lcntval
;
351 // pass old U val so kernel will know the diffgen
352 uint64_t cvudgen
= ((uint64_t)ucntval
<< 32) | diffgen
;
353 updateval
= __psynch_cvbroad(ocond
, cvlsgen
, cvudgen
, flags
, NULL
, 0, 0);
/* Single-wakeup path (optionally targeting a specific mach thread). */
355 updateval
= __psynch_cvsignal(ocond
, cvlsgen
, ucntval
, thread
, NULL
, 0, 0, flags
);
/* Kernel reported an S-word update (counts and/or C/P bits): apply it. */
358 if (updateval
!= (uint32_t)-1 && updateval
!= 0) {
359 _pthread_cond_updateval(cond
, 0, updateval
);
367 * Signal a condition variable, waking up all threads waiting for it.
370 pthread_cond_broadcast(pthread_cond_t
*ocond
)
372 return _pthread_cond_signal(ocond
, true, MACH_PORT_NULL
);
376 * Signal a condition variable, waking a specified thread.
379 pthread_cond_signal_thread_np(pthread_cond_t
*ocond
, pthread_t thread
)
381 mach_port_t mp
= MACH_PORT_NULL
;
383 mp
= pthread_mach_thread_np(thread
);
385 return _pthread_cond_signal(ocond
, false, mp
);
389 * Signal a condition variable, waking only one thread.
392 pthread_cond_signal(pthread_cond_t
*cond
)
394 return pthread_cond_signal_thread_np(cond
, NULL
);
/*
 * Core wait path for all pthread_cond_wait/timedwait variants.
 * Computes the kernel timeout, atomically registers this thread as a waiter
 * (bumping the L word), drops the mutex, sleeps in __psynch_cvwait, applies
 * any kernel-reported S-word update, and re-acquires the mutex.
 * NOTE(review): extraction dropped many delimiters/branch bodies; the
 * original tokens below are unmodified.
 */
398 * Manage a list of condition variables associated with a mutex
403 * Suspend waiting for a condition variable.
404 * Note: we have to keep a list of condition variables which are using
405 * this same mutex variable so we can detect invalid 'destroy' sequences.
406 * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
407 * remaining conforming behavior..
409 __private_extern__
int
410 _pthread_cond_wait(pthread_cond_t
*ocond
,
411 pthread_mutex_t
*omutex
,
412 const struct timespec
*abstime
,
417 _pthread_cond
*cond
= (_pthread_cond
*)ocond
;
418 _pthread_mutex
*mutex
= (_pthread_mutex
*)omutex
;
419 struct timespec then
= { 0, 0 };
420 uint32_t mtxgen
, mtxugen
, flags
=0, updateval
;
421 uint32_t lcntval
, ucntval
, scntval
;
422 uint32_t nlval
, ulval
, savebits
;
423 volatile uint32_t *c_lseqcnt
, *c_useqcnt
, *c_sseqcnt
;
424 uint64_t oldval64
, newval64
, mugen
, cvlsgen
;
425 uint32_t *npmtx
= NULL
;
427 extern void _pthread_testcancel(pthread_t thread
, int isconforming
);
/* Lazy-init a statically-initialized condvar before use. */
429 res
= _pthread_cond_check_init(cond
, NULL
);
/* Validate the mutex signature (accepts the static-initializer forms). */
435 if (mutex
->sig
!= _PTHREAD_MUTEX_SIG
&& (mutex
->sig
& _PTHREAD_MUTEX_SIG_init_MASK
) != _PTHREAD_MUTEX_SIG_CMP
) {
/* Conforming wait is a cancellation point on entry. */
438 if (isconforming
> 0) {
439 _pthread_testcancel(pthread_self(), 1);
443 /* send relative time to kernel */
445 if (isRelative
== 0) {
/* Absolute deadline: convert to a relative timeout against "now". */
448 __gettimeofday(&tv
, NULL
);
449 TIMEVAL_TO_TIMESPEC(&tv
, &now
);
451 /* Compute relative time to sleep */
452 then
.tv_nsec
= abstime
->tv_nsec
- now
.tv_nsec
;
453 then
.tv_sec
= abstime
->tv_sec
- now
.tv_sec
;
454 if (then
.tv_nsec
< 0) {
/* Borrow one second to normalize a negative nanosecond field. */
455 then
.tv_nsec
+= NSEC_PER_SEC
;
/* Deadline already passed (or exactly now): time out immediately. */
458 if (then
.tv_sec
< 0 || (then
.tv_sec
== 0 && then
.tv_nsec
== 0)) {
/* Conforming mode rejects out-of-range absolute timespecs (EINVAL). */
462 (abstime
->tv_sec
< 0 ||
463 abstime
->tv_nsec
< 0 ||
464 abstime
->tv_nsec
>= NSEC_PER_SEC
)) {
/* Relative timeout: use the caller's value directly. */
468 then
.tv_sec
= abstime
->tv_sec
;
469 then
.tv_nsec
= abstime
->tv_nsec
;
470 if ((then
.tv_sec
== 0) && (then
.tv_nsec
== 0)) {
474 if (isconforming
&& (then
.tv_sec
< 0 || then
.tv_nsec
< 0)) {
477 if (then
.tv_nsec
>= NSEC_PER_SEC
) {
/* A condvar may only be associated with one mutex at a time. */
482 if (cond
->busy
!= NULL
&& cond
->busy
!= mutex
) {
486 COND_GETSEQ_ADDR(cond
, &c_lseqcnt
, &c_useqcnt
, &c_sseqcnt
);
/* Snapshot L/U/S, then CAS-register this waiter by advancing L. */
489 lcntval
= *c_lseqcnt
;
490 ucntval
= *c_useqcnt
;
491 scntval
= *c_sseqcnt
;
493 oldval64
= (((uint64_t)scntval
) << 32);
496 /* remove c and p bits on S word */
497 savebits
= scntval
& PTH_RWS_CV_BITSALL
;
498 ulval
= (scntval
& PTHRW_COUNT_MASK
);
499 nlval
= lcntval
+ PTHRW_INC
;
500 newval64
= (((uint64_t)ulval
) << 32);
502 } while (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)c_lseqcnt
) != TRUE
);
/* Hand the mutex to the kernel (drops it and returns its generations). */
506 res
= __mtx_droplock(mutex
, &flags
, &npmtx
, &mtxgen
, &mtxugen
);
508 /* TBD: cases are for normal (non owner for recursive mutex; error checking)*/
512 if ((flags
& _PTHREAD_MTX_OPT_NOTIFY
) == 0) {
516 mugen
= ((uint64_t)mtxugen
<< 32) | mtxgen
;
518 flags
&= ~_PTHREAD_MTX_OPT_MUTEX
; /* reset the mutex bit as this is cvar */
/* Pack the saved C/P bits with S (high) and the new L (low) for the kernel. */
520 cvlsgen
= ((uint64_t)(ulval
| savebits
)<< 32) | nlval
;
522 // SUSv3 requires pthread_cond_wait to be a cancellation point
/* Conforming path: wrap the sleep in a cancellation cleanup handler. */
524 pthread_cleanup_push(_pthread_cond_cleanup
, (void *)cond
);
525 updateval
= __psynch_cvwait(ocond
, cvlsgen
, ucntval
, (pthread_mutex_t
*)npmtx
, mugen
, flags
, (int64_t)then
.tv_sec
, (int32_t)then
.tv_nsec
);
526 _pthread_testcancel(pthread_self(), isconforming
);
527 pthread_cleanup_pop(0);
/* Non-conforming path: same syscall, no cancellation bracketing. */
529 updateval
= __psynch_cvwait(ocond
, cvlsgen
, ucntval
, (pthread_mutex_t
*)npmtx
, mugen
, flags
, (int64_t)then
.tv_sec
, (int32_t)then
.tv_nsec
);
/* Syscall failed: decode errno-style error from the low byte. */
532 if (updateval
== (uint32_t)-1) {
534 switch (err
& 0xff) {
539 // spurious wakeup (unless canceled)
547 // add unlock ref to show one less waiter
548 _pthread_cond_updateval(cond
, err
, 0);
549 } else if (updateval
!= 0) {
551 // The return due to prepost and might have bit states
552 // update S and return for prepo if needed
553 _pthread_cond_updateval(cond
, 0, updateval
);
/* Re-acquire the user's mutex before returning, per POSIX semantics. */
556 pthread_mutex_lock(omutex
);
/*
 * Cancellation cleanup handler pushed around __psynch_cvwait in the
 * conforming wait path. If this thread was actually canceled, account for
 * one fewer waiter on the condvar (propagating the cancel error into the
 * S word) and re-acquire the associated mutex so the canceled thread exits
 * the wait holding it, as POSIX requires.
 */
562 _pthread_cond_cleanup(void *arg
)
564 _pthread_cond
*cond
= (_pthread_cond
*)arg
;
565 pthread_mutex_t
*mutex
;
568 pthread_t thread
= pthread_self();
/* Check the was-canceled flag under the thread lock (LOCK dropped by
 * extraction; the matching UNLOCK is below). */
572 thcanceled
= (thread
->detached
& _PTHREAD_WASCANCEL
);
573 UNLOCK(thread
->lock
);
/* Not canceled: nothing to clean up, normal return path handles the rest. */
575 if (thcanceled
== 0) {
580 mutex
= (pthread_mutex_t
*)cond
->busy
;
582 // add unlock ref to show one less waiter
583 _pthread_cond_updateval(cond
, thread
->cancel_error
, 0);
586 ** Can't do anything if this fails -- we're on the way out
589 (void)pthread_mutex_lock(mutex
);
/* Error flag bits folded into the wait error by the kernel:
 * ECVCERORR -> set the C bit, ECVPERORR -> set the P bit on the S word. */
593 #define ECVCERORR 256
594 #define ECVPERORR 512
/*
 * Apply an update to the condvar's S (signal sequence) word.
 * Called either with a kernel-provided updateval (count delta plus C/P bits)
 * or, when a waiter exits abnormally (error != 0), with a single PTHRW_INC
 * plus bits derived from the ECVCERORR/ECVPERORR flags in `error`.
 * Loops with a 64-bit CAS over the L/S pair; when the update drains the last
 * waiter (L == S), clears prepost state via __psynch_cvclrprepost.
 */
597 _pthread_cond_updateval(_pthread_cond
*cond
, int error
, uint32_t updateval
)
601 uint32_t diffgen
, nsval
;
602 uint64_t oldval64
, newval64
;
603 uint32_t lcntval
, ucntval
, scntval
;
604 volatile uint32_t *c_lseqcnt
, *c_useqcnt
, *c_sseqcnt
;
/* Error path: synthesize an updateval of one waiter plus error bits. */
607 updateval
= PTHRW_INC
;
608 if ((error
& ECVCERORR
) != 0) {
609 updateval
|= PTH_RWS_CV_CBIT
;
611 if ((error
& ECVPERORR
) != 0) {
612 updateval
|= PTH_RWS_CV_PBIT
;
616 COND_GETSEQ_ADDR(cond
, &c_lseqcnt
, &c_useqcnt
, &c_sseqcnt
);
/* Snapshot L/U/S for this CAS attempt. */
619 lcntval
= *c_lseqcnt
;
620 ucntval
= *c_useqcnt
;
621 scntval
= *c_sseqcnt
;
623 diffgen
= diff_genseq(lcntval
, scntval
); // pending waiters
625 oldval64
= (((uint64_t)scntval
) << 32);
629 /* TBD: Assert, should not be the case */
630 /* validate it is spurious and return */
635 // update scntval with number of expected returns and bits
636 nsval
= (scntval
& PTHRW_COUNT_MASK
) + (updateval
& PTHRW_COUNT_MASK
);
/* Merge the C/P bits from both the old S word and the update. */
638 nsval
|= ((scntval
& PTH_RWS_CV_BITSALL
) | (updateval
& PTH_RWS_CV_BITSALL
));
640 // if L==S and c&p bits are set, needs clearpre
641 if (((nsval
& PTHRW_COUNT_MASK
) == (lcntval
& PTHRW_COUNT_MASK
)) &&
642 ((nsval
& PTH_RWS_CV_BITSALL
) == PTH_RWS_CV_BITSALL
)) {
643 // reset p bit but retain c bit on the sword
644 nsval
&= PTH_RWS_CV_RESET_PBIT
;
650 newval64
= (((uint64_t)nsval
) << 32);
653 } while (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)c_lseqcnt
) != TRUE
);
656 // if L == S, then reset associated mutex
657 if ((nsval
& PTHRW_COUNT_MASK
) == (lcntval
& PTHRW_COUNT_MASK
)) {
661 if (needclearpre
!= 0) {
663 if (cond
->pshared
== PTHREAD_PROCESS_SHARED
) {
664 flags
|= _PTHREAD_MTX_OPT_PSHARED
;
666 (void)__psynch_cvclrprepost(cond
, lcntval
, ucntval
, nsval
, 0, lcntval
, flags
);
673 pthread_cond_timedwait_relative_np(pthread_cond_t
*cond
, pthread_mutex_t
*mutex
, const struct timespec
*abstime
)
675 return _pthread_cond_wait(cond
, mutex
, abstime
, 1, 0);
678 #endif /* !BUILDING_VARIANT ] */
681 pthread_cond_init(pthread_cond_t
*ocond
, const pthread_condattr_t
*attr
)
687 #else /* __DARWIN_UNIX03 */
689 #endif /* __DARWIN_UNIX03 */
691 _pthread_cond
*cond
= (_pthread_cond
*)ocond
;
692 LOCK_INIT(cond
->lock
);
693 return _pthread_cond_init(cond
, attr
, conforming
);