/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 */
#include "pthread_internals.h"
#include <sys/time.h>	/* For struct timespec and getclock(). */

#if defined(PLOCKSTAT)
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */
extern int _pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *, int);
extern int __unix_conforming;
extern int usenew_mtximpl;

#ifdef PR_5243343
/* 5243343 - temporary hack to detect if we are running the conformance test */
extern int PR_5243343_flag;
#endif /* PR_5243343 */
__private_extern__ int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int isRelative, int isconforming);
#ifndef BUILDING_VARIANT
static void cond_cleanup(void *arg);
static void cond_dropwait(npthread_cond_t *cond, int error, uint32_t updateval);
static void __pthread_cond_set_signature(npthread_cond_t *cond);
static int _pthread_cond_destroy_locked(pthread_cond_t *cond);
#endif /* BUILDING_VARIANT */
#if defined(__LP64__)
#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt) \
{ \
	if (cond->misalign != 0) { \
		c_lseqcnt = &cond->c_seq[1]; \
		c_sseqcnt = &cond->c_seq[2]; \
		c_useqcnt = &cond->c_seq[0]; \
	} else { \
		c_lseqcnt = &cond->c_seq[0]; \
		c_sseqcnt = &cond->c_seq[1]; \
		c_useqcnt = &cond->c_seq[2]; \
	} \
}
#else /* __LP64__ */
#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt) \
{ \
	if (cond->misalign != 0) { \
		c_lseqcnt = &cond->c_seq[1]; \
		c_sseqcnt = &cond->c_seq[2]; \
		c_useqcnt = &cond->c_seq[0]; \
	} else { \
		c_lseqcnt = &cond->c_seq[0]; \
		c_sseqcnt = &cond->c_seq[1]; \
		c_useqcnt = &cond->c_seq[2]; \
	} \
}
#endif /* __LP64__ */
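
/*
 * Illustrative sketch (not part of the original source): how the macro above
 * is typically consumed.  COND_GETSEQ_ADDR resolves which of the three 32-bit
 * c_seq words serve as the L (waiter), U (unlock), and S (signal) sequence
 * counters, depending on whether the condvar happens to be 8-byte aligned, so
 * that L and S always form one naturally aligned 64-bit unit for the atomic
 * compare-and-swap operations used throughout this file.  'cv' is a
 * hypothetical, already-initialized condition variable.
 */
#if 0
static void example_read_seq_words(npthread_cond_t *cv)
{
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t lcnt, ucnt, scnt;

	/* pick the word layout for this (possibly misaligned) condvar */
	COND_GETSEQ_ADDR(cv, c_lseqcnt, c_useqcnt, c_sseqcnt);

	/* snapshot the three counters; L and S are adjacent so callers can
	 * swap them together as a single 64-bit value */
	lcnt = *c_lseqcnt;
	ucnt = *c_useqcnt;
	scnt = *c_sseqcnt;
	(void)lcnt; (void)ucnt; (void)scnt;
}
#endif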
#define _KSYN_TRACE_ 0

#if _KSYN_TRACE_
/* The Function qualifiers */
#define DBG_FUNC_START		1
#define DBG_FUNC_END		2
#define DBG_FUNC_NONE		0

int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);

#define _KSYN_TRACE_UM_LOCK	0x9000060
#define _KSYN_TRACE_UM_UNLOCK	0x9000064
#define _KSYN_TRACE_UM_MHOLD	0x9000068
#define _KSYN_TRACE_UM_MDROP	0x900006c
#define _KSYN_TRACE_UM_CVWAIT	0x9000070
#define _KSYN_TRACE_UM_CVSIG	0x9000074
#define _KSYN_TRACE_UM_CVBRD	0x9000078
#define _KSYN_TRACE_UM_CDROPWT	0x90000a0
#define _KSYN_TRACE_UM_CVCLRPRE	0x90000a4
#endif /* _KSYN_TRACE_ */
#ifndef BUILDING_VARIANT /* [ */
int
pthread_condattr_init(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_COND_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return (0);
}
int
pthread_condattr_destroy(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;	/* Uninitialized */
	return (0);
}
int
pthread_condattr_getpshared(const pthread_condattr_t *attr,
			    int *pshared)
{
	if (attr->sig == _PTHREAD_COND_ATTR_SIG)
	{
		*pshared = (int)attr->pshared;
		return (0);
	} else
	{
		return (EINVAL);	/* Not an initialized 'attribute' structure */
	}
}
/* temp home till pshared is fixed correctly */
int
pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
	if (attr->sig == _PTHREAD_COND_ATTR_SIG)
	{
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			return (0);
		} else
		{
			return (EINVAL);	/* Invalid parameter */
		}
	} else
	{
		return (EINVAL);	/* Not an initialized 'attribute' structure */
	}
}
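
/*
 * Illustrative usage of the attribute API above (not part of the original
 * source).  On this implementation only PTHREAD_PROCESS_PRIVATE is accepted
 * unless __DARWIN_UNIX03 is set, in which case PTHREAD_PROCESS_SHARED is
 * accepted as well.  'example_make_cond' is a hypothetical helper.
 */
#if 0
static int example_make_cond(pthread_cond_t *cv)
{
	pthread_condattr_t attr;
	int err;

	pthread_condattr_init(&attr);
	err = pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
	if (err == 0)
		err = pthread_cond_init(cv, &attr);
	pthread_condattr_destroy(&attr);
	return (err);
}
#endif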
__private_extern__ int
_pthread_cond_init(pthread_cond_t *ocond,
		   const pthread_condattr_t *attr,
		   int conforming)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;

	cond->busy = (npthread_mutex_t *)NULL;
	cond->c_seq[0] = 0;
	cond->c_seq[1] = 0;
	cond->c_seq[2] = 0;

	if (((uintptr_t)cond & 0x07) != 0) {
		cond->misalign = 1;
		cond->c_seq[2] = PTH_RWS_CV_CBIT;
	} else {
		cond->misalign = 0;
		cond->c_seq[1] = PTH_RWS_CV_CBIT;	/* set Sword to 0c */
	}
	if (conforming) {
		if (attr) {
			cond->pshared = attr->pshared;
		} else {
			cond->pshared = _PTHREAD_DEFAULT_PSHARED;
		}
	} else {
		cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	}
	/*
	 * For the new style mutex, interlocks are not held all the time.
	 * The signature needs to be set at the very end, and we need
	 * to protect against the compiler reorganizing the code.
	 * cond->sig = _PTHREAD_COND_SIG;
	 */
	__pthread_cond_set_signature(cond);
	return (0);
}
int
pthread_cond_destroy(pthread_cond_t *ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int ret;

	/* provide backwards compat for apps using un-inited condition variables */
	if ((cond->sig != _PTHREAD_COND_SIG) && (cond->sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	LOCK(cond->lock);
	ret = _pthread_cond_destroy_locked(ocond);
	UNLOCK(cond->lock);

	return (ret);
}
static int
_pthread_cond_destroy_locked(pthread_cond_t *ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int ret;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t lcntval, ucntval, scntval;
	uint64_t oldval64, newval64;

	if (cond->sig == _PTHREAD_COND_SIG)
	{
retry:
		COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		if ((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) {
			/* validate it is not busy */
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;

			if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry;
			cond->sig = _PTHREAD_NO_SIG;
			ret = 0;
		} else
			ret = EBUSY;
	} else if (cond->sig == _PTHREAD_COND_SIG_init) {
		cond->sig = _PTHREAD_NO_SIG;
		ret = 0;
	} else
		ret = EINVAL;	/* Not an initialized condition variable structure */
	return (ret);
}
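
/*
 * Illustrative teardown sequence (not part of the original source): destroy
 * succeeds only when L == S, i.e. no waiter is still accounted for, so a
 * correct shutdown wakes all waiters first.  'example_teardown' is a
 * hypothetical helper.
 */
#if 0
static void example_teardown(pthread_cond_t *cv, pthread_mutex_t *m)
{
	pthread_mutex_lock(m);
	pthread_cond_broadcast(cv);	/* release any current waiters */
	pthread_mutex_unlock(m);
	if (pthread_cond_destroy(cv) == EBUSY) {
		/* a waiter has not drained yet; the caller must retry later */
	}
}
#endif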
/*
 * Signal a condition variable, waking up all threads waiting for it.
 */
int
pthread_cond_broadcast(pthread_cond_t *ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int sig = cond->sig;
	uint32_t flags, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint64_t oldval64, newval64, mugen, cvlsgen, cvudgen, mtid = 0;
	int diffgen, error = 0;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t *pmtx = NULL;
	uint32_t nlval, ulval;
	int needclearpre = 0, retry_count = 0, uretry_count = 0;
	int ucountreset = 0;

	/* provide backwards compat for apps using un-inited condition variables */
	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (sig != _PTHREAD_COND_SIG) {
		LOCK(cond->lock);
		if (cond->sig == _PTHREAD_COND_SIG_init) {
			_pthread_cond_init(ocond, NULL, 0);
			/* just inited, nothing to post */
			UNLOCK(cond->lock);
			return (0);
		} else if (cond->sig != _PTHREAD_COND_SIG) {
			/* Not a condition variable */
			UNLOCK(cond->lock);
			return (EINVAL);
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif

retry:
	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
	    ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK))) {
		/* validate it is spurious and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;

		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, 0xf1f1f1f1, 0);
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, scntval, 0, 0xf1f1f1f1, 0);
#endif
		return (0);
	}

	/* validate to eliminate spurious values, race snapshots */
	if (is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
		/* since ucntval may be newer, just redo */
		retry_count++;
		if (retry_count > 8192) {
			return (EAGAIN);
		} else {
			sched_yield();
			goto retry;
		}
	} else if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
		/* since ucntval may be newer, just redo */
		uretry_count++;
		if (uretry_count > 8192) {
			/*
			 * The U value, if not used for a while, can go out of sync;
			 * set it to the S value and try one more time.
			 */
			if (ucountreset != 0)
				return (EAGAIN);
			else if (OSAtomicCompareAndSwap32Barrier(ucntval, (scntval & PTHRW_COUNT_MASK), (volatile int32_t *)c_useqcnt) == TRUE) {
				/* now the U is reset to the S value */
				ucountreset = 1;
				uretry_count = 0;
			}
		}
		sched_yield();
		goto retry;
	}

	if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
		/* If U < S, set U = S+diff due to intr's TO, etc */
		ulval = (scntval & PTHRW_COUNT_MASK);
	} else {
		/* If U >= S, set U = U+diff due to intr's TO, etc */
		ulval = (ucntval & PTHRW_COUNT_MASK);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, lcntval, ucntval, scntval, diffgen, 0);
#endif

	diffgen = diff_genseq((lcntval & PTHRW_COUNT_MASK), (ulval & PTHRW_COUNT_MASK));

	/* set U = L */
	ulval = (lcntval & PTHRW_COUNT_MASK);
	if (OSAtomicCompareAndSwap32Barrier(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE) {
		goto retry;
	}

	flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED)
		flags |= _PTHREAD_MTX_OPT_PSHARED;
	pmtx = NULL;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, 3, diffgen, flags, 0);
#endif

	nlval = lcntval;
	mugen = 0;

	/* pass the old U val so the kernel will know the diffgen */
	cvlsgen = ((uint64_t)scntval << 32) | nlval;
	cvudgen = ((uint64_t)ucntval << 32) | diffgen;

	updateval = __psynch_cvbroad(ocond, cvlsgen, cvudgen, flags, (pthread_mutex_t *)pmtx, mugen, mtid);

	if (updateval != (uint32_t)-1) {
		/* if the kernel woke some threads, update S for them, as they will not access the cv on their way out */
		/* Were any threads woken or bits to be set? */
		if (updateval != 0) {
retry2:
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;
			/* update scntval with the number of expected returns and bits */
			nlval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			/* set bits */
			nlval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, 0x25, lcntval, scntval, updateval, 0);
#endif
			/* if L==S and the c&p bits are set, needs clearpre */
			if (((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK))
			    && ((nlval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				/* reset the p bit but retain the c bit on the sword */
				nlval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			} else
				needclearpre = 0;

			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = (((uint64_t)nlval) << 32);
			newval64 |= lcntval;

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, 0x25, nlval, scntval, updateval, 0);
#endif

			if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry2;

			/* if L == S, then reset the associated mutex */
			if ((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
				cond->busy = (npthread_mutex_t *)NULL;
			}

			if (needclearpre != 0) {
				(void)__psynch_cvclrprepost(ocond, lcntval, ucntval, nlval, 0, lcntval, flags);
			}
		}
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, 0, error, 0, 0);
#endif
	return (error);
}
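
/*
 * Illustrative caller-side pattern for the broadcast path above (not part of
 * the original source): the predicate is always changed under the mutex, so
 * the L/U/S accounting seen by the kernel matches the real set of waiters.
 * 'done' and 'example_broadcast' are hypothetical names.
 */
#if 0
static int done;	/* hypothetical predicate, guarded by 'lock' */

static void example_broadcast(pthread_mutex_t *lock, pthread_cond_t *cv)
{
	pthread_mutex_lock(lock);
	done = 1;
	pthread_cond_broadcast(cv);	/* wake every thread waiting on cv */
	pthread_mutex_unlock(lock);
}
#endif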
/*
 * Signal a condition variable, waking a specified thread.
 */
int
pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int sig = cond->sig;
	uint32_t flags, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval = 0;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen, mtid = 0;
	int needclearpre = 0, retry_count = 0, uretry_count = 0;
	int error, ucountreset = 0;

	/* provide backwards compat for apps using un-inited condition variables */
	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (cond->sig != _PTHREAD_COND_SIG) {
		LOCK(cond->lock);
		if (cond->sig != _PTHREAD_COND_SIG) {
			if (cond->sig == _PTHREAD_COND_SIG_init) {
				_pthread_cond_init(ocond, NULL, 0);
				/* just inited, nothing to post yet */
				UNLOCK(cond->lock);
				return (0);
			} else {
				UNLOCK(cond->lock);
				return (EINVAL);
			}
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif

retry:
	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
	    ((thread == 0) && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
		/* If L <= S+U, it is a spurious broadcast */
		/* validate it is spurious and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;

		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, 0xf1f1f1f1, 0);
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, scntval, 0, 0xf1f1f1f1, 0);
#endif
		return (0);
	}

	if (thread == 0) {
		/* validate to eliminate spurious values, race snapshots */
		if (is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			retry_count++;
			if (retry_count > 8192) {
				return (EAGAIN);
			} else {
				sched_yield();
				goto retry;
			}
		} else if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			uretry_count++;
			if (uretry_count > 8192) {
				/*
				 * The U value, if not used for a while, can go out of sync;
				 * set it to the S value and try one more time.
				 */
				if (ucountreset != 0)
					return (EAGAIN);
				else if (OSAtomicCompareAndSwap32Barrier(ucntval, (scntval & PTHRW_COUNT_MASK), (volatile int32_t *)c_useqcnt) == TRUE) {
					/* now the U is reset to the S value */
					ucountreset = 1;
					uretry_count = 0;
				}
			}
			sched_yield();
			goto retry;
		}
	} /* thread == 0 ) */ else {
		/*
		 * Skip manipulating the U count, as an ESRCH from the kernel cannot be
		 * handled properly. The S count will cover the imbalance, and the next
		 * signal without a thread, or a broadcast, will correct it. But we need
		 * to send the right U to the kernel so it will use that to look for the
		 * appropriate sequence. So the ulval is computed anyway.
		 */
	}

	if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
		/* If U < S, set U = S+1 due to intr's TO, etc */
		ulval = (scntval & PTHRW_COUNT_MASK) + PTHRW_INC;
	} else {
		/* If U >= S, set U = U+1 due to intr's TO, etc */
		ulval = (ucntval & PTHRW_COUNT_MASK) + PTHRW_INC;
	}

	if (thread == 0) {
		if (OSAtomicCompareAndSwap32Barrier(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE) {
			goto retry;
		}
	}

	flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED)
		flags |= _PTHREAD_MTX_OPT_PSHARED;

	nlval = lcntval;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 3, nlval, ulval, 0);
#endif

	/* pass the old u val so the kernel will know the diffgen */
	mugen = 0;
	cvlsgen = ((uint64_t)scntval << 32) | nlval;

	updateval = __psynch_cvsignal(ocond, cvlsgen, ucntval, pthread_mach_thread_np(thread), (pthread_mutex_t *)0, mugen, mtid, flags);

	if (updateval != (uint32_t)-1) {
		/* if the kernel woke some threads, update S for them, as they will not access the cv on their way out */
		/* Were any threads woken or bits to be set? */
		if (updateval != 0) {
retry2:
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;
			/* update scntval with the number of expected returns and bits */
			nlval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			/* set bits */
			nlval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, 0x25, 0, 0, updateval, 0);
#endif
			/* if L==S and the c&p bits are set, needs clearpre */
			if (((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK))
			    && ((nlval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				/* reset the p bit but retain the c bit on the sword */
				nlval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			} else
				needclearpre = 0;

			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = (((uint64_t)nlval) << 32);
			newval64 |= lcntval;

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, 0x25, nlval, ulval, updateval, 0);
#endif

			if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry2;

			/* if L == S, then reset the associated mutex */
			if ((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
				cond->busy = (npthread_mutex_t *)NULL;
			}

			if (needclearpre != 0) {
				(void)__psynch_cvclrprepost(ocond, lcntval, ucntval, nlval, 0, lcntval, flags);
			}
		}
	}

	error = 0;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
	return (error);
}
/*
 * Signal a condition variable, waking only one thread.
 */
int
pthread_cond_signal(pthread_cond_t *cond)
{
	return pthread_cond_signal_thread_np(cond, NULL);
}
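
/*
 * Illustrative sketch (not part of the original source): since
 * pthread_cond_signal() is signal_thread_np() with thread == NULL, the kernel
 * picks the wakeup target; passing a pthread_t instead requests a specific
 * waiter.  'example_signal_one' is a hypothetical helper.
 */
#if 0
static void example_signal_one(pthread_mutex_t *lock, pthread_cond_t *cv)
{
	pthread_mutex_lock(lock);
	/* ... update the predicate the waiters re-check ... */
	pthread_cond_signal(cv);	/* wake any one waiter */
	pthread_mutex_unlock(lock);
}
#endif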
/*
 * Manage a list of condition variables associated with a mutex
 */

/*
 * Suspend waiting for a condition variable.
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
 * remaining conforming behavior.
 */
__private_extern__ int
_pthread_cond_wait(pthread_cond_t *ocond,
		   pthread_mutex_t *omutex,
		   const struct timespec *abstime,
		   int isRelative,
		   int isconforming)
{
	int retval;
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	npthread_mutex_t *mutex = (npthread_mutex_t *)omutex;
	mach_timespec_t then = {0, 0};
	struct timespec cthen = {0, 0};
	int sig = cond->sig;
	int msig = mutex->sig;
	npthread_mutex_t *pmtx;
	uint32_t mtxgen, mtxugen, flags = 0, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval, savebits;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen;
	uint32_t *npmtx = NULL;
	int error, local_error;

	extern void _pthread_testcancel(pthread_t thread, int isconforming);

	/* provide backwards compat for apps using un-inited condition variables */
	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (isconforming) {
		if ((msig != _PTHREAD_MUTEX_SIG) && ((msig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP))
			return (EINVAL);
		if (isconforming > 0)
			_pthread_testcancel(pthread_self(), 1);
	}

	if (cond->sig != _PTHREAD_COND_SIG) {
		LOCK(cond->lock);
		if (cond->sig != _PTHREAD_COND_SIG) {
			if (cond->sig == _PTHREAD_COND_SIG_init) {
				_pthread_cond_init(ocond, NULL, 0);
			} else {
				UNLOCK(cond->lock);
				return (EINVAL);
			}
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, (uint32_t)cond, isRelative, 0, (uint32_t)abstime, 0);
#endif
	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	/* send relative time to kernel */
	if (abstime) {
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, 0x11111111, abstime->tv_nsec, abstime->tv_sec, 0, 0);
#endif
		if (isRelative == 0) {
			struct timespec now;
			struct timeval tv;
			gettimeofday(&tv, NULL);
			TIMEVAL_TO_TIMESPEC(&tv, &now);

			/* Compute relative time to sleep */
			then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
			then.tv_sec = abstime->tv_sec - now.tv_sec;
			if (then.tv_nsec < 0) {
				then.tv_nsec += NSEC_PER_SEC;
				then.tv_sec--;
			}
			if (((int)then.tv_sec < 0) ||
			    ((then.tv_sec == 0) && (then.tv_nsec == 0))) {
				return (ETIMEDOUT);
			}
			if (isconforming != 0) {
				cthen.tv_sec = abstime->tv_sec;
				cthen.tv_nsec = abstime->tv_nsec;
				if ((cthen.tv_sec < 0) || (cthen.tv_nsec < 0)) {
					return (EINVAL);
				}
				if (cthen.tv_nsec >= NSEC_PER_SEC) {
					return (EINVAL);
				}
			}
		} else {
			then.tv_sec = abstime->tv_sec;
			then.tv_nsec = abstime->tv_nsec;
			if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
				return (ETIMEDOUT);
			}
			if (isconforming && ((then.tv_sec < 0) || (then.tv_nsec < 0))) {
				return (EINVAL);
			}
			if (then.tv_nsec >= NSEC_PER_SEC) {
				return (EINVAL);
			}
		}
	}

	if ((cond->busy != (npthread_mutex_t *)NULL) && (cond->busy != mutex))
		return (EINVAL);

	pmtx = mutex;
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

	oldval64 = (((uint64_t)scntval) << 32);
	oldval64 |= lcntval;

	/* remove the c and p bits on the S word */
	savebits = scntval & PTH_RWS_CV_BITSALL;
	ulval = (scntval & PTHRW_COUNT_MASK);
	nlval = lcntval + PTHRW_INC;
	newval64 = (((uint64_t)ulval) << 32);
	newval64 |= nlval;

	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
		goto retry;

	cond->busy = mutex;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	retval = __mtx_droplock(pmtx, PTHRW_INC, &flags, &npmtx, &mtxgen, &mtxugen);

	/* TBD: cases are for normal (non-owner for recursive mutex; error checking) */
	if (retval != 0)
		return (EINVAL);

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) == 0) {
		npmtx = NULL;
		mugen = 0;
	} else
		mugen = ((uint64_t)mtxugen << 32) | mtxgen;
	flags &= ~_PTHREAD_MTX_OPT_MUTEX;	/* reset the mutex bit as this is a cvar */

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 3, (uint32_t)mutex, flags, 0);
#endif

	cvlsgen = ((uint64_t)(ulval | savebits) << 32) | nlval;

	if (isconforming) {
		pthread_cleanup_push(cond_cleanup, (void *)cond);
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
		_pthread_testcancel(pthread_self(), isconforming);
		pthread_cleanup_pop(0);
	} else {
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
	}

	retval = 0;

	if (updateval == (uint32_t)-1) {
		local_error = errno;
		error = local_error & 0xff;
		if (error == ETIMEDOUT) {
			retval = ETIMEDOUT;
		} else if (error == EINTR) {
			/*
			 * EINTR can be treated as a spurious wakeup unless we were canceled.
			 */
			retval = 0;
		} else
			retval = EINVAL;
		// (void)__kdebug_trace(0x9000070 | 0, (uint32_t)cond, 0xf1f1f2f2, local_error, error, 0);

		/* add unlock ref to show one less waiter */
		cond_dropwait(cond, local_error, 0);
	} else {
		// (void)__kdebug_trace(0x9000070 | 0, (uint32_t)cond, 0xf3f3f4f4, updateval, 0, 0);
		/* successful wait */
		if (updateval != 0) {
			/* the return is due to a prepost and might have bit states */
			/* update S and return for prepost if needed */
			cond_dropwait(cond, 0, updateval);
		}
		retval = 0;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 4, retval, 0, 0);
#endif
	pthread_mutex_lock(omutex);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0, 0, retval, 0);
#endif
	return (retval);
}
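
/*
 * Illustrative caller-side wait loop (not part of the original source):
 * _pthread_cond_wait() may return spuriously (EINTR is folded into a spurious
 * wakeup above), so the predicate must be re-checked in a loop.
 * 'example_wait' is a hypothetical helper.
 */
#if 0
static int example_wait(pthread_mutex_t *lock, pthread_cond_t *cv,
			int *predicate, const struct timespec *deadline)
{
	int err = 0;

	pthread_mutex_lock(lock);
	while (*predicate == 0) {
		/* deadline is an absolute time; the wait re-acquires 'lock'
		 * before returning, even on timeout */
		err = pthread_cond_timedwait(cv, lock, deadline);
		if (err == ETIMEDOUT)
			break;
	}
	pthread_mutex_unlock(lock);
	return (err);
}
#endif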
/*
 * For the new style mutex, interlocks are not held all the time.
 * The signature needs to be set at the very end, and we need
 * to protect against the compiler reorganizing the code.
 */
static void
__pthread_cond_set_signature(npthread_cond_t *cond)
{
	cond->sig = _PTHREAD_COND_SIG;
}
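
/*
 * Illustrative sketch (not part of the original source) of the ordering
 * concern behind the out-of-line function above: if the signature store were
 * inlined, the compiler could hoist it above the c_seq initialization in
 * _pthread_cond_init().  An explicit barrier, shown here as an assumed
 * alternative using OSMemoryBarrier() from <libkern/OSAtomic.h>, would keep
 * the store last as well.
 */
#if 0
static void example_set_signature_with_barrier(npthread_cond_t *cond)
{
	OSMemoryBarrier();	/* all prior initialization is visible first */
	cond->sig = _PTHREAD_COND_SIG;
}
#endif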
static void
cond_cleanup(void *arg)
{
	npthread_cond_t *cond = (npthread_cond_t *)arg;
	pthread_mutex_t *mutex;
	pthread_t thread = pthread_self();
	int thcanceled = 0;

	LOCK(thread->lock);
	thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
	UNLOCK(thread->lock);

	if (thcanceled == 0)
		return;

	mutex = (pthread_mutex_t *)cond->busy;

	/* add unlock ref to show one less waiter */
	cond_dropwait(cond, thread->cancel_error, 0);

	/*
	 * Can't do anything if this fails -- we're on the way out
	 */
	if (mutex != NULL)
		(void)pthread_mutex_lock(mutex);
}
#define ECVCERORR	256
#define ECVPERORR	512

static void
cond_dropwait(npthread_cond_t *cond, int error, uint32_t updateval)
{
	int sig = cond->sig;
	pthread_cond_t *ocond = (pthread_cond_t *)cond;
	int needclearpre = 0;
	uint32_t diffgen, nlval, ulval, flags;
	uint32_t lcntval, ucntval, scntval, lval;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64;

	/* no cleanup to do for apps using un-inited condition variables */
	if (sig != _PTHREAD_COND_SIG)
		return;

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	if (error != 0) {
		lval = PTHRW_INC;
		if ((error & ECVCERORR) != 0)
			lval |= PTH_RWS_CV_CBIT;
		if ((error & ECVPERORR) != 0)
			lval |= PTH_RWS_CV_PBIT;
	} else {
		lval = updateval;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_START, (uint32_t)cond, error, updateval, 0xee, 0);
#endif
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

	diffgen = diff_genseq((lcntval & PTHRW_COUNT_MASK), (scntval & PTHRW_COUNT_MASK));	/* pending waiters */
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, lcntval, scntval, diffgen, 0);
#endif
	if (diffgen <= 0) {
		/* TBD: Assert, should not be the case */
		/* validate it is spurious and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;
		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
		return;
	}

	/* update S by one */
	oldval64 = (((uint64_t)scntval) << 32);
	oldval64 |= lcntval;

	/* update scntval with the number of expected returns and bits */
	ulval = (scntval & PTHRW_COUNT_MASK) + (lval & PTHRW_COUNT_MASK);
	/* set bits */
	ulval |= ((scntval & PTH_RWS_CV_BITSALL) | (lval & PTH_RWS_CV_BITSALL));

	nlval = lcntval;
	needclearpre = 0;

	/* If L==S, need to return to kernel */
	if ((nlval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
		if ((ulval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL) {
			/* reset the p bit but retain the c bit on the sword */
			needclearpre = 1;
			ulval &= PTH_RWS_CV_RESET_PBIT;
		}
	}

	newval64 = (((uint64_t)ulval) << 32);
	newval64 |= nlval;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, 0xffff, nlval, ulval, 0);
#endif
	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
		goto retry;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, 2, 0, 0xee, 0);
#endif
	if ((nlval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
		/* last usage; remove the mutex */
		cond->busy = NULL;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, nlval, ucntval, ulval, PTHRW_INC, 0);
#endif
	if (needclearpre != 0) {
		flags = 0;
		if (cond->pshared == PTHREAD_PROCESS_SHARED)
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		(void)__psynch_cvclrprepost(ocond, nlval, ucntval, ulval, 0, nlval, flags);
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_END, nlval, ucntval, ulval, PTHRW_INC, 0);
#endif
	return;
}
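
/*
 * Illustrative decoding of the wait error word (not part of the original
 * source): the low byte carries the errno, while the ECVCERORR/ECVPERORR
 * bits flag that the kernel wants the C and/or P bits folded into the S
 * word, as cond_dropwait() does above.  'example_decode_wait_error' is a
 * hypothetical helper.
 */
#if 0
static void example_decode_wait_error(int local_error)
{
	int err = local_error & 0xff;			/* ETIMEDOUT, EINTR, ... */
	int want_cbit = (local_error & ECVCERORR) != 0;	/* fold in PTH_RWS_CV_CBIT */
	int want_pbit = (local_error & ECVPERORR) != 0;	/* fold in PTH_RWS_CV_PBIT */
	(void)err; (void)want_cbit; (void)want_pbit;
}
#endif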
int
pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
				   pthread_mutex_t *mutex,
				   const struct timespec *abstime)
{
	return (_pthread_cond_wait(cond, mutex, abstime, 1, 0));
}
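
/*
 * Illustrative use of the relative variant above (not part of the original
 * source): the third argument is a delta from now, not an absolute time.
 * 'example_wait_100ms' is a hypothetical helper.
 */
#if 0
static int example_wait_100ms(pthread_cond_t *cv, pthread_mutex_t *m)
{
	struct timespec delta = { 0, 100 * 1000 * 1000 };	/* 100 ms from now */
	return (pthread_cond_timedwait_relative_np(cv, m, &delta));
}
#endif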
#else /* !BUILDING_VARIANT */

extern int _pthread_cond_wait(pthread_cond_t *cond,
			pthread_mutex_t *mutex,
			const struct timespec *abstime,
			int isRelative,
			int isconforming);

#endif /* !BUILDING_VARIANT ] */
/*
 * Initialize a condition variable. Note: 'attr' is ignored.
 */

/*
 * Initialize a condition variable. This is the public interface.
 * We can't trust the lock, so initialize it first before taking
 * it.
 */
int
pthread_cond_init(pthread_cond_t *cond,
		  const pthread_condattr_t *attr)
{
	int conforming;

#if __DARWIN_UNIX03
	conforming = 1;
#else /* __DARWIN_UNIX03 */
	conforming = 0;
#endif /* __DARWIN_UNIX03 */

	/* lock is at the same offset in both structures */
	LOCK_INIT(cond->lock);

	return (_pthread_cond_init(cond, attr, conforming));
}
/*
pthread_cond_wait(pthread_cond_t *cond,
		  pthread_mutex_t *mutex)

pthread_cond_timedwait(pthread_cond_t *cond,
		       pthread_mutex_t *mutex,
		       const struct timespec *abstime)

moved to pthread_cancelable.c */