/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#include "pthread_internals.h"
#include <sys/time.h>	/* For struct timespec and getclock(). */
#include <stdio.h>

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

extern int _pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *, int);
extern int __unix_conforming;
extern int usenew_mtximpl;

#ifdef PR_5243343
/* 5243343 - temporary hack to detect if we are running the conformance test */
extern int PR_5243343_flag;
#endif /* PR_5243343 */

__private_extern__ int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int isRelative, int isconforming);
#ifndef BUILDING_VARIANT
static void cond_cleanup(void *arg);
static void cond_dropwait(npthread_cond_t *cond, int error, uint32_t updateval);
static void __pthread_cond_set_signature(npthread_cond_t *cond);
static int _pthread_cond_destroy_locked(pthread_cond_t *cond);
#endif

#if defined(__LP64__)
#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt) \
{ \
	if (cond->misalign != 0) { \
		c_lseqcnt = &cond->c_seq[1]; \
		c_sseqcnt = &cond->c_seq[2]; \
		c_useqcnt = &cond->c_seq[0]; \
	} else { \
		/* aligned */ \
		c_lseqcnt = &cond->c_seq[0]; \
		c_sseqcnt = &cond->c_seq[1]; \
		c_useqcnt = &cond->c_seq[2]; \
	} \
}
#else /* __LP64__ */
#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt) \
{ \
	if (cond->misalign != 0) { \
		c_lseqcnt = &cond->c_seq[1]; \
		c_sseqcnt = &cond->c_seq[2]; \
		c_useqcnt = &cond->c_seq[0]; \
	} else { \
		/* aligned */ \
		c_lseqcnt = &cond->c_seq[0]; \
		c_sseqcnt = &cond->c_seq[1]; \
		c_useqcnt = &cond->c_seq[2]; \
	} \
}
#endif /* __LP64__ */
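
/*
 * Layout note (editor's sketch, inferred from the macro above; not part of
 * the original source): a condvar carries three 32-bit sequence words --
 * L (lock/enter count), S (signalled/exit count) and U (unlock count).
 * The 64-bit compare-and-swaps in this file always target the (L, S) pair
 * through c_lseqcnt, so when the structure is not 8-byte aligned the words
 * are rotated to keep L and S adjacent and 8-byte aligned:
 *
 *	aligned:	c_seq[0] = L, c_seq[1] = S, c_seq[2] = U
 *	misaligned:	c_seq[0] = U, c_seq[1] = L, c_seq[2] = S
 *
 * Typical use, as seen throughout this file:
 *
 *	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
 *	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
 *	lcntval = *c_lseqcnt;
 */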

#define _KSYN_TRACE_ 0

#if _KSYN_TRACE_
/* The Function qualifiers */
#define DBG_FUNC_START	1
#define DBG_FUNC_END	2
#define DBG_FUNC_NONE	0

int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);

#define _KSYN_TRACE_UM_LOCK	0x9000060
#define _KSYN_TRACE_UM_UNLOCK	0x9000064
#define _KSYN_TRACE_UM_MHOLD	0x9000068
#define _KSYN_TRACE_UM_MDROP	0x900006c
#define _KSYN_TRACE_UM_CVWAIT	0x9000070
#define _KSYN_TRACE_UM_CVSIG	0x9000074
#define _KSYN_TRACE_UM_CVBRD	0x9000078
#define _KSYN_TRACE_UM_CDROPWT	0x90000a0
#define _KSYN_TRACE_UM_CVCLRPRE	0x90000a4

#endif /* _KSYN_TRACE_ */


#ifndef BUILDING_VARIANT /* [ */

int
pthread_condattr_init(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_COND_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return (0);
}

int
pthread_condattr_destroy(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;	/* Uninitialized */
	return (0);
}

int
pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared)
{
	if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		return (0);
	} else {
		return (EINVAL);	/* Not an initialized 'attribute' structure */
	}
}

/* temporary home till pshared is fixed correctly */
int
pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
	if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			return (0);
		} else {
			return (EINVAL);	/* Invalid parameter */
		}
	} else {
		return (EINVAL);	/* Not an initialized 'attribute' structure */
	}
}
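
/*
 * Illustrative usage sketch (editor's addition, compiled out; not part of
 * the original source): creating a process-shared condition variable with
 * the attribute calls defined above.  The cv is assumed to live in memory
 * mapped into every participating process.
 */
#if 0
static int
example_pshared_cond_init(pthread_cond_t *cv)
{
	pthread_condattr_t attr;
	int err;

	if ((err = pthread_condattr_init(&attr)) != 0)
		return (err);
	/* EINVAL on non-__DARWIN_UNIX03 builds, per the #if above */
	if ((err = pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED)) != 0) {
		(void)pthread_condattr_destroy(&attr);
		return (err);
	}
	err = pthread_cond_init(cv, &attr);
	(void)pthread_condattr_destroy(&attr);
	return (err);
}
#endif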

__private_extern__ int
_pthread_cond_init(pthread_cond_t *ocond,
		   const pthread_condattr_t *attr,
		   int conforming)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;

	cond->busy = (npthread_mutex_t *)NULL;
	cond->c_seq[0] = 0;
	cond->c_seq[1] = 0;
	cond->c_seq[2] = 0;
	cond->rfu = 0;

	if (((uintptr_t)cond & 0x07) != 0) {
		cond->misalign = 1;
		cond->c_seq[2] = PTH_RWS_CV_CBIT;
	} else {
		cond->misalign = 0;
		cond->c_seq[1] = PTH_RWS_CV_CBIT;	/* set the C bit in the S word */
	}
	if (conforming) {
		if (attr)
			cond->pshared = attr->pshared;
		else
			cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	} else
		cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	/*
	 * For the new style mutex, interlocks are not held all the time.
	 * We need the signature to be set at the end, and we need to
	 * protect against the compiler reorganizing the code:
	 * cond->sig = _PTHREAD_COND_SIG;
	 */
	__pthread_cond_set_signature(cond);
	return (0);
}

int
pthread_cond_destroy(pthread_cond_t *ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int ret;

	/* to provide backwards compat for apps using uninitialized condition variables */
	if ((cond->sig != _PTHREAD_COND_SIG) && (cond->sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	LOCK(cond->lock);
	ret = _pthread_cond_destroy_locked(ocond);
	UNLOCK(cond->lock);

	return (ret);
}
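
/*
 * Illustrative sketch (editor's addition, compiled out): pthread_cond_destroy()
 * returns EBUSY while waiters are still queued (L != S in the check below),
 * so a safe teardown changes the predicate and wakes everyone first.
 */
#if 0
static int
example_cond_teardown(pthread_cond_t *cv, pthread_mutex_t *mtx, int *shutdown)
{
	pthread_mutex_lock(mtx);
	*shutdown = 1;			/* flip the predicate first */
	pthread_cond_broadcast(cv);	/* then release all waiters */
	pthread_mutex_unlock(mtx);

	/* still EBUSY if a waiter has not yet left the kernel wait */
	return (pthread_cond_destroy(cv));
}
#endif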

static int
_pthread_cond_destroy_locked(pthread_cond_t *ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int ret;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t lcntval, ucntval, scntval;
	uint64_t oldval64, newval64;

retry:
	if (cond->sig == _PTHREAD_COND_SIG) {
		COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		if ((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) {
			/* validate that it is not busy */
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;

			if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry;
			cond->sig = _PTHREAD_NO_SIG;
			ret = 0;
		} else
			ret = EBUSY;
	} else if (cond->sig == _PTHREAD_COND_SIG_init) {
		cond->sig = _PTHREAD_NO_SIG;
		ret = 0;
	} else
		ret = EINVAL;	/* Not an initialized condition variable structure */
	return (ret);
}

/*
 * Broadcast a condition variable, waking up all threads waiting on it.
 */
int
pthread_cond_broadcast(pthread_cond_t *ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int sig = cond->sig;
	uint32_t flags, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint64_t oldval64, newval64, mugen, cvlsgen, cvudgen, mtid = 0;
	int diffgen, error = 0;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t *pmtx = NULL;
	uint32_t nlval, ulval;
	int needclearpre = 0, retry_count = 0, uretry_count = 0;
	int ucountreset = 0;

	/* to provide backwards compat for apps using uninitialized condition variables */
	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (sig != _PTHREAD_COND_SIG) {
		LOCK(cond->lock);
		if (cond->sig == _PTHREAD_COND_SIG_init) {
			_pthread_cond_init(ocond, NULL, 0);
			/* just inited, nothing to post */
			UNLOCK(cond->lock);
			return (0);
		} else if (cond->sig != _PTHREAD_COND_SIG) {
			/* Not a condition variable */
			UNLOCK(cond->lock);
			return (EINVAL);
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
	    ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK))) {
		/* validate that it is spurious and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;

		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, 0xf1f1f1f1, 0);
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, scntval, 0, 0xf1f1f1f1, 0);
#endif
		return (0);
	}

	/* validate to eliminate spurious values and racy snapshots */
	if (is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
		/* the snapshot may be stale; just redo */
		retry_count++;
		if (retry_count > 8192) {
			return (EAGAIN);
		} else {
			sched_yield();
			goto retry;
		}
	} else if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
		/* since ucntval may be newer, just redo */
		uretry_count++;
		if (uretry_count > 8192) {
			/*
			 * The U value can go out of sync if unused for a while;
			 * set it to the S value and try one more time.
			 */
			if (ucountreset != 0)
				return (EAGAIN);
			else if (OSAtomicCompareAndSwap32Barrier(ucntval, (scntval & PTHRW_COUNT_MASK), (volatile int32_t *)c_useqcnt) == TRUE) {
				/* now U is reset to the S value */
				ucountreset = 1;
				uretry_count = 0;
			}
		}
		sched_yield();
		goto retry;
	}

	if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
		/* If U < S, base the diff on S (covers interruptions, timeouts, etc.) */
		ulval = (scntval & PTHRW_COUNT_MASK);
	} else {
		/* If U >= S, base the diff on U (covers interruptions, timeouts, etc.) */
		ulval = (ucntval & PTHRW_COUNT_MASK);
	}

	diffgen = diff_genseq((lcntval & PTHRW_COUNT_MASK), (ulval & PTHRW_COUNT_MASK));
#if _KSYN_TRACE_
	/* traced after the computation so diffgen is initialized */
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, lcntval, ucntval, scntval, diffgen, 0);
#endif

	/* set U = L */
	ulval = (lcntval & PTHRW_COUNT_MASK);
	if (OSAtomicCompareAndSwap32Barrier(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE) {
		goto retry;
	}

	flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED)
		flags |= _PTHREAD_MTX_OPT_PSHARED;
	pmtx = NULL;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, 3, diffgen, flags, 0);
#endif
	nlval = lcntval;

	/* pass the old U val so the kernel will know the diffgen */
	mugen = 0;
	cvlsgen = ((uint64_t)scntval << 32) | nlval;
	cvudgen = ((uint64_t)ucntval << 32) | diffgen;

	updateval = __psynch_cvbroad(ocond, cvlsgen, cvudgen, flags, (pthread_mutex_t *)pmtx, mugen, mtid);

	if (updateval != (uint32_t)-1) {
		/*
		 * If the kernel woke some threads, update S for them, as they
		 * will not access the cv on their way out.
		 */
		/* Were any threads woken or bits to be set? */
		if (updateval != 0) {
retry2:
			needclearpre = 0;
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;
			/* update scntval with the number of expected returns and bits */
			nlval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			/* set bits */
			nlval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, 0x25, lcntval, scntval, updateval, 0);
#endif
			/* if L == S and the c and p bits are set, needs clearpre */
			if (((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK))
			    && ((nlval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				/* reset the p bit but retain the c bit on the S word */
				nlval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			}

			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = (((uint64_t)nlval) << 32);
			newval64 |= lcntval;

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, 0x25, nlval, scntval, updateval, 0);
#endif

			if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry2;

			/* if L == S, then reset the associated mutex */
			if ((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
				cond->busy = (npthread_mutex_t *)NULL;
			}

			if (needclearpre != 0) {
				(void)__psynch_cvclrprepost(ocond, lcntval, ucntval, nlval, 0, lcntval, flags);
			}
		}
	}
	error = 0;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, 0, error, 0, 0);
#endif
	return (error);
}
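
/*
 * Illustrative sketch (editor's addition, compiled out): the
 * predicate-under-mutex pattern pthread_cond_broadcast() is built for;
 * every waiter loops and rechecks the predicate, since wakeups may be
 * spurious.
 */
#if 0
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t g_ready = PTHREAD_COND_INITIALIZER;
static int g_generation = 0;

static void
example_release_all(void)
{
	pthread_mutex_lock(&g_lock);
	g_generation++;				/* change the predicate first */
	pthread_cond_broadcast(&g_ready);	/* then wake every waiter */
	pthread_mutex_unlock(&g_lock);
}

static void
example_wait_for_next(int seen)
{
	pthread_mutex_lock(&g_lock);
	while (g_generation == seen)
		pthread_cond_wait(&g_ready, &g_lock);
	pthread_mutex_unlock(&g_lock);
}
#endif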


/*
 * Signal a condition variable, waking a specified thread.
 */
int
pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int sig = cond->sig;
	uint32_t flags, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval = 0;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen, mtid = 0;
	int needclearpre = 0, retry_count = 0, uretry_count = 0;
	int error, ucountreset = 0;

	/* to provide backwards compat for apps using uninitialized condition variables */
	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (cond->sig != _PTHREAD_COND_SIG) {
		LOCK(cond->lock);
		if (cond->sig != _PTHREAD_COND_SIG) {
			if (cond->sig == _PTHREAD_COND_SIG_init) {
				_pthread_cond_init(ocond, NULL, 0);
				/* just inited, nothing to post yet */
				UNLOCK(cond->lock);
				return (0);
			} else {
				UNLOCK(cond->lock);
				return (EINVAL);
			}
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif
	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
	    ((thread == 0) && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
		/* If L <= S + U, it is a spurious post */
		/* validate that it is spurious and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;

		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, 0xf1f1f1f1, 0);
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, scntval, 0, 0xf1f1f1f1, 0);
#endif
		return (0);
	}

	if (thread == 0) {
		/* validate to eliminate spurious values and racy snapshots */
		if (is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* the snapshot may be stale; just redo */
			retry_count++;
			if (retry_count > 8192) {
				return (EAGAIN);
			} else {
				sched_yield();
				goto retry;
			}
		} else if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			uretry_count++;
			if (uretry_count > 8192) {
				/*
				 * The U value can go out of sync if unused for a while;
				 * set it to the S value and try one more time.
				 */
				if (ucountreset != 0)
					return (EAGAIN);
				else if (OSAtomicCompareAndSwap32Barrier(ucntval, (scntval & PTHRW_COUNT_MASK), (volatile int32_t *)c_useqcnt) == TRUE) {
					/* now U is reset to the S value */
					ucountreset = 1;
					uretry_count = 0;
				}
			}
			sched_yield();
			goto retry;
		}
	} /* thread == 0 */

	if (thread == 0) {
		/*
		 * When a target thread is given we skip manipulating the U count,
		 * as an ESRCH return from the kernel cannot be handled properly.
		 * The S count will cover the imbalance, and the next signal
		 * without a thread, or a broadcast, will correct it.  But we need
		 * to send the right U to the kernel so it will use that to look
		 * for the appropriate sequence; so ulval is computed anyway.
		 */

		if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
			/* If U < S, set U = S+1 (covers interruptions, timeouts, etc.) */
			ulval = (scntval & PTHRW_COUNT_MASK) + PTHRW_INC;
		} else {
			/* If U >= S, set U = U+1 (covers interruptions, timeouts, etc.) */
			ulval = (ucntval & PTHRW_COUNT_MASK) + PTHRW_INC;
		}

		if (OSAtomicCompareAndSwap32Barrier(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE) {
			goto retry;
		}
	}

	flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED)
		flags |= _PTHREAD_MTX_OPT_PSHARED;

	nlval = lcntval;
#if _KSYN_TRACE_
	/* traced after the assignment so nlval is initialized */
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 3, nlval, ulval, 0);
#endif
	/* pass the old U val so the kernel will know the diffgen */
	mugen = 0;
	cvlsgen = ((uint64_t)scntval << 32) | nlval;

	updateval = __psynch_cvsignal(ocond, cvlsgen, ucntval, pthread_mach_thread_np(thread), (pthread_mutex_t *)0, mugen, mtid, flags);


	if (updateval != (uint32_t)-1) {
		/*
		 * If the kernel woke some threads, update S for them, as they
		 * will not access the cv on their way out.
		 */
		/* Were any threads woken or bits to be set? */
		if (updateval != 0) {
retry2:
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;
			/* update scntval with the number of expected returns and bits */
			nlval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			/* set bits */
			nlval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, 0x25, 0, 0, updateval, 0);
#endif
			/* if L == S and the c and p bits are set, needs clearpre */
			if (((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK))
			    && ((nlval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				/* reset the p bit but retain the c bit on the S word */
				nlval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			} else
				needclearpre = 0;

			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = (((uint64_t)nlval) << 32);
			newval64 |= lcntval;

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, 0x25, nlval, ulval, updateval, 0);
#endif

			if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry2;

			/* if L == S, then reset the associated mutex */
			if ((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
				cond->busy = (npthread_mutex_t *)NULL;
			}

			if (needclearpre != 0) {
				(void)__psynch_cvclrprepost(ocond, lcntval, ucntval, nlval, 0, lcntval, flags);
			}
		}
	}

	error = 0;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
	return (error);
}

/*
 * Signal a condition variable, waking only one thread.
 */
int
pthread_cond_signal(pthread_cond_t *cond)
{
	return pthread_cond_signal_thread_np(cond, NULL);
}
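
/*
 * Illustrative sketch (editor's addition, compiled out): pthread_cond_signal()
 * wakes at most one waiter, which suits a queue where each item is consumed
 * by exactly one thread.
 */
#if 0
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_nonempty = PTHREAD_COND_INITIALIZER;
static int q_items = 0;

static void
example_produce(void)
{
	pthread_mutex_lock(&q_lock);
	q_items++;
	pthread_cond_signal(&q_nonempty);	/* one item, wake one consumer */
	pthread_mutex_unlock(&q_lock);
}

static void
example_consume(void)
{
	pthread_mutex_lock(&q_lock);
	while (q_items == 0)			/* guard against spurious wakeups */
		pthread_cond_wait(&q_nonempty, &q_lock);
	q_items--;
	pthread_mutex_unlock(&q_lock);
}
#endif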

/*
 * Manage a list of condition variables associated with a mutex
 */


/*
 * Suspend the calling thread, waiting on a condition variable.
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
 * remaining conforming behavior.
 */
__private_extern__ int
_pthread_cond_wait(pthread_cond_t *ocond,
		   pthread_mutex_t *omutex,
		   const struct timespec *abstime,
		   int isRelative,
		   int isconforming)
{
	int retval;
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	npthread_mutex_t *mutex = (npthread_mutex_t *)omutex;
	mach_timespec_t then = {0, 0};
	struct timespec cthen = {0, 0};
	int sig = cond->sig;
	int msig = mutex->sig;
	npthread_mutex_t *pmtx;
	uint32_t mtxgen, mtxugen, flags = 0, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval, savebits;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen;
	uint32_t *npmtx = NULL;
	int error, local_error;

	extern void _pthread_testcancel(pthread_t thread, int isconforming);

	/* to provide backwards compat for apps using uninitialized condition variables */
	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (isconforming) {
		if ((msig != _PTHREAD_MUTEX_SIG) && ((msig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP))
			return (EINVAL);
		if (isconforming > 0)
			_pthread_testcancel(pthread_self(), 1);
	}

	if (cond->sig != _PTHREAD_COND_SIG) {
		LOCK(cond->lock);
		if (cond->sig != _PTHREAD_COND_SIG) {
			if (cond->sig == _PTHREAD_COND_SIG_init) {
				_pthread_cond_init(ocond, NULL, 0);
			} else {
				UNLOCK(cond->lock);
				return (EINVAL);
			}
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, (uint32_t)cond, isRelative, 0, (uint32_t)abstime, 0);
#endif
	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	/* send relative time to kernel */
	if (abstime) {
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, 0x11111111, abstime->tv_nsec, abstime->tv_sec, 0, 0);
#endif
		if (isRelative == 0) {
			struct timespec now;
			struct timeval tv;
			gettimeofday(&tv, NULL);
			TIMEVAL_TO_TIMESPEC(&tv, &now);

			/* Compute the relative time to sleep */
			then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
			then.tv_sec = abstime->tv_sec - now.tv_sec;
			if (then.tv_nsec < 0) {
				then.tv_nsec += NSEC_PER_SEC;
				then.tv_sec--;
			}
			if (((int)then.tv_sec < 0) ||
			    ((then.tv_sec == 0) && (then.tv_nsec == 0))) {
				return ETIMEDOUT;
			}
			if (isconforming != 0) {
				cthen.tv_sec = abstime->tv_sec;
				cthen.tv_nsec = abstime->tv_nsec;
				if ((cthen.tv_sec < 0) || (cthen.tv_nsec < 0)) {
					return EINVAL;
				}
				if (cthen.tv_nsec >= NSEC_PER_SEC) {
					return EINVAL;
				}
			}
		} else {
			then.tv_sec = abstime->tv_sec;
			then.tv_nsec = abstime->tv_nsec;
			if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
				return ETIMEDOUT;
			}
		}
		if (isconforming && ((then.tv_sec < 0) || (then.tv_nsec < 0))) {
			return EINVAL;
		}
		if (then.tv_nsec >= NSEC_PER_SEC) {
			return EINVAL;
		}
	}

	if ((cond->busy != (npthread_mutex_t *)NULL) && (cond->busy != mutex))
		return (EINVAL);

	pmtx = mutex;
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

	oldval64 = (((uint64_t)scntval) << 32);
	oldval64 |= lcntval;

	/* remove the c and p bits from the S word */
	savebits = scntval & PTH_RWS_CV_BITSALL;
	ulval = (scntval & PTHRW_COUNT_MASK);
	nlval = lcntval + PTHRW_INC;
	newval64 = (((uint64_t)ulval) << 32);
	newval64 |= nlval;

	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
		goto retry;

	cond->busy = mutex;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif
	retval = __mtx_droplock(pmtx, PTHRW_INC, &flags, &npmtx, &mtxgen, &mtxugen);

	/* TBD: cases are for normal (non-owner for recursive mutex; error checking) */
	if (retval != 0)
		return (EINVAL);
	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) == 0) {
		npmtx = NULL;
		mugen = 0;
	} else
		mugen = ((uint64_t)mtxugen << 32) | mtxgen;
	flags &= ~_PTHREAD_MTX_OPT_MUTEX;	/* reset the mutex bit as this is a cvar */

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 3, (uint32_t)mutex, flags, 0);
#endif

	cvlsgen = ((uint64_t)(ulval | savebits) << 32) | nlval;

	if (isconforming) {
		pthread_cleanup_push(cond_cleanup, (void *)cond);
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
		_pthread_testcancel(pthread_self(), isconforming);
		pthread_cleanup_pop(0);
	} else {
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
	}

	retval = 0;

	if (updateval == (uint32_t)-1) {
		local_error = errno;
		error = local_error & 0xff;
		if (error == ETIMEDOUT) {
			retval = ETIMEDOUT;
		} else if (error == EINTR) {
			/*
			** EINTR can be treated as a spurious wakeup unless we were canceled.
			*/
			retval = 0;
		} else
			retval = EINVAL;
//#if _KSYN_TRACE_
//		(void)__kdebug_trace(0x9000070 | 0, (uint32_t)cond, 0xf1f1f2f2, local_error, error, 0);
//#endif

		/* add unlock ref to show one less waiter */
		cond_dropwait(cond, local_error, 0);
	} else {
//#if _KSYN_TRACE_
//		(void)__kdebug_trace(0x9000070 | 0, (uint32_t)cond, 0xf3f3f4f4, updateval, 0, 0);
//#endif
		/* successful wait */
		if (updateval != 0) {
			/* the return is due to a prepost and might carry bit states */
			/* update S and return for the prepost if needed */
			cond_dropwait(cond, 0, updateval);
		}
		retval = 0;
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 4, retval, 0, 0);
#endif
	pthread_mutex_lock(omutex);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0, 0, retval, 0);
#endif
	return (retval);
}
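
/*
 * Illustrative sketch (editor's addition, compiled out): a caller building
 * the absolute deadline that _pthread_cond_wait() converts back into a
 * relative timeout via gettimeofday() above.
 */
#if 0
static int
example_wait_with_timeout(pthread_cond_t *cv, pthread_mutex_t *mtx, const int *pred, long ms)
{
	struct timeval now;
	struct timespec deadline;
	int err = 0;

	gettimeofday(&now, NULL);
	deadline.tv_sec = now.tv_sec + ms / 1000;
	deadline.tv_nsec = now.tv_usec * 1000 + (ms % 1000) * 1000000;
	if (deadline.tv_nsec >= NSEC_PER_SEC) {		/* normalize */
		deadline.tv_nsec -= NSEC_PER_SEC;
		deadline.tv_sec++;
	}

	pthread_mutex_lock(mtx);
	while (*pred == 0 && err == 0)
		err = pthread_cond_timedwait(cv, mtx, &deadline);
	pthread_mutex_unlock(mtx);
	return (err);		/* ETIMEDOUT once the deadline passes */
}
#endif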

/*
 * For the new style mutex, interlocks are not held all the time.
 * We need the signature to be set at the end, and we need to
 * protect against the compiler reorganizing the code.
 */
static void
__pthread_cond_set_signature(npthread_cond_t *cond)
{
	cond->sig = _PTHREAD_COND_SIG;
}


static void
cond_cleanup(void *arg)
{
	npthread_cond_t *cond = (npthread_cond_t *)arg;
	pthread_mutex_t *mutex;

	// 4597450: begin
	pthread_t thread = pthread_self();
	int thcanceled = 0;

	LOCK(thread->lock);
	thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
	UNLOCK(thread->lock);

	if (thcanceled == 0)
		return;

	// 4597450: end
	mutex = (pthread_mutex_t *)cond->busy;

	/* add unlock ref to show one less waiter */
	cond_dropwait(cond, thread->cancel_error, 0);

	/*
	** Can't do anything if this fails -- we're on the way out
	*/
	if (mutex != NULL)
		(void)pthread_mutex_lock(mutex);
}
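
/*
 * Illustrative sketch (editor's addition, compiled out): because the wait
 * re-acquires the mutex when the thread is canceled (see cond_cleanup
 * above), a user-level cleanup handler that unlocks it keeps cancellation
 * from leaking a held mutex.
 */
#if 0
static void
example_unlock_on_cancel(void *arg)
{
	pthread_mutex_unlock((pthread_mutex_t *)arg);
}

static void
example_cancelable_wait(pthread_cond_t *cv, pthread_mutex_t *mtx, const int *pred)
{
	pthread_mutex_lock(mtx);
	pthread_cleanup_push(example_unlock_on_cancel, mtx);
	while (*pred == 0)
		pthread_cond_wait(cv, mtx);	/* cancellation point */
	pthread_cleanup_pop(0);
	pthread_mutex_unlock(mtx);
}
#endif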

#define ECVCERORR	256
#define ECVPERORR	512

void
cond_dropwait(npthread_cond_t *cond, int error, uint32_t updateval)
{
	int sig = cond->sig;
	pthread_cond_t *ocond = (pthread_cond_t *)cond;
	int needclearpre = 0;
	uint32_t diffgen, nlval, ulval, flags;
	uint32_t lcntval, ucntval, scntval, lval;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64;

	/* to provide backwards compat for apps using uninitialized condition variables */
	if (sig != _PTHREAD_COND_SIG)
		return;

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	if (error != 0) {
		lval = PTHRW_INC;
		if ((error & ECVCERORR) != 0)
			lval |= PTH_RWS_CV_CBIT;
		if ((error & ECVPERORR) != 0)
			lval |= PTH_RWS_CV_PBIT;
	} else {
		lval = updateval;
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_START, (uint32_t)cond, error, updateval, 0xee, 0);
#endif
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

	diffgen = diff_genseq((lcntval & PTHRW_COUNT_MASK), (scntval & PTHRW_COUNT_MASK));	/* pending waiters */
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, lcntval, scntval, diffgen, 0);
#endif
	if (diffgen <= 0) {
		/* TBD: assert; this should not be the case */
		/* validate that it is spurious and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;
		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
		return;
	}

	/* update S by one */
	oldval64 = (((uint64_t)scntval) << 32);
	oldval64 |= lcntval;

	/* update scntval with the number of expected returns and bits */
	ulval = (scntval & PTHRW_COUNT_MASK) + (lval & PTHRW_COUNT_MASK);
	/* set bits */
	ulval |= ((scntval & PTH_RWS_CV_BITSALL) | (lval & PTH_RWS_CV_BITSALL));

	nlval = lcntval;

	needclearpre = 0;

	/* If L == S, we need to return to the kernel */
	if ((nlval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
		if ((ulval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL) {
			/* reset the p bit but retain the c bit on the S word */
			needclearpre = 1;
			ulval &= PTH_RWS_CV_RESET_PBIT;
		}
	}

	newval64 = (((uint64_t)ulval) << 32);
	newval64 |= nlval;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, 0xffff, nlval, ulval, 0);
#endif
	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
		goto retry;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, 2, 0, 0xee, 0);
#endif
	if ((nlval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
		/* last user; remove the mutex association */
		cond->busy = NULL;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, nlval, ucntval, ulval, PTHRW_INC, 0);
#endif
	if (needclearpre != 0) {
		flags = 0;
		if (cond->pshared == PTHREAD_PROCESS_SHARED)
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		/* reset the prepost */
		(void)__psynch_cvclrprepost(ocond, nlval, ucntval, ulval, 0, nlval, flags);
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_END, nlval, ucntval, ulval, PTHRW_INC, 0);
#endif
	return;
}


int
pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
				   pthread_mutex_t *mutex,
				   const struct timespec *abstime)
{
	return (_pthread_cond_wait(cond, mutex, abstime, 1, 0));
}
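
/*
 * Illustrative sketch (editor's addition, compiled out): the non-portable
 * relative variant takes a timeout measured from now rather than an
 * absolute deadline (note isRelative == 1 in the call above).
 */
#if 0
static int
example_wait_relative(pthread_cond_t *cv, pthread_mutex_t *mtx)
{
	struct timespec rel = { 0, 500 * 1000 * 1000 };	/* 500ms from now */
	int err;

	pthread_mutex_lock(mtx);
	/* a real caller would loop on its predicate here */
	err = pthread_cond_timedwait_relative_np(cv, mtx, &rel);
	pthread_mutex_unlock(mtx);
	return (err);
}
#endif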


#else /* !BUILDING_VARIANT */

extern int _pthread_cond_wait(pthread_cond_t *cond,
			      pthread_mutex_t *mutex,
			      const struct timespec *abstime,
			      int isRelative,
			      int isconforming);

#endif /* !BUILDING_VARIANT ] */
/*
 * Initialize a condition variable.  This is the public interface.
 * We can't trust the lock, so initialize it first before taking it.
 */
int
pthread_cond_init(pthread_cond_t *cond,
		  const pthread_condattr_t *attr)
{
	int conforming;

#if __DARWIN_UNIX03
	conforming = 1;
#else /* __DARWIN_UNIX03 */
	conforming = 0;
#endif /* __DARWIN_UNIX03 */

	/* the lock is at the same offset in both structures */
	LOCK_INIT(cond->lock);

	return (_pthread_cond_init(cond, attr, conforming));
}
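
/*
 * Illustrative sketch (editor's addition, compiled out): both init paths
 * handled above -- the static initializer (upgraded lazily on first use via
 * the _PTHREAD_COND_SIG_init checks) and explicit pthread_cond_init().
 */
#if 0
static pthread_cond_t static_cv = PTHREAD_COND_INITIALIZER;	/* sig == _PTHREAD_COND_SIG_init */

static int
example_dynamic_init(void)
{
	pthread_cond_t cv;
	int err;

	if ((err = pthread_cond_init(&cv, NULL)) != 0)	/* NULL attr => defaults */
		return (err);
	/* ... use cv ... */
	return (pthread_cond_destroy(&cv));
}
#endif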

/*
int
pthread_cond_wait(pthread_cond_t *cond,
		  pthread_mutex_t *mutex)

int
pthread_cond_timedwait(pthread_cond_t *cond,
		       pthread_mutex_t *mutex,
		       const struct timespec *abstime)

moved to pthread_cancelable.c */