/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#include "pthread_internals.h"
#include <sys/time.h>	/* For struct timespec and getclock(). */
#include <stdio.h>

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

extern int __semwait_signal(int, int, int, int, int64_t, int32_t);
extern int _pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *, int);
extern int __unix_conforming;
extern int usenew_mtximpl;

#ifdef PR_5243343
/* 5243343 - temporary hack to detect if we are running the conformance test */
extern int PR_5243343_flag;
#endif /* PR_5243343 */

__private_extern__ int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int isRelative, int isconforming);
#ifndef BUILDING_VARIANT
static void cond_cleanup(void *arg);
static void cond_dropwait(npthread_cond_t * cond, int error, uint32_t updateval);
static void __pthread_cond_set_signature(npthread_cond_t * cond);
static int _pthread_cond_destroy_locked(pthread_cond_t *cond);
#endif

/*
 * The three sequence words (L, U, S) live in c_seq[].  When the condvar is
 * not 8-byte aligned, the words are rotated so that the (L, S) pair used by
 * the 64-bit compare-and-swap is 8-byte aligned.  The __LP64__ and ILP32
 * layouts were identical, so a single definition covers both.
 */
#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt) \
{ \
	if (cond->misalign != 0) { \
		c_lseqcnt = &cond->c_seq[1]; \
		c_sseqcnt = &cond->c_seq[2]; \
		c_useqcnt = &cond->c_seq[0]; \
	} else { \
		/* aligned */ \
		c_lseqcnt = &cond->c_seq[0]; \
		c_sseqcnt = &cond->c_seq[1]; \
		c_useqcnt = &cond->c_seq[2]; \
	} \
}
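
/*
 * Illustrative sketch (not part of the library, not compiled): how the
 * macro above is used by the routines below.  The function name is
 * hypothetical.
 */
#if 0
static void
example_getseq(npthread_cond_t *cond)
{
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
	/* c_lseqcnt and c_sseqcnt now name adjacent 32-bit words, so the
	 * (L, S) pair can be read and updated with a single 64-bit CAS,
	 * as the OSAtomicCompareAndSwap64() calls in this file do. */
}
#endif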


#define _KSYN_TRACE_ 0

#if _KSYN_TRACE_
/* The function qualifiers */
#define DBG_FUNC_START		1
#define DBG_FUNC_END		2
#define DBG_FUNC_NONE		0

int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);

#define _KSYN_TRACE_UM_LOCK	0x9000060
#define _KSYN_TRACE_UM_UNLOCK	0x9000064
#define _KSYN_TRACE_UM_MHOLD	0x9000068
#define _KSYN_TRACE_UM_MDROP	0x900006c
#define _KSYN_TRACE_UM_CVWAIT	0x9000070
#define _KSYN_TRACE_UM_CVSIG	0x9000074
#define _KSYN_TRACE_UM_CVBRD	0x9000078
#define _KSYN_TRACE_UM_CDROPWT	0x90000a0
#define _KSYN_TRACE_UM_CVCLRPRE	0x90000a4

#endif /* _KSYN_TRACE_ */

#ifndef BUILDING_VARIANT /* [ */

int
pthread_condattr_init(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_COND_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return (0);
}

int
pthread_condattr_destroy(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;	/* Uninitialized */
	return (0);
}

int
pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared)
{
	if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		return (0);
	} else {
		return (EINVAL);	/* Not an initialized 'attribute' structure */
	}
}

/* temporary home until pshared is supported correctly */
int
pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
	if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			return (0);
		} else {
			return (EINVAL);	/* Invalid parameter */
		}
	} else {
		return (EINVAL);	/* Not an initialized 'attribute' structure */
	}
}

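/*
 * Usage sketch (not compiled): creating a process-private condition
 * variable with the attribute calls above.  The helper name is
 * hypothetical.
 */
#if 0
static int
example_init_private_cond(pthread_cond_t *cv)
{
	pthread_condattr_t attr;
	int rc;

	rc = pthread_condattr_init(&attr);
	if (rc != 0)
		return (rc);
	rc = pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
	if (rc == 0)
		rc = pthread_cond_init(cv, &attr);
	(void)pthread_condattr_destroy(&attr);
	return (rc);
}
#endif
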
__private_extern__ int
_pthread_cond_init(pthread_cond_t *ocond,
		   const pthread_condattr_t *attr,
		   int conforming)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;

	cond->busy = (npthread_mutex_t *)NULL;
	cond->c_seq[0] = 0;
	cond->c_seq[1] = 0;
	cond->c_seq[2] = 0;
	cond->rfu = 0;

	if (((uintptr_t)cond & 0x07) != 0) {
		cond->misalign = 1;
		cond->c_seq[2] = PTH_RWS_CV_CBIT;
	} else {
		cond->misalign = 0;
		cond->c_seq[1] = PTH_RWS_CV_CBIT;	/* set the C bit in the S word */
	}
	if (conforming) {
		if (attr)
			cond->pshared = attr->pshared;
		else
			cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	} else
		cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	/*
	 * For the new style mutex, interlocks are not held all the time.
	 * The signature must be stored last, and the store must be protected
	 * against reordering by the compiler, so it is done in a separate
	 * function rather than as a plain "cond->sig = _PTHREAD_COND_SIG;".
	 */
	__pthread_cond_set_signature(cond);
	return (0);
}

int
pthread_cond_destroy(pthread_cond_t *ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int ret;

	/* provide backward compatibility for apps using un-inited (statically initialized) cond vars */
	if ((cond->sig != _PTHREAD_COND_SIG) && (cond->sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	LOCK(cond->lock);
	ret = _pthread_cond_destroy_locked(ocond);
	UNLOCK(cond->lock);

	return (ret);
}

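/*
 * Usage sketch (not compiled): _pthread_cond_destroy_locked() below
 * returns EBUSY while L != S, i.e. while some waiter has entered but not
 * yet left.  A typical teardown therefore wakes all waiters first; the
 * names here are hypothetical.
 */
#if 0
static int
example_teardown(pthread_cond_t *cv, pthread_mutex_t *m, int *stop)
{
	pthread_mutex_lock(m);
	*stop = 1;			/* waiters re-check this predicate */
	pthread_cond_broadcast(cv);
	pthread_mutex_unlock(m);
	/* may still return EBUSY if a woken waiter has not exited yet */
	return (pthread_cond_destroy(cv));
}
#endif
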
static int
_pthread_cond_destroy_locked(pthread_cond_t *ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int ret;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t lcntval, ucntval, scntval;
	uint64_t oldval64, newval64;

retry:
	if (cond->sig == _PTHREAD_COND_SIG) {
		COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		if ((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) {
			/* validate it is not busy */
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;

			if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry;
			cond->sig = _PTHREAD_NO_SIG;
			ret = 0;
		} else
			ret = EBUSY;
	} else if (cond->sig == _PTHREAD_COND_SIG_init) {
		cond->sig = _PTHREAD_NO_SIG;
		ret = 0;
	} else
		ret = EINVAL;	/* Not an initialized condition variable structure */
	return (ret);
}

/*
 * Broadcast a condition variable, waking up all threads waiting for it.
 */
int
pthread_cond_broadcast(pthread_cond_t *ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int sig = cond->sig;
	uint32_t flags, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint64_t oldval64, newval64, mugen, cvlsgen, cvudgen, mtid = 0;
	int diffgen, error = 0;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t *pmtx = NULL;
	uint32_t nlval, ulval;
	int needclearpre = 0, retry_count = 0;

	/* provide backward compatibility for apps using un-inited (statically initialized) cond vars */
	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (sig != _PTHREAD_COND_SIG) {
		LOCK(cond->lock);
		if (cond->sig == _PTHREAD_COND_SIG_init) {
			_pthread_cond_init(ocond, NULL, 0);
			/* just inited, nothing to post */
			UNLOCK(cond->lock);
			return (0);
		} else if (cond->sig != _PTHREAD_COND_SIG) {
			/* Not a condition variable */
			UNLOCK(cond->lock);
			return (EINVAL);
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
	    ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK))) {
		/* validate that it is a spurious broadcast (no waiters) and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;

		if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, 0xf1f1f1f1, 0);
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, scntval, 0, 0xf1f1f1f1, 0);
#endif
		return (0);
	}

	if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK)) || is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
		/* since ucntval may be newer, just redo */
		retry_count++;
		if (retry_count > 8192) {
			return (EAGAIN);
		} else {
			sched_yield();
			goto retry;
		}
	}

	if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
		/* If U < S, set U = S + diff to account for interruptions, timeouts, etc. */
		ulval = (scntval & PTHRW_COUNT_MASK);
	} else {
		/* If U >= S, set U = U + diff to account for interruptions, timeouts, etc. */
		ulval = (ucntval & PTHRW_COUNT_MASK);
	}

	diffgen = diff_genseq((lcntval & PTHRW_COUNT_MASK), (ulval & PTHRW_COUNT_MASK));

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, lcntval, ucntval, scntval, diffgen, 0);
#endif

	/* set U = L */
	ulval = (lcntval & PTHRW_COUNT_MASK);
	if (OSAtomicCompareAndSwap32(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE) {
		goto retry;
	}

	flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED)
		flags |= _PTHREAD_MTX_OPT_PSHARED;
	pmtx = NULL;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, 3, diffgen, flags, 0);
#endif
	nlval = lcntval;

	/* pass the old U value so the kernel will know the diffgen */
	mugen = 0;
	cvlsgen = ((uint64_t)scntval << 32) | nlval;
	cvudgen = ((uint64_t)ucntval << 32) | diffgen;

	updateval = __psynch_cvbroad(ocond, cvlsgen, cvudgen, flags, (pthread_mutex_t *)pmtx, mugen, mtid);

	if (updateval != (uint32_t)-1) {
		/*
		 * If the kernel woke some threads, update S for them here, as they
		 * will not access the cv on their way out.  Were any threads woken,
		 * or bits to be set?
		 */
		if (updateval != 0) {
retry2:
			needclearpre = 0;
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;
			/* update scntval with the number of expected returns and the bits */
			nlval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			/* set bits */
			nlval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, 0x25, lcntval, scntval, updateval, 0);
#endif
			/* if L == S and the C and P bits are set, a clearpre is needed */
			if (((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK))
			    && ((nlval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				/* reset the P bit but retain the C bit on the S word */
				nlval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			}

			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = (((uint64_t)nlval) << 32);
			newval64 |= lcntval;

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, 0x25, nlval, scntval, updateval, 0);
#endif

			if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry2;

			/* if L == S, then reset the associated mutex */
			if ((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
				cond->busy = (npthread_mutex_t *)NULL;
			}

			if (needclearpre != 0) {
				(void)__psynch_cvclrprepost(ocond, lcntval, ucntval, nlval, 0, lcntval, flags);
			}
		}
	}
	error = 0;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, 0, error, 0, 0);
#endif
	return (error);
}

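/*
 * Usage sketch (not compiled): a "gate" opened for every waiter at once.
 * Broadcast is required here, since a single signal would wake only one
 * of the threads parked in pthread_cond_wait().  Names are hypothetical.
 */
#if 0
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t g_gate = PTHREAD_COND_INITIALIZER;
static int g_open = 0;

static void
open_gate(void)
{
	pthread_mutex_lock(&g_lock);
	g_open = 1;
	pthread_cond_broadcast(&g_gate);	/* wake every waiter */
	pthread_mutex_unlock(&g_lock);
}
#endif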

/*
 * Signal a condition variable, waking a specified thread.
 */
int
pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int sig = cond->sig;
	uint32_t flags, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval = 0;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen, mtid = 0;
	int needclearpre = 0, retry_count = 0;
	int error;

	/* provide backward compatibility for apps using un-inited (statically initialized) cond vars */
	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (cond->sig != _PTHREAD_COND_SIG) {
		LOCK(cond->lock);
		if (cond->sig != _PTHREAD_COND_SIG) {
			if (cond->sig == _PTHREAD_COND_SIG_init) {
				_pthread_cond_init(ocond, NULL, 0);
				/* just inited, nothing to post yet */
				UNLOCK(cond->lock);
				return (0);
			} else {
				UNLOCK(cond->lock);
				return (EINVAL);
			}
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif
	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
	    ((thread == 0) && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
		/* If L <= S + U, it is a spurious signal; validate that and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;

		if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, 0xf1f1f1f1, 0);
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, scntval, 0, 0xf1f1f1f1, 0);
#endif
		return (0);
	}

	if (((thread == 0) && (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK)))) || is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
		/* since ucntval may be newer, just redo */
		retry_count++;
		if (retry_count > 8192) {
			return (EAGAIN);
		} else {
			sched_yield();
			goto retry;
		}
	}

	if (thread == 0) {
		/*
		 * Skip manipulating the U count, as an ESRCH from the kernel cannot be
		 * handled properly.  The S count will cover the imbalance, and the next
		 * signal without a thread, or the next broadcast, will correct it.  But
		 * we need to send the right U value to the kernel so it will use it to
		 * look for the appropriate sequence; so ulval is computed anyway.
		 */

		if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
			/* If U < S, set U = S + 1 to account for interruptions, timeouts, etc. */
			ulval = (scntval & PTHRW_COUNT_MASK) + PTHRW_INC;
		} else {
			/* If U >= S, set U = U + 1 to account for interruptions, timeouts, etc. */
			ulval = (ucntval & PTHRW_COUNT_MASK) + PTHRW_INC;
		}

		if (OSAtomicCompareAndSwap32(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE) {
			goto retry;
		}
	}

	flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED)
		flags |= _PTHREAD_MTX_OPT_PSHARED;

	nlval = lcntval;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 3, nlval, ulval, 0);
#endif
	/* pass the old U value so the kernel will know the diffgen */
	mugen = 0;
	cvlsgen = ((uint64_t)scntval << 32) | nlval;

	updateval = __psynch_cvsignal(ocond, cvlsgen, ucntval, pthread_mach_thread_np(thread), (pthread_mutex_t *)0, mugen, mtid, flags);

	if (updateval != (uint32_t)-1) {
		/*
		 * If the kernel woke some threads, update S for them here, as they
		 * will not access the cv on their way out.  Were any threads woken,
		 * or bits to be set?
		 */
		if (updateval != 0) {
retry2:
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;
			/* update scntval with the number of expected returns and the bits */
			nlval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			/* set bits */
			nlval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, 0x25, 0, 0, updateval, 0);
#endif
			/* if L == S and the C and P bits are set, a clearpre is needed */
			if (((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK))
			    && ((nlval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				/* reset the P bit but retain the C bit on the S word */
				nlval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			} else
				needclearpre = 0;

			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = (((uint64_t)nlval) << 32);
			newval64 |= lcntval;

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, 0x25, nlval, ulval, updateval, 0);
#endif

			if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry2;

			/* if L == S, then reset the associated mutex */
			if ((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
				cond->busy = (npthread_mutex_t *)NULL;
			}

			if (needclearpre != 0) {
				(void)__psynch_cvclrprepost(ocond, lcntval, ucntval, nlval, 0, lcntval, flags);
			}
		}
	}

	error = 0;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
	return (error);
}

/*
 * Signal a condition variable, waking only one thread.
 */
int
pthread_cond_signal(pthread_cond_t *cond)
{
	return pthread_cond_signal_thread_np(cond, NULL);
}

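/*
 * Usage sketch (not compiled): the canonical predicate loop these calls
 * are designed for.  Waiters must re-check their predicate, since EINTR
 * and prepost handling can surface as spurious wakeups.  Names are
 * hypothetical.
 */
#if 0
static void
post_item(pthread_mutex_t *m, pthread_cond_t *cv, int *navail)
{
	pthread_mutex_lock(m);
	(*navail)++;
	pthread_cond_signal(cv);		/* wakes at most one waiter */
	pthread_mutex_unlock(m);
}

static void
take_item(pthread_mutex_t *m, pthread_cond_t *cv, int *navail)
{
	pthread_mutex_lock(m);
	while (*navail == 0)
		pthread_cond_wait(cv, m);	/* drops m while blocked */
	(*navail)--;
	pthread_mutex_unlock(m);
}
#endif
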
/*
 * Manage a list of condition variables associated with a mutex.
 */


/*
 * Suspend waiting for a condition variable.
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
 * remaining conforming behavior.
 */
__private_extern__ int
_pthread_cond_wait(pthread_cond_t *ocond,
		   pthread_mutex_t *omutex,
		   const struct timespec *abstime,
		   int isRelative,
		   int isconforming)
{
	int retval;
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	npthread_mutex_t *mutex = (npthread_mutex_t *)omutex;
	mach_timespec_t then = {0, 0};
	struct timespec cthen = {0, 0};
	int sig = cond->sig;
	int msig = mutex->sig;
	npthread_mutex_t *pmtx;
	uint32_t mtxgen, mtxugen, flags = 0, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval, savebits;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen;
	uint32_t *npmtx = NULL;
	int error, local_error;

	extern void _pthread_testcancel(pthread_t thread, int isconforming);

	/* provide backward compatibility for apps using un-inited (statically initialized) cond vars */
	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (isconforming) {
		if ((msig != _PTHREAD_MUTEX_SIG) && ((msig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP))
			return (EINVAL);
		if (isconforming > 0)
			_pthread_testcancel(pthread_self(), 1);
	}

	if (cond->sig != _PTHREAD_COND_SIG) {
		LOCK(cond->lock);
		if (cond->sig != _PTHREAD_COND_SIG) {
			if (cond->sig == _PTHREAD_COND_SIG_init) {
				_pthread_cond_init(ocond, NULL, 0);
			} else {
				UNLOCK(cond->lock);
				return (EINVAL);
			}
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, (uint32_t)cond, isRelative, 0, (uint32_t)abstime, 0);
#endif
	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	/* send relative time to the kernel */
	if (abstime) {
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, 0x11111111, abstime->tv_nsec, abstime->tv_sec, 0, 0);
#endif
		if (isRelative == 0) {
			struct timespec now;
			struct timeval tv;
			gettimeofday(&tv, NULL);
			TIMEVAL_TO_TIMESPEC(&tv, &now);

			/* Compute the relative time to sleep */
			then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
			then.tv_sec = abstime->tv_sec - now.tv_sec;
			if (then.tv_nsec < 0) {
				then.tv_nsec += NSEC_PER_SEC;
				then.tv_sec--;
			}
			if (((int)then.tv_sec < 0) ||
			    ((then.tv_sec == 0) && (then.tv_nsec == 0))) {
				return ETIMEDOUT;
			}
			if (isconforming != 0) {
				cthen.tv_sec = abstime->tv_sec;
				cthen.tv_nsec = abstime->tv_nsec;
				if ((cthen.tv_sec < 0) || (cthen.tv_nsec < 0)) {
					return EINVAL;
				}
				if (cthen.tv_nsec >= NSEC_PER_SEC) {
					return EINVAL;
				}
			}
		} else {
			then.tv_sec = abstime->tv_sec;
			then.tv_nsec = abstime->tv_nsec;
			if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
				return ETIMEDOUT;
			}
		}
		if (isconforming && ((then.tv_sec < 0) || (then.tv_nsec < 0))) {
			return EINVAL;
		}
		if (then.tv_nsec >= NSEC_PER_SEC) {
			return EINVAL;
		}
	}

	if ((cond->busy != (npthread_mutex_t *)NULL) && (cond->busy != mutex))
		return (EINVAL);

	pmtx = mutex;
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

	oldval64 = (((uint64_t)scntval) << 32);
	oldval64 |= lcntval;

	/* remove the C and P bits from the S word */
	savebits = scntval & PTH_RWS_CV_BITSALL;
	ulval = (scntval & PTHRW_COUNT_MASK);
	nlval = lcntval + PTHRW_INC;
	newval64 = (((uint64_t)ulval) << 32);
	newval64 |= nlval;

	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
		goto retry;

	cond->busy = mutex;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif
	retval = __mtx_droplock(pmtx, PTHRW_INC, &flags, &npmtx, &mtxgen, &mtxugen);

	/* TBD: the cases handled are for normal mutexes (non-owner for recursive, error checking) */
	if (retval != 0)
		return (EINVAL);
	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) == 0) {
		npmtx = NULL;
		mugen = 0;
	} else
		mugen = ((uint64_t)mtxugen << 32) | mtxgen;
	flags &= ~_PTHREAD_MTX_OPT_MUTEX;	/* reset the mutex bit, as this is a cvar */

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 3, (uint32_t)mutex, flags, 0);
#endif

	cvlsgen = ((uint64_t)(ulval | savebits) << 32) | nlval;

	if (isconforming) {
		pthread_cleanup_push(cond_cleanup, (void *)cond);
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
		pthread_cleanup_pop(0);
	} else {
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
	}

	retval = 0;

	if (updateval == (uint32_t)-1) {
		local_error = errno;
		error = local_error & 0xff;
		if (error == ETIMEDOUT) {
			retval = ETIMEDOUT;
		} else if (error == EINTR) {
			/*
			 * EINTR can be treated as a spurious wakeup unless we were canceled.
			 */
			retval = 0;
		} else
			retval = EINVAL;
//#if _KSYN_TRACE_
//		(void)__kdebug_trace(0x9000070 | 0, (uint32_t)cond, 0xf1f1f2f2, local_error, error, 0);
//#endif

		/* add the unlock ref to show one less waiter */
		cond_dropwait(cond, local_error, 0);
	} else {
//#if _KSYN_TRACE_
//		(void)__kdebug_trace(0x9000070 | 0, (uint32_t)cond, 0xf3f3f4f4, updateval, 0, 0);
//#endif
		/* successful wait */
		if (updateval != 0) {
			/* the return is due to a prepost and might carry bit states */
			/* update S and handle the prepost, if needed */
			cond_dropwait(cond, 0, updateval);
		}
		retval = 0;
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 4, retval, 0, 0);
#endif
	pthread_mutex_lock(omutex);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0, 0, retval, 0);
#endif
	return (retval);
}

/*
 * For the new style mutex, interlocks are not held all the time.
 * The signature must be stored last, and the store must be protected
 * against reordering by the compiler; hence this separate function.
 */
static void
__pthread_cond_set_signature(npthread_cond_t *cond)
{
	cond->sig = _PTHREAD_COND_SIG;
}


static void
cond_cleanup(void *arg)
{
	npthread_cond_t *cond = (npthread_cond_t *)arg;
	pthread_mutex_t *mutex;

// 4597450: begin
	pthread_t thread = pthread_self();
	int thcanceled = 0;

	LOCK(thread->lock);
	thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
	UNLOCK(thread->lock);

	if (thcanceled == 0)
		return;

// 4597450: end
	mutex = (pthread_mutex_t *)cond->busy;

	/* add the unlock ref to show one less waiter */
	cond_dropwait(cond, thread->cancel_error, 0);

	/*
	 * Can't do anything if this fails -- we're on the way out
	 */
	if (mutex != NULL)
		(void)pthread_mutex_lock(mutex);
}

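/*
 * Usage sketch (not compiled): because cond_cleanup() above reacquires
 * the mutex before a canceled thread's cleanup handlers run, a handler
 * can safely unlock it.  Names are hypothetical.
 */
#if 0
static void
unlock_on_cancel(void *arg)
{
	pthread_mutex_unlock((pthread_mutex_t *)arg);
}

static void
cancelable_wait(pthread_mutex_t *m, pthread_cond_t *cv, int *ready)
{
	pthread_mutex_lock(m);
	pthread_cleanup_push(unlock_on_cancel, m);
	while (*ready == 0)
		pthread_cond_wait(cv, m);	/* cancellation point */
	pthread_cleanup_pop(1);			/* pops and runs the handler */
}
#endif
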
#define ECVCERORR	256
#define ECVPERORR	512

static void
cond_dropwait(npthread_cond_t *cond, int error, uint32_t updateval)
{
	int sig = cond->sig;
	pthread_cond_t *ocond = (pthread_cond_t *)cond;
	int needclearpre = 0;
	uint32_t diffgen, nlval, ulval, flags;
	uint32_t lcntval, ucntval, scntval, lval;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64;

	/* provide backward compatibility for apps using un-inited (statically initialized) cond vars */
	if (sig != _PTHREAD_COND_SIG)
		return;

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	if (error != 0) {
		lval = PTHRW_INC;
		if ((error & ECVCERORR) != 0)
			lval |= PTH_RWS_CV_CBIT;
		if ((error & ECVPERORR) != 0)
			lval |= PTH_RWS_CV_PBIT;
	} else {
		lval = updateval;
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_START, (uint32_t)cond, error, updateval, 0xee, 0);
#endif
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

	diffgen = diff_genseq((lcntval & PTHRW_COUNT_MASK), (scntval & PTHRW_COUNT_MASK));	/* pending waiters */
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, lcntval, scntval, diffgen, 0);
#endif
	if (diffgen <= 0) {
		/* TBD: assert; this should not be the case */
		/* validate that it is spurious and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;
		if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
		return;
	}

	/* update S by one */
	oldval64 = (((uint64_t)scntval) << 32);
	oldval64 |= lcntval;

	/* update scntval with the number of expected returns and the bits */
	ulval = (scntval & PTHRW_COUNT_MASK) + (lval & PTHRW_COUNT_MASK);
	/* set bits */
	ulval |= ((scntval & PTH_RWS_CV_BITSALL) | (lval & PTH_RWS_CV_BITSALL));

	nlval = lcntval;

	needclearpre = 0;

	/* If L == S, we need to return to the kernel */
	if ((nlval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
		if ((ulval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL) {
			/* reset the P bit but retain the C bit on the S word */
			needclearpre = 1;
			ulval &= PTH_RWS_CV_RESET_PBIT;
		}
	}

	newval64 = (((uint64_t)ulval) << 32);
	newval64 |= nlval;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, 0xffff, nlval, ulval, 0);
#endif
	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
		goto retry;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, 2, 0, 0xee, 0);
#endif
	if ((nlval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
		/* last waiter is gone; drop the associated mutex */
		cond->busy = NULL;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, nlval, ucntval, ulval, PTHRW_INC, 0);
#endif
	if (needclearpre != 0) {
		flags = 0;
		if (cond->pshared == PTHREAD_PROCESS_SHARED)
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		/* reset the prepost */
		(void)__psynch_cvclrprepost(ocond, nlval, ucntval, ulval, 0, nlval, flags);
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_END, nlval, ucntval, ulval, PTHRW_INC, 0);
#endif
	return;
}


int
pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
				   pthread_mutex_t *mutex,
				   const struct timespec *abstime)
{
	return (_pthread_cond_wait(cond, mutex, abstime, 1, 0));
}

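/*
 * Usage sketch (not compiled): the relative variant takes a timeout
 * rather than a deadline, so no clock sampling is needed.  The caller
 * must hold the mutex; names are hypothetical.
 */
#if 0
static int
example_wait_half_second(pthread_cond_t *cv, pthread_mutex_t *m)
{
	struct timespec rel = { 0, 500000000 };	/* 500 ms */

	/* m must already be locked by this thread */
	return (pthread_cond_timedwait_relative_np(cv, m, &rel));
}
#endif
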

#else /* !BUILDING_VARIANT */

extern int _pthread_cond_wait(pthread_cond_t *cond,
			      pthread_mutex_t *mutex,
			      const struct timespec *abstime,
			      int isRelative,
			      int isconforming);

#endif /* !BUILDING_VARIANT ] */
/*
 * Initialize a condition variable.  This is the public interface.
 * We can't trust the lock, so initialize it first before taking it.
 * Note: 'attr' is ignored in non-conforming builds.
 */
int
pthread_cond_init(pthread_cond_t *cond,
		  const pthread_condattr_t *attr)
{
	int conforming;

#if __DARWIN_UNIX03
	conforming = 1;
#else /* __DARWIN_UNIX03 */
	conforming = 0;
#endif /* __DARWIN_UNIX03 */

	/* lock is at the same offset in both structures */
	LOCK_INIT(cond->lock);

	return (_pthread_cond_init(cond, attr, conforming));
}

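/*
 * Usage sketch (not compiled): both initialization styles accepted here.
 * The _PTHREAD_COND_SIG_init signature left by the static initializer is
 * upgraded to _PTHREAD_COND_SIG lazily, on first use.
 */
#if 0
static pthread_cond_t example_static_cv = PTHREAD_COND_INITIALIZER;

static int
example_dynamic_init(pthread_cond_t *cv)
{
	return (pthread_cond_init(cv, NULL));	/* NULL attr: defaults */
}
#endif
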
/*
int
pthread_cond_wait(pthread_cond_t *cond,
		  pthread_mutex_t *mutex)

int
pthread_cond_timedwait(pthread_cond_t *cond,
		       pthread_mutex_t *mutex,
		       const struct timespec *abstime)

moved to pthread_cancelable.c */