]> git.saurik.com Git - apple/libpthread.git/blob - src/pthread_cond.c
libpthread-218.51.1.tar.gz
[apple/libpthread.git] / src / pthread_cond.c
1 /*
2 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 */
43 /*
44 * MkLinux
45 */
46
47 /*
48 * POSIX Pthread Library
49 */
50
51 #include "internal.h"
52 #include <sys/time.h> /* For struct timespec and getclock(). */
53 #include <stdio.h>
54
55 #ifdef PLOCKSTAT
56 #include "plockstat.h"
57 #else /* !PLOCKSTAT */
58 #define PLOCKSTAT_MUTEX_RELEASE(x, y)
59 #endif /* PLOCKSTAT */
60
61 __private_extern__ int _pthread_cond_init(_pthread_cond *, const pthread_condattr_t *, int);
62 __private_extern__ int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int isRelative, int isconforming);
63
64 extern int __gettimeofday(struct timeval *, struct timezone *);
65
66 #ifndef BUILDING_VARIANT
67 static void _pthread_cond_cleanup(void *arg);
68 static void _pthread_cond_updateval(_pthread_cond * cond, int error, uint32_t updateval);
69 #endif
70
71 static void
72 COND_GETSEQ_ADDR(_pthread_cond *cond,
73 volatile uint32_t **c_lseqcnt,
74 volatile uint32_t **c_useqcnt,
75 volatile uint32_t **c_sseqcnt)
76 {
77 if (cond->misalign) {
78 *c_lseqcnt = &cond->c_seq[1];
79 *c_sseqcnt = &cond->c_seq[2];
80 *c_useqcnt = &cond->c_seq[0];
81 } else {
82 *c_lseqcnt = &cond->c_seq[0];
83 *c_sseqcnt = &cond->c_seq[1];
84 *c_useqcnt = &cond->c_seq[2];
85 }
86 }
87
88 #ifndef BUILDING_VARIANT /* [ */
89
90 int
91 pthread_condattr_init(pthread_condattr_t *attr)
92 {
93 attr->sig = _PTHREAD_COND_ATTR_SIG;
94 attr->pshared = _PTHREAD_DEFAULT_PSHARED;
95 return 0;
96 }
97
98 int
99 pthread_condattr_destroy(pthread_condattr_t *attr)
100 {
101 attr->sig = _PTHREAD_NO_SIG;
102 return 0;
103 }
104
105 int
106 pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared)
107 {
108 int res = EINVAL;
109 if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
110 *pshared = (int)attr->pshared;
111 res = 0;
112 }
113 return res;
114 }
115
116 int
117 pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
118 {
119 int res = EINVAL;
120 if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
121 #if __DARWIN_UNIX03
122 if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED)
123 #else /* __DARWIN_UNIX03 */
124 if (pshared == PTHREAD_PROCESS_PRIVATE)
125 #endif /* __DARWIN_UNIX03 */
126 {
127 attr->pshared = pshared;
128 res = 0;
129 }
130 }
131 return res;
132 }
133
134 __private_extern__ int
135 _pthread_cond_init(_pthread_cond *cond, const pthread_condattr_t *attr, int conforming)
136 {
137 volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
138
139 cond->busy = NULL;
140 cond->c_seq[0] = 0;
141 cond->c_seq[1] = 0;
142 cond->c_seq[2] = 0;
143 cond->unused = 0;
144
145 cond->misalign = (((uintptr_t)&cond->c_seq[0]) & 0x7) != 0;
146 COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
147 *c_sseqcnt = PTH_RWS_CV_CBIT; // set Sword to 0c
148
149 if (conforming) {
150 if (attr) {
151 cond->pshared = attr->pshared;
152 } else {
153 cond->pshared = _PTHREAD_DEFAULT_PSHARED;
154 }
155 } else {
156 cond->pshared = _PTHREAD_DEFAULT_PSHARED;
157 }
158
159 // Ensure all contents are properly set before setting signature.
160 OSMemoryBarrier();
161 cond->sig = _PTHREAD_COND_SIG;
162
163 return 0;
164 }
165
/*
 * Slow path for _pthread_cond_check_init(): the condvar did not carry
 * the live signature, so take cond->lock and re-check.
 *
 * Second half of a double-checked initialization: only a statically
 * initialized condvar (_PTHREAD_COND_SIG_init) is lazily converted to a
 * live one here, under the lock, so racing first-users do not both run
 * _pthread_cond_init().
 *
 * Returns 0 if the condvar is (now) valid, EINVAL otherwise.  When this
 * call performed the initialization and 'inited' is non-NULL, *inited is
 * set to true so callers can skip work that is pointless on a brand-new
 * condvar (e.g. signalling when no waiter can exist yet).
 */
PTHREAD_NOINLINE
static int
_pthread_cond_check_init_slow(_pthread_cond *cond, bool *inited)
{
	int res = EINVAL;
	if (cond->sig == _PTHREAD_COND_SIG_init) {
		_PTHREAD_LOCK(cond->lock);
		// Re-check under the lock: another thread may have won the race.
		if (cond->sig == _PTHREAD_COND_SIG_init) {
			res = _pthread_cond_init(cond, NULL, 0);
			if (inited) {
				*inited = true;
			}
		} else if (cond->sig == _PTHREAD_COND_SIG) {
			// Lost the race to another initializer; condvar is valid.
			res = 0;
		}
		_PTHREAD_UNLOCK(cond->lock);
	} else if (cond->sig == _PTHREAD_COND_SIG) {
		// Signature became valid between the caller's check and ours.
		res = 0;
	}
	return res;
}
187
188 static inline int
189 _pthread_cond_check_init(_pthread_cond *cond, bool *inited)
190 {
191 int res = 0;
192 if (cond->sig != _PTHREAD_COND_SIG) {
193 return _pthread_cond_check_init_slow(cond, inited);
194 }
195 return res;
196 }
197
/*
 * Destroy a condition variable.
 *
 * Takes a barrier-consistent snapshot of the L/S sequence words; if they
 * differ there are pending waiters, but the EBUSY rejection is disabled
 * (see the commented-out "res = EBUSY") and the condvar is torn down
 * regardless.  On teardown the signature is invalidated and any
 * kernel-side prepost (P bit in the S word) is cleared.  Also accepts a
 * statically initialized, never-used condvar for compatibility.
 * Returns 0 on success, EINVAL for an unrecognized signature.
 */
int
pthread_cond_destroy(pthread_cond_t *ocond)
{
	_pthread_cond *cond = (_pthread_cond *)ocond;
	int res = EINVAL;
	if (cond->sig == _PTHREAD_COND_SIG) {
		_PTHREAD_LOCK(cond->lock);

		uint64_t oldval64, newval64;
		uint32_t lcntval, ucntval, scntval;
		volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

		COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

		do {
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;

			// validate it is not busy
			if ((lcntval & PTHRW_COUNT_MASK) != (scntval & PTHRW_COUNT_MASK)) {
				//res = EBUSY;
				break;
			}
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;
			// No-op CAS: writes nothing new, but confirms the L/S
			// snapshot was consistent, with barrier semantics.
		} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE);

		// <rdar://problem/13782056> Need to clear preposts.
		uint32_t flags = 0;
		bool needclearpre = ((scntval & PTH_RWS_CV_PBIT) != 0);
		if (needclearpre && cond->pshared == PTHREAD_PROCESS_SHARED) {
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		}

		cond->sig = _PTHREAD_NO_SIG;
		res = 0;

		_PTHREAD_UNLOCK(cond->lock);

		if (needclearpre) {
			// Ask the kernel to drop the pending prepost for this condvar.
			(void)__psynch_cvclrprepost(cond, lcntval, ucntval, scntval, 0, lcntval, flags);
		}
	} else if (cond->sig == _PTHREAD_COND_SIG_init) {
		// Compatibility for misbehaving applications that attempt to
		// destroy a statically initialized condition variable.
		cond->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	return res;
}
250
/*
 * Common implementation behind pthread_cond_signal(),
 * pthread_cond_broadcast() and pthread_cond_signal_thread_np().
 *
 * broadcast: wake every waiter (U is advanced to L); otherwise U is
 * advanced by a single PTHRW_INC.
 * thread: optional specific thread to wake (MACH_PORT_NULL = "any").
 *
 * Loops over a racy snapshot of the L/U/S sequence words, validates it,
 * computes the new U value and publishes it with a 32-bit CAS; on
 * success the kernel is notified via __psynch_cvbroad()/__psynch_cvsignal().
 * Returns 0 on success (including the no-waiter case) or EAGAIN after
 * exhausting the snapshot-retry budget.
 */
static int
_pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
{
	int res;
	_pthread_cond *cond = (_pthread_cond *)ocond;

	uint32_t updateval;
	uint32_t diffgen;
	uint32_t ulval;

	uint64_t oldval64, newval64;
	uint32_t lcntval, ucntval, scntval;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	int retry_count = 0, uretry_count = 0;
	int ucountreset = 0;

	bool inited = false;
	res = _pthread_cond_check_init(cond, &inited);
	if (res != 0 || inited == true) {
		// Invalid condvar, or we just lazily initialized it — in the
		// latter case there cannot be any waiter to wake.
		return res;
	}

	COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	bool retry;
	do {
		retry = false;

		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		// L == S: no thread is blocked.  L == U (untargeted signal):
		// every blocked thread has already been woken.
		if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
			(thread == MACH_PORT_NULL && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
			/* validate it is spurious and return */
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;

			// No-op CAS confirms the snapshot was consistent; if the
			// words changed under us, take the whole loop again.
			if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE) {
				retry = true;
				continue;
			} else {
				return 0;
			}
		}

		if (thread) {
			// Targeted wakeup: U is not advanced here; the kernel call
			// below receives the raw snapshot.
			break;
		}

		/* validate to eliminate spurious values, race snapshots */
		if (is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			retry_count++;
			if (retry_count > 8192) {
				return EAGAIN;
			} else {
				sched_yield();
				retry = true;
				continue;
			}
		} else if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			uretry_count++;
			if (uretry_count > 8192) {
				/*
				 * U value if not used for a while can go out of sync
				 * set this to S value and try one more time.
				 */
				if (ucountreset != 0) {
					return EAGAIN;
				} else if (OSAtomicCompareAndSwap32Barrier(ucntval, (scntval & PTHRW_COUNT_MASK), (volatile int32_t *)c_useqcnt) == TRUE) {
					/* now the U is reset to S value */
					ucountreset = 1;
					uretry_count = 0;
				}
			}
			sched_yield();
			retry = true;
			continue;
		}

		if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
			/* If U < S, set U = S+diff due to intr's TO, etc */
			ulval = (scntval & PTHRW_COUNT_MASK);
		} else {
			/* If U >= S, set U = U+diff due to intr's TO, etc */
			ulval = (ucntval & PTHRW_COUNT_MASK);
		}

		if (broadcast) {
			diffgen = diff_genseq(lcntval, ulval);
			// Set U = L
			ulval = (lcntval & PTHRW_COUNT_MASK);
		} else {
			ulval += PTHRW_INC;
		}

	} while (retry || OSAtomicCompareAndSwap32Barrier(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE);

	uint32_t flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED) {
		flags |= _PTHREAD_MTX_OPT_PSHARED;
	}

	// Pack S (high word) and L (low word) for the kernel.
	uint64_t cvlsgen = ((uint64_t)scntval << 32) | lcntval;

	if (broadcast) {
		// pass old U val so kernel will know the diffgen
		uint64_t cvudgen = ((uint64_t)ucntval << 32) | diffgen;
		updateval = __psynch_cvbroad(ocond, cvlsgen, cvudgen, flags, NULL, 0, 0);
	} else {
		updateval = __psynch_cvsignal(ocond, cvlsgen, ucntval, thread, NULL, 0, 0, flags);
	}

	if (updateval != (uint32_t)-1 && updateval != 0) {
		// Kernel reported a new S delta/bits; fold it into the condvar.
		_pthread_cond_updateval(cond, 0, updateval);
	}

	return 0;
}
374
375
376 /*
377 * Signal a condition variable, waking up all threads waiting for it.
378 */
379 int
380 pthread_cond_broadcast(pthread_cond_t *ocond)
381 {
382 return _pthread_cond_signal(ocond, true, MACH_PORT_NULL);
383 }
384
385 /*
386 * Signal a condition variable, waking a specified thread.
387 */
388 int
389 pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
390 {
391 mach_port_t mp = MACH_PORT_NULL;
392 if (thread) {
393 mp = pthread_mach_thread_np(thread);
394 }
395 return _pthread_cond_signal(ocond, false, mp);
396 }
397
398 /*
399 * Signal a condition variable, waking only one thread.
400 */
401 int
402 pthread_cond_signal(pthread_cond_t *cond)
403 {
404 return pthread_cond_signal_thread_np(cond, NULL);
405 }
406
407 /*
408 * Manage a list of condition variables associated with a mutex
409 */
410
411
/*
 * Suspend waiting for a condition variable.
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
 * remaining conforming behavior..
 *
 * ocond/omutex: the condvar and the mutex the caller currently holds.
 * abstime: optional timeout — absolute when isRelative == 0 (converted
 * to a relative interval before the kernel call), relative otherwise.
 * Returns 0, ETIMEDOUT, or EINVAL.
 */
__private_extern__ int
_pthread_cond_wait(pthread_cond_t *ocond,
	pthread_mutex_t *omutex,
	const struct timespec *abstime,
	int isRelative,
	int isconforming)
{
	int res;
	_pthread_cond *cond = (_pthread_cond *)ocond;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	struct timespec then = { 0, 0 };
	uint32_t mtxgen, mtxugen, flags=0, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval, savebits;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen;
	uint32_t *npmtx = NULL;

	extern void _pthread_testcancel(pthread_t thread, int isconforming);

	res = _pthread_cond_check_init(cond, NULL);
	if (res != 0) {
		return res;
	}

	if (isconforming) {
		if (!_pthread_mutex_check_signature(mutex) &&
			!_pthread_mutex_check_signature_init(mutex)) {
			return EINVAL;
		}
		if (isconforming > 0) {
			// Conforming wait acts as a cancellation point on entry.
			_pthread_testcancel(pthread_self(), 1);
		}
	}

	/* send relative time to kernel */
	if (abstime) {
		if (isRelative == 0) {
			struct timespec now;
			struct timeval tv;
			__gettimeofday(&tv, NULL);
			TIMEVAL_TO_TIMESPEC(&tv, &now);

			/* Compute relative time to sleep */
			then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
			then.tv_sec = abstime->tv_sec - now.tv_sec;
			if (then.tv_nsec < 0) {
				// borrow one second from tv_sec
				then.tv_nsec += NSEC_PER_SEC;
				then.tv_sec--;
			}
			if (then.tv_sec < 0 || (then.tv_sec == 0 && then.tv_nsec == 0)) {
				// deadline has already passed
				return ETIMEDOUT;
			}
			if (isconforming &&
				(abstime->tv_sec < 0 ||
				abstime->tv_nsec < 0 ||
				abstime->tv_nsec >= NSEC_PER_SEC)) {
				return EINVAL;
			}
		} else {
			then.tv_sec = abstime->tv_sec;
			then.tv_nsec = abstime->tv_nsec;
			if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
				return ETIMEDOUT;
			}
		}
		if (isconforming && (then.tv_sec < 0 || then.tv_nsec < 0)) {
			return EINVAL;
		}
		if (then.tv_nsec >= NSEC_PER_SEC) {
			return EINVAL;
		}
	}

	// A condvar may only be associated with one mutex at a time.
	if (cond->busy != NULL && cond->busy != mutex) {
		return EINVAL;
	}

	COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	// Atomically bump L (one more waiter) while stripping the C/P bits
	// from S, remembering them in 'savebits' for the kernel call.
	do {
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;

		/* remove c and p bits on S word */
		savebits = scntval & PTH_RWS_CV_BITSALL;
		ulval = (scntval & PTHRW_COUNT_MASK);
		nlval = lcntval + PTHRW_INC;
		newval64 = (((uint64_t)ulval) << 32);
		newval64 |= nlval;
	} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE);

	cond->busy = mutex;

	// Release the mutex in user space; the kernel completes the handoff.
	res = __mtx_droplock(mutex, &flags, &npmtx, &mtxgen, &mtxugen);

	/* TBD: cases are for normal (non owner for recursive mutex; error checking)*/
	if (res != 0) {
		return EINVAL;
	}
	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) == 0) {
		npmtx = NULL;
		mugen = 0;
	} else {
		mugen = ((uint64_t)mtxugen << 32) | mtxgen;
	}
	flags &= ~_PTHREAD_MTX_OPT_MUTEX; /* reset the mutex bit as this is cvar */

	// Pack (S | saved bits) in the high word, new L in the low word.
	cvlsgen = ((uint64_t)(ulval | savebits)<< 32) | nlval;

	// SUSv3 requires pthread_cond_wait to be a cancellation point
	if (isconforming) {
		pthread_cleanup_push(_pthread_cond_cleanup, (void *)cond);
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
		_pthread_testcancel(pthread_self(), isconforming);
		pthread_cleanup_pop(0);
	} else {
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
	}

	if (updateval == (uint32_t)-1) {
		int err = errno;
		switch (err & 0xff) {
		case ETIMEDOUT:
			res = ETIMEDOUT;
			break;
		case EINTR:
			// spurious wakeup (unless canceled)
			res = 0;
			break;
		default:
			res = EINVAL;
			break;
		}

		// add unlock ref to show one less waiter
		_pthread_cond_updateval(cond, err, 0);
	} else if (updateval != 0) {
		// Successful wait
		// The return due to prepost and might have bit states
		// update S and return for prepo if needed
		_pthread_cond_updateval(cond, 0, updateval);
	}

	// Re-acquire the caller's mutex before returning, per POSIX.
	pthread_mutex_lock(omutex);

	return res;
}
571
/*
 * Cancellation cleanup handler installed around __psynch_cvwait() in
 * _pthread_cond_wait().  Does real work only when the thread was
 * actually canceled (_PTHREAD_WASCANCEL set in thread->detached, read
 * under the thread lock); it then accounts for one fewer waiter on the
 * condvar and re-acquires the mutex the waiter held, as cancellation of
 * a condition wait requires.
 */
static void
_pthread_cond_cleanup(void *arg)
{
	_pthread_cond *cond = (_pthread_cond *)arg;
	pthread_mutex_t *mutex;

	// 4597450: begin
	pthread_t thread = pthread_self();
	int thcanceled = 0;

	_PTHREAD_LOCK(thread->lock);
	thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
	_PTHREAD_UNLOCK(thread->lock);

	if (thcanceled == 0) {
		// Not a cancellation unwind — nothing to undo here.
		return;
	}

	// 4597450: end
	mutex = (pthread_mutex_t *)cond->busy;

	// add unlock ref to show one less waiter
	_pthread_cond_updateval(cond, thread->cancel_error, 0);

	/*
	** Can't do anything if this fails -- we're on the way out
	*/
	if (mutex != NULL) {
		(void)pthread_mutex_lock(mutex);
	}
}
603
604 #define ECVCERORR 256
605 #define ECVPERORR 512
606
607 static void
608 _pthread_cond_updateval(_pthread_cond *cond, int error, uint32_t updateval)
609 {
610 int needclearpre;
611
612 uint32_t diffgen, nsval;
613 uint64_t oldval64, newval64;
614 uint32_t lcntval, ucntval, scntval;
615 volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
616
617 if (error != 0) {
618 updateval = PTHRW_INC;
619 if ((error & ECVCERORR) != 0) {
620 updateval |= PTH_RWS_CV_CBIT;
621 }
622 if ((error & ECVPERORR) != 0) {
623 updateval |= PTH_RWS_CV_PBIT;
624 }
625 }
626
627 COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
628
629 do {
630 lcntval = *c_lseqcnt;
631 ucntval = *c_useqcnt;
632 scntval = *c_sseqcnt;
633
634 diffgen = diff_genseq(lcntval, scntval); // pending waiters
635
636 oldval64 = (((uint64_t)scntval) << 32);
637 oldval64 |= lcntval;
638
639 if (diffgen <= 0) {
640 /* TBD: Assert, should not be the case */
641 /* validate it is spurious and return */
642 newval64 = oldval64;
643 } else {
644 // update S by one
645
646 // update scntval with number of expected returns and bits
647 nsval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
648 // set bits
649 nsval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));
650
651 // if L==S and c&p bits are set, needs clearpre
652 if (((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) &&
653 ((nsval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
654 // reset p bit but retain c bit on the sword
655 nsval &= PTH_RWS_CV_RESET_PBIT;
656 needclearpre = 1;
657 } else {
658 needclearpre = 0;
659 }
660
661 newval64 = (((uint64_t)nsval) << 32);
662 newval64 |= lcntval;
663 }
664 } while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE);
665
666 if (diffgen > 0) {
667 // if L == S, then reset associated mutex
668 if ((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
669 cond->busy = NULL;
670 }
671
672 if (needclearpre != 0) {
673 uint32_t flags = 0;
674 if (cond->pshared == PTHREAD_PROCESS_SHARED) {
675 flags |= _PTHREAD_MTX_OPT_PSHARED;
676 }
677 (void)__psynch_cvclrprepost(cond, lcntval, ucntval, nsval, 0, lcntval, flags);
678 }
679 }
680 }
681
682
683 int
684 pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime)
685 {
686 return _pthread_cond_wait(cond, mutex, abstime, 1, 0);
687 }
688
689 #endif /* !BUILDING_VARIANT ] */
690
691 int
692 pthread_cond_init(pthread_cond_t *ocond, const pthread_condattr_t *attr)
693 {
694 int conforming;
695
696 #if __DARWIN_UNIX03
697 conforming = 1;
698 #else /* __DARWIN_UNIX03 */
699 conforming = 0;
700 #endif /* __DARWIN_UNIX03 */
701
702 _pthread_cond *cond = (_pthread_cond *)ocond;
703 _PTHREAD_LOCK_INIT(cond->lock);
704 return _pthread_cond_init(cond, attr, conforming);
705 }