/* src/pthread_cond.c — Apple libpthread (libpthread-301.20.1) */
/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */
50
a0619f9c 51#include "resolver.h"
f1a1da6c
A
52#include "internal.h"
53#include <sys/time.h> /* For struct timespec and getclock(). */
f1a1da6c
A
54
55#ifdef PLOCKSTAT
56#include "plockstat.h"
57#else /* !PLOCKSTAT */
58#define PLOCKSTAT_MUTEX_RELEASE(x, y)
59#endif /* PLOCKSTAT */
60
f1a1da6c 61extern int __gettimeofday(struct timeval *, struct timezone *);
a0619f9c 62extern void _pthread_testcancel(pthread_t thread, int isconforming);
f1a1da6c 63
a0619f9c
A
64PTHREAD_NOEXPORT
65int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
66 const struct timespec *abstime, int isRelative, int isconforming);
f1a1da6c 67
a0619f9c
A
68PTHREAD_ALWAYS_INLINE
69static inline void
f1a1da6c 70COND_GETSEQ_ADDR(_pthread_cond *cond,
a0619f9c
A
71 volatile uint64_t **c_lsseqaddr,
72 volatile uint32_t **c_lseqcnt,
73 volatile uint32_t **c_useqcnt,
74 volatile uint32_t **c_sseqcnt)
f1a1da6c
A
75{
76 if (cond->misalign) {
77 *c_lseqcnt = &cond->c_seq[1];
78 *c_sseqcnt = &cond->c_seq[2];
79 *c_useqcnt = &cond->c_seq[0];
80 } else {
81 *c_lseqcnt = &cond->c_seq[0];
82 *c_sseqcnt = &cond->c_seq[1];
83 *c_useqcnt = &cond->c_seq[2];
84 }
a0619f9c 85 *c_lsseqaddr = (volatile uint64_t *)*c_lseqcnt;
f1a1da6c
A
86}
87
88#ifndef BUILDING_VARIANT /* [ */
89
a0619f9c
A
90static void _pthread_cond_cleanup(void *arg);
91static void _pthread_cond_updateval(_pthread_cond * cond, int error,
92 uint32_t updateval);
93
94
f1a1da6c
A
95int
96pthread_condattr_init(pthread_condattr_t *attr)
97{
98 attr->sig = _PTHREAD_COND_ATTR_SIG;
99 attr->pshared = _PTHREAD_DEFAULT_PSHARED;
100 return 0;
101}
102
a0619f9c 103int
f1a1da6c
A
104pthread_condattr_destroy(pthread_condattr_t *attr)
105{
106 attr->sig = _PTHREAD_NO_SIG;
107 return 0;
108}
109
110int
111pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared)
112{
113 int res = EINVAL;
114 if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
115 *pshared = (int)attr->pshared;
116 res = 0;
117 }
118 return res;
119}
120
121int
122pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
123{
124 int res = EINVAL;
125 if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
126#if __DARWIN_UNIX03
127 if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED)
128#else /* __DARWIN_UNIX03 */
129 if (pshared == PTHREAD_PROCESS_PRIVATE)
130#endif /* __DARWIN_UNIX03 */
131 {
132 attr->pshared = pshared;
133 res = 0;
134 }
135 }
136 return res;
137}
138
a0619f9c
A
139int
140pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex,
141 const struct timespec *abstime)
142{
143 return _pthread_cond_wait(cond, mutex, abstime, 1, 0);
144}
145
146#endif /* !BUILDING_VARIANT ] */
147
/*
 * Core initializer shared by pthread_cond_init() and the lazy
 * initialization of statically-initialized condvars.  Zeroes the
 * sequence words and busy pointer, records the pshared mode, and then
 * publishes the signature LAST with release ordering so that a
 * concurrent reader observing a valid 'sig' also observes fully
 * initialized state.  Always returns 0.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_cond_init(_pthread_cond *cond, const pthread_condattr_t *attr, int conforming)
{
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	cond->busy = NULL;
	cond->c_seq[0] = 0;
	cond->c_seq[1] = 0;
	cond->c_seq[2] = 0;
	cond->unused = 0;

	// The (L,S) pair must be accessed as an aligned 64-bit quantity;
	// record whether c_seq[0] itself sits on an 8-byte boundary.
	cond->misalign = (((uintptr_t)&cond->c_seq[0]) & 0x7) != 0;
	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
	*c_sseqcnt = PTH_RWS_CV_CBIT; // set Sword to 0c

	// Only conforming (UNIX03) init honors the attr's pshared setting.
	if (conforming) {
		if (attr) {
			cond->pshared = attr->pshared;
		} else {
			cond->pshared = _PTHREAD_DEFAULT_PSHARED;
		}
	} else {
		cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	}

	long sig = _PTHREAD_COND_SIG;

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	// Store the high half first, then release-store the low half so the
	// signature becomes valid only after both halves are written.
	uint32_t *sig32_ptr = (uint32_t*)&cond->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(cond, sig, sig, release);
#endif

	return 0;
}
191
a0619f9c
A
192#ifndef BUILDING_VARIANT /* [ */
193
/*
 * Slow path of _pthread_cond_check_init(): handle a condvar that still
 * carries the static-initializer signature by initializing it under the
 * condvar's lock (classic double-checked pattern: the signature is
 * re-read after acquiring the lock in case another thread won the
 * race).  Sets *inited when this call performed the initialization.
 * Returns 0 on success, EINVAL if the signature is unrecognized.
 */
PTHREAD_NOINLINE
static int
_pthread_cond_check_init_slow(_pthread_cond *cond, bool *inited)
{
	int res = EINVAL;
	if (cond->sig == _PTHREAD_COND_SIG_init) {
		_PTHREAD_LOCK(cond->lock);
		// Re-check under the lock: another thread may have initialized
		// the condvar between the first check and lock acquisition.
		if (cond->sig == _PTHREAD_COND_SIG_init) {
			res = _pthread_cond_init(cond, NULL, 0);
			if (inited) {
				*inited = true;
			}
		} else if (cond->sig == _PTHREAD_COND_SIG) {
			res = 0;
		}
		_PTHREAD_UNLOCK(cond->lock);
	} else if (cond->sig == _PTHREAD_COND_SIG) {
		res = 0;
	}
	return res;
}
215
a0619f9c 216PTHREAD_ALWAYS_INLINE
964d3577
A
217static inline int
218_pthread_cond_check_init(_pthread_cond *cond, bool *inited)
219{
220 int res = 0;
221 if (cond->sig != _PTHREAD_COND_SIG) {
222 return _pthread_cond_check_init_slow(cond, inited);
f1a1da6c
A
223 }
224 return res;
225}
226
/*
 * Destroy a condition variable.  Validates (via a CAS snapshot of the
 * L/S words) that no waiters are pending, invalidates the signature,
 * and — if the kernel had preposted wakeups (P bit set) — asks the
 * kernel to discard them.  Returns 0 on success, EINVAL for a bad or
 * busy condvar.  Note the busy case deliberately returns EINVAL, not
 * EBUSY (see the commented-out assignment below).
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_cond_destroy(pthread_cond_t *ocond)
{
	_pthread_cond *cond = (_pthread_cond *)ocond;
	int res = EINVAL;
	if (cond->sig == _PTHREAD_COND_SIG) {
		_PTHREAD_LOCK(cond->lock);

		uint64_t oldval64, newval64;
		uint32_t lcntval, ucntval, scntval;
		volatile uint64_t *c_lsseqaddr;
		volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

		COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

		// The CAS with newval64 == oldval64 does not modify the words;
		// it only confirms the (L,S) snapshot was consistent.
		do {
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;

			// validate it is not busy
			if ((lcntval & PTHRW_COUNT_MASK) != (scntval & PTHRW_COUNT_MASK)) {
				//res = EBUSY;
				break;
			}
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;
		} while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));

		// <rdar://problem/13782056> Need to clear preposts.
		uint32_t flags = 0;
		bool needclearpre = ((scntval & PTH_RWS_CV_PBIT) != 0);
		if (needclearpre && cond->pshared == PTHREAD_PROCESS_SHARED) {
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		}

		cond->sig = _PTHREAD_NO_SIG;
		res = 0;

		_PTHREAD_UNLOCK(cond->lock);

		// Tell the kernel to drop any preposted wakeups for this condvar.
		if (needclearpre) {
			(void)__psynch_cvclrprepost(cond, lcntval, ucntval, scntval, 0, lcntval, flags);
		}
	} else if (cond->sig == _PTHREAD_COND_SIG_init) {
		// Compatibility for misbehaving applications that attempt to
		// destroy a statically initialized condition variable.
		cond->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	return res;
}
281
/*
 * Common implementation of signal/broadcast/signal_thread_np.
 *
 * Lock-free protocol over the three sequence words:
 *   L = waiter (lock-enter) generation, U = unlock generation,
 *   S = signalled generation.  A consistent (L,U,S) snapshot is taken,
 *   U is advanced via CAS (by one for signal, up to L for broadcast),
 *   and the kernel is told to wake waiters via __psynch_cvsignal /
 *   __psynch_cvbroad.  If 'thread' is a valid Mach port, that specific
 *   thread is woken.  Returns 0 on success (including the no-waiters
 *   case), EAGAIN if snapshot validation keeps failing, or an error
 *   from lazy initialization.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
{
	int res;
	_pthread_cond *cond = (_pthread_cond *)ocond;

	uint32_t updateval;
	uint32_t diffgen;
	uint32_t ulval;

	uint64_t oldval64, newval64;
	uint32_t lcntval, ucntval, scntval;
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	int retry_count = 0, uretry_count = 0;
	int ucountreset = 0;

	// If this call performed the lazy init there can be no waiters yet,
	// so signalling is a no-op.
	bool inited = false;
	res = _pthread_cond_check_init(cond, &inited);
	if (res != 0 || inited == true) {
		return res;
	}

	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	bool retry;
	do {
		retry = false;

		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;
		diffgen = 0;
		ulval = 0;

		// No pending waiters (L==S), or nothing unsignalled for an
		// untargeted wake (L==U): confirm the snapshot and return.
		if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
		    (thread == MACH_PORT_NULL && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
			/* validate it is spurious and return */
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;

			if (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst)) {
				retry = true;
				continue;
			} else {
				return 0;
			}
		}

		// Targeted wake: skip the U-word bookkeeping, go straight to
		// the kernel with the snapshot we have.
		if (thread) {
			break;
		}

		/* validate to eliminate spurious values, race snapshots */
		if (is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			retry_count++;
			if (retry_count > 8192) {
				return EAGAIN;
			} else {
				sched_yield();
				retry = true;
				continue;
			}
		} else if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			uretry_count++;
			if (uretry_count > 8192) {
				/*
				 * U value if not used for a while can go out of sync
				 * set this to S value and try one more time.
				 */
				if (ucountreset != 0) {
					return EAGAIN;
				} else if (os_atomic_cmpxchg(c_useqcnt, ucntval, (scntval & PTHRW_COUNT_MASK), seq_cst)) {
					/* now the U is reset to S value */
					ucountreset = 1;
					uretry_count = 0;
				}
			}
			sched_yield();
			retry = true;
			continue;
		}

		if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
			/* If U < S, set U = S+diff due to intr's TO, etc */
			ulval = (scntval & PTHRW_COUNT_MASK);
		} else {
			/* If U >= S, set U = U+diff due to intr's TO, etc */
			ulval = (ucntval & PTHRW_COUNT_MASK);
		}

		if (broadcast) {
			diffgen = diff_genseq(lcntval, ulval);
			// Set U = L
			ulval = (lcntval & PTHRW_COUNT_MASK);
		} else {
			ulval += PTHRW_INC;
		}

	} while (retry || !os_atomic_cmpxchg(c_useqcnt, ucntval, ulval, seq_cst));

	uint32_t flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED) {
		flags |= _PTHREAD_MTX_OPT_PSHARED;
	}

	uint64_t cvlsgen = ((uint64_t)scntval << 32) | lcntval;

	if (broadcast) {
		// pass old U val so kernel will know the diffgen
		uint64_t cvudgen = ((uint64_t)ucntval << 32) | diffgen;
		updateval = __psynch_cvbroad(ocond, cvlsgen, cvudgen, flags, NULL, 0, 0);
	} else {
		updateval = __psynch_cvsignal(ocond, cvlsgen, ucntval, thread, NULL, 0, 0, flags);
	}

	// A nonzero, non-error return from the kernel carries an S-word
	// update that must be folded back into the userspace state.
	if (updateval != (uint32_t)-1 && updateval != 0) {
		_pthread_cond_updateval(cond, 0, updateval);
	}

	return 0;
}
409
f1a1da6c
A
410/*
411 * Signal a condition variable, waking up all threads waiting for it.
412 */
a0619f9c 413PTHREAD_NOEXPORT_VARIANT
f1a1da6c
A
414int
415pthread_cond_broadcast(pthread_cond_t *ocond)
416{
417 return _pthread_cond_signal(ocond, true, MACH_PORT_NULL);
418}
419
420/*
421 * Signal a condition variable, waking a specified thread.
422 */
a0619f9c
A
423PTHREAD_NOEXPORT_VARIANT
424int
f1a1da6c
A
425pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
426{
427 mach_port_t mp = MACH_PORT_NULL;
428 if (thread) {
a0619f9c 429 mp = pthread_mach_thread_np((_Nonnull pthread_t)thread);
f1a1da6c
A
430 }
431 return _pthread_cond_signal(ocond, false, mp);
432}
433
434/*
435 * Signal a condition variable, waking only one thread.
436 */
a0619f9c 437PTHREAD_NOEXPORT_VARIANT
f1a1da6c 438int
a0619f9c 439pthread_cond_signal(pthread_cond_t *ocond)
f1a1da6c 440{
a0619f9c 441 return _pthread_cond_signal(ocond, false, MACH_PORT_NULL);
f1a1da6c
A
442}
443
/*
 * Manage a list of condition variables associated with a mutex
 */

/*
 * Suspend waiting for a condition variable.
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
 * remaining conforming behavior..
 *
 * Returns 0, ETIMEDOUT, or EINVAL.  The caller's mutex is dropped via
 * _pthread_mutex_droplock() before blocking in __psynch_cvwait() and
 * re-acquired before returning.
 */
PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_cond_wait(pthread_cond_t *ocond,
		pthread_mutex_t *omutex,
		const struct timespec *abstime,
		int isRelative,
		int isconforming)
{
	int res;
	_pthread_cond *cond = (_pthread_cond *)ocond;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	struct timespec then = { 0, 0 };
	uint32_t mtxgen, mtxugen, flags=0, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval, savebits;
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen;
	uint32_t *npmtx = NULL;

	res = _pthread_cond_check_init(cond, NULL);
	if (res != 0) {
		return res;
	}

	if (isconforming) {
		if (!_pthread_mutex_check_signature(mutex) &&
				!_pthread_mutex_check_signature_init(mutex)) {
			return EINVAL;
		}
		if (isconforming > 0) {
			_pthread_testcancel(pthread_self(), 1);
		}
	}

	/* send relative time to kernel */
	if (abstime) {
		if (isRelative == 0) {
			// NOTE(review): absolute timeouts are converted using
			// wall-clock __gettimeofday(), so they are affected by
			// clock adjustments — presumably intentional for
			// CLOCK_REALTIME semantics; confirm against condattr
			// clock expectations.
			struct timespec now;
			struct timeval tv;
			__gettimeofday(&tv, NULL);
			TIMEVAL_TO_TIMESPEC(&tv, &now);

			/* Compute relative time to sleep */
			then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
			then.tv_sec = abstime->tv_sec - now.tv_sec;
			if (then.tv_nsec < 0) {
				then.tv_nsec += NSEC_PER_SEC;
				then.tv_sec--;
			}
			if (then.tv_sec < 0 || (then.tv_sec == 0 && then.tv_nsec == 0)) {
				return ETIMEDOUT;
			}
			if (isconforming &&
			    (abstime->tv_sec < 0 ||
			     abstime->tv_nsec < 0 ||
			     abstime->tv_nsec >= NSEC_PER_SEC)) {
				return EINVAL;
			}
		} else {
			then.tv_sec = abstime->tv_sec;
			then.tv_nsec = abstime->tv_nsec;
			if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
				return ETIMEDOUT;
			}
		}
		if (isconforming && (then.tv_sec < 0 || then.tv_nsec < 0)) {
			return EINVAL;
		}
		if (then.tv_nsec >= NSEC_PER_SEC) {
			return EINVAL;
		}
	}

	// Detect use of one condvar with two different mutexes (invalid).
	if (cond->busy != NULL && cond->busy != mutex) {
		return EINVAL;
	}

	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	// Register this waiter: advance L by one generation, strip the C/P
	// bits from S (saved in 'savebits' and passed to the kernel).
	do {
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;

		/* remove c and p bits on S word */
		savebits = scntval & PTH_RWS_CV_BITSALL;
		ulval = (scntval & PTHRW_COUNT_MASK);
		nlval = lcntval + PTHRW_INC;
		newval64 = (((uint64_t)ulval) << 32);
		newval64 |= nlval;
	} while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));

	cond->busy = mutex;

	res = _pthread_mutex_droplock(mutex, &flags, &npmtx, &mtxgen, &mtxugen);

	/* TBD: cases are for normal (non owner for recursive mutex; error checking)*/
	if (res != 0) {
		return EINVAL;
	}
	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) == 0) {
		npmtx = NULL;
		mugen = 0;
	} else {
		mugen = ((uint64_t)mtxugen << 32) | mtxgen;
	}
	flags &= ~_PTHREAD_MTX_OPT_MUTEX; /* reset the mutex bit as this is cvar */

	cvlsgen = ((uint64_t)(ulval | savebits)<< 32) | nlval;

	// SUSv3 requires pthread_cond_wait to be a cancellation point
	if (isconforming) {
		pthread_cleanup_push(_pthread_cond_cleanup, (void *)cond);
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
		_pthread_testcancel(pthread_self(), isconforming);
		pthread_cleanup_pop(0);
	} else {
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
	}

	if (updateval == (uint32_t)-1) {
		int err = errno;
		switch (err & 0xff) {
		case ETIMEDOUT:
			res = ETIMEDOUT;
			break;
		case EINTR:
			// spurious wakeup (unless canceled)
			res = 0;
			break;
		default:
			res = EINVAL;
			break;
		}

		// add unlock ref to show one less waiter
		_pthread_cond_updateval(cond, err, 0);
	} else if (updateval != 0) {
		// Successful wait
		// The return due to prepost and might have bit states
		// update S and return for prepo if needed
		_pthread_cond_updateval(cond, 0, updateval);
	}

	// Re-acquire the caller's mutex before returning, per POSIX.
	pthread_mutex_lock(omutex);

	return res;
}
607
/*
 * Cancellation cleanup handler installed around __psynch_cvwait().
 * If the thread was actually canceled (not merely unwinding for some
 * other reason), account for one fewer waiter on the condvar and
 * re-acquire the associated mutex so the canceled thread exits the
 * wait holding it, as POSIX requires.
 */
static void
_pthread_cond_cleanup(void *arg)
{
	_pthread_cond *cond = (_pthread_cond *)arg;
	pthread_mutex_t *mutex;

// 4597450: begin
	pthread_t thread = pthread_self();
	int thcanceled = 0;

	_PTHREAD_LOCK(thread->lock);
	thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
	_PTHREAD_UNLOCK(thread->lock);

	// Only run the cleanup accounting if this is a real cancellation.
	if (thcanceled == 0) {
		return;
	}

// 4597450: end
	mutex = (pthread_mutex_t *)cond->busy;

	// add unlock ref to show one less waiter
	_pthread_cond_updateval(cond, thread->cancel_error, 0);

	/*
	** Can't do anything if this fails -- we're on the way out
	*/
	if (mutex != NULL) {
		(void)pthread_mutex_lock(mutex);
	}
}
639
640#define ECVCERORR 256
641#define ECVPERORR 512
642
/*
 * Fold a kernel-reported update (or a local error) into the condvar's
 * S word: advance S by the update count, merge the C/P bits, and — when
 * all waiters are now accounted for (L==S) with both bits set — clear
 * the P bit locally and tell the kernel to drop its prepost.  Also
 * resets cond->busy once the last waiter is gone.  'error' nonzero
 * means "one waiter left due to error": S is advanced by one, with
 * ECVCERORR/ECVPERORR mapping to the C/P bits.
 */
static void
_pthread_cond_updateval(_pthread_cond *cond, int error, uint32_t updateval)
{
	int needclearpre;

	uint32_t diffgen, nsval;
	uint64_t oldval64, newval64;
	uint32_t lcntval, ucntval, scntval;
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	if (error != 0) {
		updateval = PTHRW_INC;
		if ((error & ECVCERORR) != 0) {
			updateval |= PTH_RWS_CV_CBIT;
		}
		if ((error & ECVPERORR) != 0) {
			updateval |= PTH_RWS_CV_PBIT;
		}
	}

	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	do {
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;
		nsval = 0;
		needclearpre = 0;

		diffgen = diff_genseq(lcntval, scntval); // pending waiters

		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;

		// NOTE(review): diffgen is uint32_t, so "<= 0" is effectively
		// "== 0" — a negative difference cannot occur here.
		if (diffgen <= 0) {
			/* TBD: Assert, should not be the case */
			/* validate it is spurious and return */
			newval64 = oldval64;
		} else {
			// update S by one

			// update scntval with number of expected returns and bits
			nsval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			// set bits
			nsval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

			// if L==S and c&p bits are set, needs clearpre
			if (((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) &&
			    ((nsval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				// reset p bit but retain c bit on the sword
				nsval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			}

			newval64 = (((uint64_t)nsval) << 32);
			newval64 |= lcntval;
		}
	} while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));

	if (diffgen > 0) {
		// if L == S, then reset associated mutex
		if ((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
			cond->busy = NULL;
		}

		if (needclearpre != 0) {
			uint32_t flags = 0;
			if (cond->pshared == PTHREAD_PROCESS_SHARED) {
				flags |= _PTHREAD_MTX_OPT_PSHARED;
			}
			(void)__psynch_cvclrprepost(cond, lcntval, ucntval, nsval, 0, lcntval, flags);
		}
	}
}
718
f1a1da6c
A
719#endif /* !BUILDING_VARIANT ] */
720
a0619f9c 721PTHREAD_NOEXPORT_VARIANT
f1a1da6c
A
722int
723pthread_cond_init(pthread_cond_t *ocond, const pthread_condattr_t *attr)
724{
725 int conforming;
726
727#if __DARWIN_UNIX03
728 conforming = 1;
729#else /* __DARWIN_UNIX03 */
730 conforming = 0;
731#endif /* __DARWIN_UNIX03 */
732
733 _pthread_cond *cond = (_pthread_cond *)ocond;
2546420a 734 _PTHREAD_LOCK_INIT(cond->lock);
f1a1da6c
A
735 return _pthread_cond_init(cond, attr, conforming);
736}
a0619f9c 737