/* src/pthread_cond.c, from apple/libpthread (libpthread-416.100.3) */
/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#include "resolver.h"
#include "internal.h"
#include <sys/time.h> /* For struct timespec and getclock(). */

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

extern int __gettimeofday(struct timeval *, struct timezone *);

PTHREAD_NOEXPORT
int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
		const struct timespec *abstime, int isRelative, int isconforming);

PTHREAD_ALWAYS_INLINE
static inline void
COND_GETSEQ_ADDR(_pthread_cond *cond,
		volatile uint64_t **c_lsseqaddr,
		volatile uint32_t **c_lseqcnt,
		volatile uint32_t **c_useqcnt,
		volatile uint32_t **c_sseqcnt)
{
	if (cond->misalign) {
		*c_lseqcnt = &cond->c_seq[1];
		*c_sseqcnt = &cond->c_seq[2];
		*c_useqcnt = &cond->c_seq[0];
	} else {
		*c_lseqcnt = &cond->c_seq[0];
		*c_sseqcnt = &cond->c_seq[1];
		*c_useqcnt = &cond->c_seq[2];
	}
	*c_lsseqaddr = (volatile uint64_t *)*c_lseqcnt;
}
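
/*
 * Layout note (editorial): the L (lock/wait sequence) and S (signal
 * sequence) words are read and CAS'd together as a single 64-bit value
 * through c_lsseqaddr, so that pair must start on an 8-byte boundary.
 * When c_seq[0] is not 8-byte aligned, c_seq[1] is, so L/S occupy
 * slots 1/2 and the unlock count U takes slot 0; otherwise L/S sit in
 * slots 0/1 and U in slot 2. The 'misalign' flag, computed in
 * _pthread_cond_init() below, records which arrangement is in use.
 */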

#ifndef BUILDING_VARIANT /* [ */

static void _pthread_cond_cleanup(void *arg);
static void _pthread_cond_updateval(_pthread_cond *cond, _pthread_mutex *mutex,
		int error, uint32_t updateval);


int
pthread_condattr_init(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_COND_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_condattr_destroy(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}

int
pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
#if __DARWIN_UNIX03
		if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED)
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}
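
/*
 * Usage sketch (editorial, not part of this file): a process-shared
 * condition variable must live in memory mapped into every process that
 * uses it, e.g. an mmap'd region. The names below are illustrative.
 *
 *	#include <pthread.h>
 *
 *	int
 *	shared_cond_setup(pthread_cond_t *cv)	// cv points into shared memory
 *	{
 *		pthread_condattr_t attr;
 *		int err = pthread_condattr_init(&attr);
 *		if (err) return err;
 *		err = pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *		if (err == 0) err = pthread_cond_init(cv, &attr);
 *		(void)pthread_condattr_destroy(&attr);	// attr can go once cv is inited
 *		return err;
 *	}
 */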

int
pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex,
		const struct timespec *abstime)
{
	return _pthread_cond_wait(cond, mutex, abstime, 1, 0);
}
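
/*
 * Usage sketch (editorial): despite the parameter name, the _np variant
 * takes a timeout relative to "now" rather than an absolute deadline, so
 * no clock sampling is needed at the call site. Names are illustrative;
 * note the relative delay restarts on each loop iteration.
 *
 *	struct timespec delay = { .tv_sec = 2, .tv_nsec = 0 };
 *	pthread_mutex_lock(&lock);
 *	while (!work_ready) {
 *		int err = pthread_cond_timedwait_relative_np(&cv, &lock, &delay);
 *		if (err == ETIMEDOUT) break;	// gave up after ~2s
 *	}
 *	pthread_mutex_unlock(&lock);
 */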

#endif /* !BUILDING_VARIANT ] */

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_cond_init(_pthread_cond *cond, const pthread_condattr_t *attr, int conforming)
{
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	cond->busy = NULL;
	cond->c_seq[0] = 0;
	cond->c_seq[1] = 0;
	cond->c_seq[2] = 0;
	cond->unused = 0;

	cond->misalign = (((uintptr_t)&cond->c_seq[0]) & 0x7) != 0;
	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
	*c_sseqcnt = PTH_RWS_CV_CBIT; // set S word to count 0 with the C bit set

	if (conforming) {
		if (attr) {
			cond->pshared = attr->pshared;
		} else {
			cond->pshared = _PTHREAD_DEFAULT_PSHARED;
		}
	} else {
		cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	}

	long sig = _PTHREAD_COND_SIG;

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&cond->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(cond, sig, sig, release);
#endif

	return 0;
}
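
/*
 * Note (editorial): on LP64 the 64-bit signature is published in two
 * 32-bit halves because 'sig' may not be 8-byte aligned in the legacy
 * struct layout. The high half is written first; the release store of
 * the low half then publishes the fully initialized contents to readers
 * such as _pthread_cond_check_init() below, which compare cond->sig
 * against the signature values.
 */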

#ifndef BUILDING_VARIANT /* [ */

PTHREAD_NOINLINE
static int
_pthread_cond_check_init_slow(_pthread_cond *cond, bool *inited)
{
	int res = EINVAL;
	if (cond->sig == _PTHREAD_COND_SIG_init) {
		_PTHREAD_LOCK(cond->lock);
		if (cond->sig == _PTHREAD_COND_SIG_init) {
			res = _pthread_cond_init(cond, NULL, 0);
			if (inited) {
				*inited = true;
			}
		} else if (cond->sig == _PTHREAD_COND_SIG) {
			res = 0;
		}
		_PTHREAD_UNLOCK(cond->lock);
	} else if (cond->sig == _PTHREAD_COND_SIG) {
		res = 0;
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_cond_check_init(_pthread_cond *cond, bool *inited)
{
	int res = 0;
	if (cond->sig != _PTHREAD_COND_SIG) {
		return _pthread_cond_check_init_slow(cond, inited);
	}
	return res;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_cond_destroy(pthread_cond_t *ocond)
{
	_pthread_cond *cond = (_pthread_cond *)ocond;
	int res = EINVAL;
	if (cond->sig == _PTHREAD_COND_SIG) {
		_PTHREAD_LOCK(cond->lock);

		uint64_t oldval64, newval64;
		uint32_t lcntval, ucntval, scntval;
		volatile uint64_t *c_lsseqaddr;
		volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

		COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

		do {
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;

			// validate it is not busy
			if ((lcntval & PTHRW_COUNT_MASK) != (scntval & PTHRW_COUNT_MASK)) {
				//res = EBUSY;
				break;
			}
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;
		} while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));

		// <rdar://problem/13782056> Need to clear preposts.
		uint32_t flags = 0;
		bool needclearpre = ((scntval & PTH_RWS_CV_PBIT) != 0);
		if (needclearpre && cond->pshared == PTHREAD_PROCESS_SHARED) {
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		}

		cond->sig = _PTHREAD_NO_SIG;
		res = 0;

		_PTHREAD_UNLOCK(cond->lock);

		if (needclearpre) {
			(void)__psynch_cvclrprepost(cond, lcntval, ucntval, scntval, 0, lcntval, flags);
		}
	} else if (cond->sig == _PTHREAD_COND_SIG_init) {
		// Compatibility for misbehaving applications that attempt to
		// destroy a statically initialized condition variable.
		cond->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	return res;
}
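
/*
 * Usage sketch (editorial): destroying a condition variable is only
 * defined once no thread is blocked on it. A typical shutdown sequence
 * (names illustrative):
 *
 *	pthread_mutex_lock(&lock);
 *	shutting_down = true;
 *	pthread_cond_broadcast(&cv);	// wake every waiter
 *	pthread_mutex_unlock(&lock);
 *	// ... join the waiter threads ...
 *	pthread_cond_destroy(&cv);
 *	pthread_mutex_destroy(&lock);
 */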

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
{
	int res;
	_pthread_cond *cond = (_pthread_cond *)ocond;

	uint32_t updateval;
	uint32_t diffgen;
	uint32_t ulval;

	uint64_t oldval64, newval64;
	uint32_t lcntval, ucntval, scntval;
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	int retry_count = 0, uretry_count = 0;
	int ucountreset = 0;

	bool inited = false;
	res = _pthread_cond_check_init(cond, &inited);
	if (res != 0 || inited == true) {
		return res;
	}

	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	bool retry;
	do {
		retry = false;

		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;
		diffgen = 0;
		ulval = 0;

		if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
				(thread == MACH_PORT_NULL && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
			/* validate it is spurious and return */
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;

			if (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst)) {
				retry = true;
				continue;
			} else {
				return 0;
			}
		}

		if (thread) {
			break;
		}

		/* validate to eliminate spurious values, race snapshots */
		if (is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* S may be newer than this L snapshot; just redo */
			retry_count++;
			if (retry_count > 8192) {
				return EAGAIN;
			} else {
				sched_yield();
				retry = true;
				continue;
			}
		} else if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			uretry_count++;
			if (uretry_count > 8192) {
				/*
				 * The U value, if not used for a while, can go out of sync;
				 * set it to the S value and try one more time.
				 */
				if (ucountreset != 0) {
					return EAGAIN;
				} else if (os_atomic_cmpxchg(c_useqcnt, ucntval, (scntval & PTHRW_COUNT_MASK), seq_cst)) {
					/* now the U is reset to S value */
					ucountreset = 1;
					uretry_count = 0;
				}
			}
			sched_yield();
			retry = true;
			continue;
		}

		if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
			/* If U < S, catch U up to S (stale after interrupted waits, timeouts, etc.) */
			ulval = (scntval & PTHRW_COUNT_MASK);
		} else {
			/* If U >= S, advance from U (stale after interrupted waits, timeouts, etc.) */
			ulval = (ucntval & PTHRW_COUNT_MASK);
		}

		if (broadcast) {
			diffgen = diff_genseq(lcntval, ulval);
			// Set U = L
			ulval = (lcntval & PTHRW_COUNT_MASK);
		} else {
			ulval += PTHRW_INC;
		}

	} while (retry || !os_atomic_cmpxchg(c_useqcnt, ucntval, ulval, seq_cst));

	uint32_t flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED) {
		flags |= _PTHREAD_MTX_OPT_PSHARED;
	}

	uint64_t cvlsgen = ((uint64_t)scntval << 32) | lcntval;

	if (broadcast) {
		// pass old U val so kernel will know the diffgen
		uint64_t cvudgen = ((uint64_t)ucntval << 32) | diffgen;
		updateval = __psynch_cvbroad(ocond, cvlsgen, cvudgen, flags, NULL, 0, 0);
	} else {
		updateval = __psynch_cvsignal(ocond, cvlsgen, ucntval, thread, NULL, 0, 0, flags);
	}

	if (updateval != (uint32_t)-1 && updateval != 0) {
		_pthread_cond_updateval(cond, NULL, 0, updateval);
	}

	return 0;
}

/*
 * Signal a condition variable, waking up all threads waiting for it.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_cond_broadcast(pthread_cond_t *ocond)
{
	return _pthread_cond_signal(ocond, true, MACH_PORT_NULL);
}

/*
 * Signal a condition variable, waking a specified thread.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
{
	mach_port_t mp = MACH_PORT_NULL;
	if (thread) {
		mp = pthread_mach_thread_np((_Nonnull pthread_t)thread);
	}
	return _pthread_cond_signal(ocond, false, mp);
}

/*
 * Signal a condition variable, waking only one thread.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_cond_signal(pthread_cond_t *ocond)
{
	return _pthread_cond_signal(ocond, false, MACH_PORT_NULL);
}
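
/*
 * Usage sketch (editorial): signal/broadcast only order wakeups; the
 * waiter must still recheck its predicate under the mutex, since
 * wakeups can be spurious (see the EINTR handling in _pthread_cond_wait
 * below). Names are illustrative.
 *
 *	// consumer
 *	pthread_mutex_lock(&lock);
 *	while (queue_empty(&q)) {
 *		pthread_cond_wait(&cv, &lock);	// atomically unlocks, sleeps, relocks
 *	}
 *	item = queue_pop(&q);
 *	pthread_mutex_unlock(&lock);
 *
 *	// producer
 *	pthread_mutex_lock(&lock);
 *	queue_push(&q, item);
 *	pthread_cond_signal(&cv);	// one waiter suffices for one item
 *	pthread_mutex_unlock(&lock);
 */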

/*
 * Manage a list of condition variables associated with a mutex
 */

/*
 * Suspend waiting for a condition variable.
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 * If conformance is not cancelable, we skip the _pthread_testcancel(),
 * but keep the remaining conforming behavior.
 */
PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_cond_wait(pthread_cond_t *ocond,
		pthread_mutex_t *omutex,
		const struct timespec *abstime,
		int isRelative,
		int conforming)
{
	int res;
	_pthread_cond *cond = (_pthread_cond *)ocond;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	struct timespec then = { 0, 0 };
	uint32_t mtxgen, mtxugen, flags = 0, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval, savebits;
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen;
	uint32_t *npmtx = NULL;
	int timeout_elapsed = 0;

	res = _pthread_cond_check_init(cond, NULL);
	if (res != 0) {
		return res;
	}

	if (conforming) {
		if (!_pthread_mutex_check_signature(mutex) &&
				!_pthread_mutex_check_signature_init(mutex)) {
			return EINVAL;
		}
		if (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE) {
			_pthread_testcancel(conforming);
		}
	}

	/* send relative time to kernel */
	if (abstime) {
		if (abstime->tv_nsec < 0 || abstime->tv_nsec >= NSEC_PER_SEC) {
			return EINVAL;
		}

		if (isRelative == 0) {
			struct timespec now;
			struct timeval tv;
			__gettimeofday(&tv, NULL);
			TIMEVAL_TO_TIMESPEC(&tv, &now);

			if ((abstime->tv_sec == now.tv_sec) ?
					(abstime->tv_nsec <= now.tv_nsec) :
					(abstime->tv_sec < now.tv_sec)) {
				timeout_elapsed = 1;
			} else {
				/* Compute relative time to sleep */
				then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
				then.tv_sec = abstime->tv_sec - now.tv_sec;
				if (then.tv_nsec < 0) {
					then.tv_nsec += NSEC_PER_SEC;
					then.tv_sec--;
				}
			}
		} else {
			then.tv_sec = abstime->tv_sec;
			then.tv_nsec = abstime->tv_nsec;
			if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
				timeout_elapsed = 1;
			}
		}
	}

	if (cond->busy != NULL && cond->busy != mutex) {
		return EINVAL;
	}

	/*
	 * If timeout is known to have elapsed, we still need to unlock and
	 * relock the mutex to allow other waiters to get in line and
	 * modify the condition state.
	 */
	if (timeout_elapsed) {
		res = pthread_mutex_unlock(omutex);
		if (res != 0) {
			return res;
		}
		res = pthread_mutex_lock(omutex);
		if (res != 0) {
			return res;
		}
		return ETIMEDOUT;
	}

	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	do {
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;

		/* remove c and p bits on S word */
		savebits = scntval & PTH_RWS_CV_BITSALL;
		ulval = (scntval & PTHRW_COUNT_MASK);
		nlval = lcntval + PTHRW_INC;
		newval64 = (((uint64_t)ulval) << 32);
		newval64 |= nlval;
	} while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));

	cond->busy = mutex;

	res = _pthread_mutex_droplock(mutex, &flags, &npmtx, &mtxgen, &mtxugen);

	/* TBD: failure cases include non-owner unlock of a recursive or error-checking mutex */
	if (res != 0) {
		return EINVAL;
	}
	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) == 0) {
		npmtx = NULL;
		mugen = 0;
	} else {
		mugen = ((uint64_t)mtxugen << 32) | mtxgen;
	}
	flags &= ~_PTHREAD_MTX_OPT_MUTEX; /* reset the mutex bit as this is cvar */

	cvlsgen = ((uint64_t)(ulval | savebits) << 32) | nlval;

	// SUSv3 requires pthread_cond_wait to be a cancellation point
	if (conforming) {
		pthread_cleanup_push(_pthread_cond_cleanup, (void *)cond);
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
		_pthread_testcancel(conforming);
		pthread_cleanup_pop(0);
	} else {
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
	}

	if (updateval == (uint32_t)-1) {
		int err = errno;
		switch (err & 0xff) {
		case ETIMEDOUT:
			res = ETIMEDOUT;
			break;
		case EINTR:
			// spurious wakeup (unless canceled)
			res = 0;
			break;
		default:
			res = EINVAL;
			break;
		}

		// add unlock ref to show one less waiter
		_pthread_cond_updateval(cond, mutex, err, 0);
	} else if (updateval != 0) {
		// Successful wait. The return may be due to a prepost and can
		// carry bit state; update S (and clear the prepost) if needed.
		_pthread_cond_updateval(cond, mutex, 0, updateval);
	}

	pthread_mutex_lock(omutex);

	return res;
}
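
/*
 * Usage sketch (editorial): as the conversion above shows, an absolute
 * deadline is measured against gettimeofday() time, so a caller builds
 * one the same way. Names are illustrative.
 *
 *	struct timeval tv;
 *	struct timespec deadline;
 *	gettimeofday(&tv, NULL);
 *	TIMEVAL_TO_TIMESPEC(&tv, &deadline);
 *	deadline.tv_sec += 5;	// five seconds from now
 *
 *	pthread_mutex_lock(&lock);
 *	int err = 0;
 *	while (!done && err != ETIMEDOUT) {
 *		err = pthread_cond_timedwait(&cv, &lock, &deadline);
 *	}
 *	pthread_mutex_unlock(&lock);
 */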

static void
_pthread_cond_cleanup(void *arg)
{
	_pthread_cond *cond = (_pthread_cond *)arg;
	pthread_t thread = pthread_self();
	pthread_mutex_t *mutex;

	// 4597450: begin
	if (!thread->canceled) {
		return;
	}
	// 4597450: end

	mutex = (pthread_mutex_t *)cond->busy;

	// add unlock ref to show one less waiter
	_pthread_cond_updateval(cond, (_pthread_mutex *)mutex,
			thread->cancel_error, 0);

	/*
	 * Can't do anything if this fails -- we're on the way out
	 */
	if (mutex != NULL) {
		(void)pthread_mutex_lock(mutex);
	}
}

static void
_pthread_cond_updateval(_pthread_cond *cond, _pthread_mutex *mutex,
		int error, uint32_t updateval)
{
	int needclearpre;

	uint32_t diffgen, nsval;
	uint64_t oldval64, newval64;
	uint32_t lcntval, ucntval, scntval;
	volatile uint64_t *c_lsseqaddr;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	if (error != 0) {
		updateval = PTHRW_INC;
		if (error & ECVCLEARED) {
			updateval |= PTH_RWS_CV_CBIT;
		}
		if (error & ECVPREPOST) {
			updateval |= PTH_RWS_CV_PBIT;
		}
	}

	COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	do {
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;
		nsval = 0;
		needclearpre = 0;

		diffgen = diff_genseq(lcntval, scntval); // pending waiters

		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;

		PTHREAD_TRACE(psynch_cvar_updateval | DBG_FUNC_START, cond, oldval64,
				updateval, 0);

		if (diffgen <= 0 && !is_rws_pbit_set(updateval)) {
			/* TBD: Assert, should not be the case */
			/* validate it is spurious and return */
			newval64 = oldval64;
		} else {
			// update S by one

			// update scntval with number of expected returns and bits
			nsval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			// set bits
			nsval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

			// if L==S and c&p bits are set, needs clearpre
			if (((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) &&
					((nsval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				// reset p bit but retain c bit on the S word
				nsval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			}

			newval64 = (((uint64_t)nsval) << 32);
			newval64 |= lcntval;
		}
	} while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));

	PTHREAD_TRACE(psynch_cvar_updateval | DBG_FUNC_END, cond, newval64,
			(uint64_t)diffgen << 32 | needclearpre, 0);

	if (diffgen > 0) {
		// if L == S, then reset associated mutex
		if ((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
			cond->busy = NULL;
		}
	}

	if (needclearpre) {
		uint32_t flags = 0;
		if (cond->pshared == PTHREAD_PROCESS_SHARED) {
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		}
		(void)__psynch_cvclrprepost(cond, lcntval, ucntval, nsval, 0, lcntval, flags);
	}
}

#endif /* !BUILDING_VARIANT ] */

PTHREAD_NOEXPORT_VARIANT
int
pthread_cond_init(pthread_cond_t *ocond, const pthread_condattr_t *attr)
{
	int conforming;

#if __DARWIN_UNIX03
	conforming = 1;
#else /* __DARWIN_UNIX03 */
	conforming = 0;
#endif /* __DARWIN_UNIX03 */

	_pthread_cond *cond = (_pthread_cond *)ocond;
	_PTHREAD_LOCK_INIT(cond->lock);
	return _pthread_cond_init(cond, attr, conforming);
}