[apple/libpthread.git] / src / pthread_cond.c
1 /*
2 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
25 * All Rights Reserved
26 *
27 * Permission to use, copy, modify, and distribute this software and
28 * its documentation for any purpose and without fee is hereby granted,
29 * provided that the above copyright notice appears in all copies and
30 * that both the copyright notice and this permission notice appear in
31 * supporting documentation.
32 *
33 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
34 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
35 * FOR A PARTICULAR PURPOSE.
36 *
37 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
38 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
39 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
40 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
41 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
42 */
43 /*
44 * MkLinux
45 */
46
47 /*
48 * POSIX Pthread Library
49 */
50
51 #include "resolver.h"
52 #include "internal.h"
53 #include <sys/time.h> /* For struct timespec and getclock(). */
54
55 #ifdef PLOCKSTAT
56 #include "plockstat.h"
57 #else /* !PLOCKSTAT */
58 #define PLOCKSTAT_MUTEX_RELEASE(x, y)
59 #endif /* PLOCKSTAT */
60
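/*
 * State word for ulock-based condition variables. The three fields are
 * packed into a single 64-bit value so waiter/signal accounting and the
 * sequence number can be updated with one atomic operation: on the
 * little-endian platforms this library targets, 'seq' occupies the low
 * 32 bits, 'waiters' the next 16, and 'signal' the top 16.
 */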
61 typedef union {
62 uint64_t val;
63 struct {
64 uint32_t seq;
65 uint16_t waiters;
66 uint16_t signal;
67 };
68 } pthread_ulock_cond_state_u;
69
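/*
 * Adding this constant to the 64-bit 'val' bumps the 16-bit 'waiters'
 * field by one in a single atomic add (see the field layout above).
 */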
70 #define _PTHREAD_COND_WAITERS_INC \
71 (1ull << (offsetof(pthread_ulock_cond_state_u, waiters) * CHAR_BIT))
72
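/*
 * A psynch condvar keeps three 32-bit sequence words (L, U and S) in
 * c_seq. The L and S words must form a naturally aligned 64-bit pair so
 * they can be updated together with one 64-bit compare-and-swap;
 * 'misalign' records which arrangement of c_seq achieves that.
 */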
73 OS_ALWAYS_INLINE
74 static inline void
75 COND_GETSEQ_ADDR(pthread_cond_t *cond,
76 volatile uint64_t **c_lsseqaddr,
77 volatile uint32_t **c_lseqcnt,
78 volatile uint32_t **c_useqcnt,
79 volatile uint32_t **c_sseqcnt)
80 {
81 if (cond->misalign) {
82 *c_lseqcnt = &cond->c_seq[1];
83 *c_sseqcnt = &cond->c_seq[2];
84 *c_useqcnt = &cond->c_seq[0];
85 } else {
86 *c_lseqcnt = &cond->c_seq[0];
87 *c_sseqcnt = &cond->c_seq[1];
88 *c_useqcnt = &cond->c_seq[2];
89 }
90 *c_lsseqaddr = (volatile uint64_t *)*c_lseqcnt;
91 }
92
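/*
 * The ulock state overlays the same storage as the psynch sequence words,
 * reusing 'misalign' to pick an 8-byte-aligned slot within c_seq.
 */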
93 OS_ALWAYS_INLINE
94 static inline pthread_ulock_cond_state_u *
95 _pthread_ulock_cond_state(pthread_cond_t *cond)
96 {
97 return (pthread_ulock_cond_state_u *)&cond->c_seq[cond->misalign];
98 }
99
100 #ifndef BUILDING_VARIANT /* [ */
101
102 static void _pthread_psynch_cond_cleanup(void *arg);
103 static void _pthread_cond_updateval(pthread_cond_t *cond, pthread_mutex_t *mutex,
104 int error, uint32_t updateval);
105
106 static int
107 _pthread_ulock_cond_wait_complete(pthread_ulock_cond_state_u *state,
108 pthread_mutex_t *mutex, int rc);
109 static void
110 _pthread_ulock_cond_cleanup(void *arg);
111
112
113 int
114 pthread_condattr_init(pthread_condattr_t *attr)
115 {
116 attr->sig = _PTHREAD_COND_ATTR_SIG;
117 attr->pshared = _PTHREAD_DEFAULT_PSHARED;
118 return 0;
119 }
120
121 int
122 pthread_condattr_destroy(pthread_condattr_t *attr)
123 {
124 attr->sig = _PTHREAD_NO_SIG;
125 return 0;
126 }
127
128 int
129 pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared)
130 {
131 int res = EINVAL;
132 if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
133 *pshared = (int)attr->pshared;
134 res = 0;
135 }
136 return res;
137 }
138
139 int
140 pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
141 {
142 int res = EINVAL;
143 if (attr->sig == _PTHREAD_COND_ATTR_SIG) {
144 if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) {
145 attr->pshared = pshared;
146 res = 0;
147 }
148 }
149 return res;
150 }
151
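/*
 * Non-portable variant of pthread_cond_timedwait() that interprets the
 * timespec as a relative timeout and does not act as a cancellation point.
 *
 * Illustrative sketch only (not part of this file; 'cond', 'lock' and
 * 'ready' are hypothetical), waiting up to 500ms for a flag:
 *
 *	struct timespec delta = { .tv_sec = 0, .tv_nsec = 500 * 1000 * 1000 };
 *	pthread_mutex_lock(&lock);
 *	while (!ready) {
 *		if (pthread_cond_timedwait_relative_np(&cond, &lock, &delta) ==
 *				ETIMEDOUT) {
 *			break;
 *		}
 *	}
 *	pthread_mutex_unlock(&lock);
 */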
152 int
153 pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex,
154 const struct timespec *abstime)
155 {
156 return _pthread_cond_wait(cond, mutex, abstime, 1,
157 PTHREAD_CONFORM_UNIX03_NOCANCEL);
158 }
159
160 #endif /* !BUILDING_VARIANT ] */
161
162 OS_ALWAYS_INLINE
163 static inline int
164 _pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr,
165 uint32_t sig)
166 {
167 volatile uint64_t *c_lsseqaddr;
168 volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
169
170 cond->busy = NULL;
171 cond->c_seq[0] = 0;
172 cond->c_seq[1] = 0;
173 cond->c_seq[2] = 0;
174 cond->unused = 0;
175
176 // TODO: PTHREAD_STRICT candidate
177 cond->misalign = (((uintptr_t)&cond->c_seq[0]) & 0x7) != 0;
178 COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
179 	*c_sseqcnt = PTH_RWS_CV_CBIT; // set S word to 0 with the C bit set
180
181 if (attr) {
182 cond->pshared = attr->pshared;
183 } else {
184 cond->pshared = _PTHREAD_DEFAULT_PSHARED;
185 }
186
187 // Ensure all contents are properly set before setting signature.
188 #if defined(__LP64__)
189 // For binary compatibility reasons we cannot require natural alignment of
190 // the 64bit 'sig' long value in the struct. rdar://problem/21610439
191 cond->sig._pad = 0;
192 #endif
193 os_atomic_store(&cond->sig.val, sig, release);
194
195 return 0;
196 }
197
198 #ifndef BUILDING_VARIANT /* [ */
199
200 OS_ALWAYS_INLINE
201 static int
202 _pthread_cond_check_signature(pthread_cond_t *cond, uint32_t sig_current,
203 uint32_t *sig_inout)
204 {
205 int res = 0;
206 switch (sig_current) {
207 case _PTHREAD_COND_SIG_init:
208 __builtin_unreachable();
209 break;
210 case _PTHREAD_COND_SIG_pristine:
211 if (*sig_inout != _PTHREAD_COND_SIG_pristine) {
212 os_atomic_store(&cond->sig.val, *sig_inout, relaxed);
213 }
214 break;
215 case _PTHREAD_COND_SIG_psynch:
216 case _PTHREAD_COND_SIG_ulock:
217 if (*sig_inout == _PTHREAD_COND_SIG_pristine) {
218 *sig_inout = sig_current;
219 } else if (*sig_inout != sig_current) {
220 			PTHREAD_INTERNAL_CRASH(0, "Mixed ulock and psynch condvar use");
221 }
222 break;
223 default:
224 // TODO: PTHREAD_STRICT candidate
225 res = EINVAL;
226 break;
227 }
228
229 return res;
230 }
231
232 OS_NOINLINE
233 static int
234 _pthread_cond_check_init_slow(pthread_cond_t *cond, uint32_t *sig_inout)
235 {
236 int res;
237 _pthread_lock_lock(&cond->lock);
238
239 uint32_t sig_current = os_atomic_load(&cond->sig.val, relaxed);
240 if (sig_current == _PTHREAD_COND_SIG_init) {
241 res = _pthread_cond_init(cond, NULL, *sig_inout);
242 } else {
243 res = _pthread_cond_check_signature(cond, sig_current, sig_inout);
244 }
245
246 _pthread_lock_unlock(&cond->lock);
247 return res;
248 }
249
250 /*
251 * These routines maintain the signature of the condition variable, which
252 * encodes a small state machine:
253 * - a statically initialized condvar begins with SIG_init
254 * - explicit initialization via _cond_init() and implicit initialization
255 * transition to SIG_pristine, as there have been no waiters so we don't know
256 * what kind of mutex we'll be used with
257 * - the first _cond_wait() transitions to one of SIG_psynch or SIG_ulock
258 * according to the mutex being waited on
259 *
260 * On entry, *sig_inout is the furthest state we can transition to given the
261 * calling context. On exit, it is the actual state we observed, after any
262 * possible advancement.
263 */
264 OS_ALWAYS_INLINE
265 static inline int
266 _pthread_cond_check_init(pthread_cond_t *cond, uint32_t *sig_inout)
267 {
268 uint32_t sig_current = os_atomic_load(&cond->sig.val, relaxed);
269 if (sig_current == _PTHREAD_COND_SIG_init) {
270 return _pthread_cond_check_init_slow(cond, sig_inout);
271 } else {
272 return _pthread_cond_check_signature(cond, sig_current, sig_inout);
273 }
274 }
275
276 PTHREAD_NOEXPORT_VARIANT
277 int
278 pthread_cond_destroy(pthread_cond_t *cond)
279 {
280 int res = EINVAL;
281 uint32_t sig = os_atomic_load(&cond->sig.val, relaxed);
282 switch (sig) {
283 case _PTHREAD_COND_SIG_psynch:
284 _pthread_lock_lock(&cond->lock);
285
286 uint64_t oldval64, newval64;
287 uint32_t lcntval, ucntval, scntval;
288 volatile uint64_t *c_lsseqaddr;
289 volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
290
291 COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
292
293 do {
294 lcntval = *c_lseqcnt;
295 ucntval = *c_useqcnt;
296 scntval = *c_sseqcnt;
297
298 // validate it is not busy
299 if ((lcntval & PTHRW_COUNT_MASK) != (scntval & PTHRW_COUNT_MASK)) {
300 //res = EBUSY;
301 break;
302 }
303 oldval64 = (((uint64_t)scntval) << 32);
304 oldval64 |= lcntval;
305 newval64 = oldval64;
306 } while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));
307
308 // <rdar://problem/13782056> Need to clear preposts.
309 uint32_t flags = 0;
310 bool needclearpre = ((scntval & PTH_RWS_CV_PBIT) != 0);
311 if (needclearpre && cond->pshared == PTHREAD_PROCESS_SHARED) {
312 flags |= _PTHREAD_MTX_OPT_PSHARED;
313 }
314
315 os_atomic_store(&cond->sig.val, _PTHREAD_NO_SIG, relaxed);
316 res = 0;
317
318 _pthread_lock_unlock(&cond->lock);
319
320 if (needclearpre) {
321 (void)__psynch_cvclrprepost(cond, lcntval, ucntval, scntval, 0, lcntval, flags);
322 }
323 break;
324 case _PTHREAD_COND_SIG_init:
325 // Compatibility for misbehaving applications that attempt to
326 // destroy a statically initialized condition variable.
327 //
328 // fall through
329 case _PTHREAD_COND_SIG_pristine:
330 case _PTHREAD_COND_SIG_ulock:
331 os_atomic_store(&cond->sig.val, _PTHREAD_NO_SIG, relaxed);
332 res = 0;
333 break;
334 default:
335 // TODO: PTHREAD_STRICT candidate
336 break;
337 }
338 return res;
339 }
340
341 OS_ALWAYS_INLINE
342 static inline int
343 _pthread_psynch_cond_signal(pthread_cond_t *cond, bool broadcast,
344 mach_port_t thread)
345 {
346 uint32_t updateval;
347 uint32_t diffgen;
348 uint32_t ulval;
349
350 uint64_t oldval64, newval64;
351 uint32_t lcntval, ucntval, scntval;
352 volatile uint64_t *c_lsseqaddr;
353 volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
354
355 int retry_count = 0, uretry_count = 0;
356 int ucountreset = 0;
357
358 COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
359
360 bool retry;
361 do {
362 retry = false;
363
364 lcntval = *c_lseqcnt;
365 ucntval = *c_useqcnt;
366 scntval = *c_sseqcnt;
367 diffgen = 0;
368 ulval = 0;
369
370 if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
371 (thread == MACH_PORT_NULL && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
372 /* validate it is spurious and return */
373 oldval64 = (((uint64_t)scntval) << 32);
374 oldval64 |= lcntval;
375 newval64 = oldval64;
376
377 if (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst)) {
378 retry = true;
379 continue;
380 } else {
381 return 0;
382 }
383 }
384
385 if (thread) {
386 break;
387 }
388
389 /* validate to eliminate spurious values, race snapshots */
390 if (is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
391 			/* since scntval may be newer, just redo */
392 retry_count++;
393 if (retry_count > 8192) {
394 return EAGAIN;
395 } else {
396 sched_yield();
397 retry = true;
398 continue;
399 }
400 } else if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
401 /* since ucntval may be newer, just redo */
402 uretry_count++;
403 if (uretry_count > 8192) {
404 /*
405 * U value if not used for a while can go out of sync
406 * set this to S value and try one more time.
407 */
408 if (ucountreset != 0) {
409 return EAGAIN;
410 } else if (os_atomic_cmpxchg(c_useqcnt, ucntval, (scntval & PTHRW_COUNT_MASK), seq_cst)) {
411 /* now the U is reset to S value */
412 ucountreset = 1;
413 uretry_count = 0;
414 }
415 }
416 sched_yield();
417 retry = true;
418 continue;
419 }
420
421 if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
422 			/* If U < S, set U = S+diff due to interruptions, timeouts, etc. */
423 ulval = (scntval & PTHRW_COUNT_MASK);
424 } else {
425 			/* If U >= S, set U = U+diff due to interruptions, timeouts, etc. */
426 ulval = (ucntval & PTHRW_COUNT_MASK);
427 }
428
429 if (broadcast) {
430 diffgen = diff_genseq(lcntval, ulval);
431 // Set U = L
432 ulval = (lcntval & PTHRW_COUNT_MASK);
433 } else {
434 ulval += PTHRW_INC;
435 }
436
437 } while (retry || !os_atomic_cmpxchg(c_useqcnt, ucntval, ulval, seq_cst));
438
439 uint32_t flags = 0;
440 if (cond->pshared == PTHREAD_PROCESS_SHARED) {
441 flags |= _PTHREAD_MTX_OPT_PSHARED;
442 }
443
444 uint64_t cvlsgen = ((uint64_t)scntval << 32) | lcntval;
445
446 if (broadcast) {
447 // pass old U val so kernel will know the diffgen
448 uint64_t cvudgen = ((uint64_t)ucntval << 32) | diffgen;
449 updateval = __psynch_cvbroad(cond, cvlsgen, cvudgen, flags, NULL, 0, 0);
450 } else {
451 updateval = __psynch_cvsignal(cond, cvlsgen, ucntval, thread, NULL, 0, 0, flags);
452 }
453
454 if (updateval != (uint32_t)-1 && updateval != 0) {
455 _pthread_cond_updateval(cond, NULL, 0, updateval);
456 }
457
458 return 0;
459 }
460
461 OS_ALWAYS_INLINE
462 static inline int
463 _pthread_ulock_cond_signal(pthread_cond_t *cond, bool broadcast,
464 mach_port_t thread)
465 {
466 pthread_ulock_cond_state_u *state = _pthread_ulock_cond_state(cond);
467
468 pthread_ulock_cond_state_u oldstate, newstate;
469 // release to pair with acquire after wait
470 os_atomic_rmw_loop(&state->val, oldstate.val, newstate.val, release, {
471 if (!oldstate.waiters || oldstate.waiters == oldstate.signal) {
472 os_atomic_rmw_loop_give_up(return 0);
473 }
474
475 newstate = (pthread_ulock_cond_state_u){
476 .seq = oldstate.seq + 1,
477 .waiters = oldstate.waiters,
478 .signal = broadcast ? oldstate.waiters :
479 MIN(oldstate.signal + 1, oldstate.waiters),
480 };
481 });
482
483 PTHREAD_TRACE(ulcond_signal, cond, oldstate.val, newstate.val, broadcast);
484
485 // Priority hole: if we're pre-empted here, nobody else can signal the
486 // waiter we took responsibility for signaling by incrementing the signal
487 // count.
488
489 if (oldstate.signal < oldstate.waiters) {
490 uint32_t wake_op = UL_COMPARE_AND_WAIT | ULF_NO_ERRNO;
491 if (broadcast) {
492 wake_op |= ULF_WAKE_ALL;
493 } else if (thread) {
494 wake_op |= ULF_WAKE_THREAD;
495 }
496
497 for (;;) {
498 int rc = __ulock_wake(wake_op, &state->seq, thread);
499 if (rc < 0) {
500 switch (-rc) {
501 case EINTR:
502 continue;
503 case ENOENT:
504 break;
505 case EALREADY:
506 if (!thread) {
507 PTHREAD_INTERNAL_CRASH(0, "EALREADY from ulock_wake");
508 }
509 // Compatibility with psynch: promote to broadcast
510 return pthread_cond_broadcast(cond);
511 default:
512 PTHREAD_INTERNAL_CRASH(-rc, "ulock_wake failure");
513 }
514 }
515 break;
516 }
517 }
518
519 return 0;
520 }
521
522 OS_ALWAYS_INLINE
523 static inline int
524 _pthread_cond_signal(pthread_cond_t *cond, bool broadcast, mach_port_t thread)
525 {
526 uint32_t sig = _PTHREAD_COND_SIG_pristine;
527 int res = _pthread_cond_check_init(cond, &sig);
528 if (res != 0 || sig == _PTHREAD_COND_SIG_pristine) {
529 return res;
530 }
531
532 switch (sig) {
533 case _PTHREAD_COND_SIG_psynch:
534 return _pthread_psynch_cond_signal(cond, broadcast, thread);
535 case _PTHREAD_COND_SIG_ulock:
536 return _pthread_ulock_cond_signal(cond, broadcast, thread);
537 default:
538 PTHREAD_INTERNAL_CRASH(sig, "impossible cond signature");
539 }
540 }
541
542 /*
543 * Signal a condition variable, waking up all threads waiting for it.
544 */
545 PTHREAD_NOEXPORT_VARIANT
546 int
547 pthread_cond_broadcast(pthread_cond_t *cond)
548 {
549 return _pthread_cond_signal(cond, true, MACH_PORT_NULL);
550 }
551
552 /*
553 * Signal a condition variable, waking a specified thread.
554 */
555 PTHREAD_NOEXPORT_VARIANT
556 int
557 pthread_cond_signal_thread_np(pthread_cond_t *cond, pthread_t thread)
558 {
559 mach_port_t mp = MACH_PORT_NULL;
560 if (thread) {
561 mp = pthread_mach_thread_np((_Nonnull pthread_t)thread);
562 }
563 return _pthread_cond_signal(cond, false, mp);
564 }
565
566 /*
567 * Signal a condition variable, waking only one thread.
568 */
569 PTHREAD_NOEXPORT_VARIANT
570 int
571 pthread_cond_signal(pthread_cond_t *cond)
572 {
573 return _pthread_cond_signal(cond, false, MACH_PORT_NULL);
574 }
575
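/*
 * The wait implementations below rely on the caller holding the mutex and
 * re-checking its predicate, since both the psynch and ulock paths permit
 * spurious wake-ups. Illustrative sketch only, with a hypothetical 'q'
 * structure guarding a queue:
 *
 *	pthread_mutex_lock(&q.lock);
 *	while (q.count == 0) {
 *		pthread_cond_wait(&q.nonempty, &q.lock);	// may wake spuriously
 *	}
 *	item = q.items[--q.count];
 *	pthread_mutex_unlock(&q.lock);
 */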
576 static int
577 _pthread_psynch_cond_wait(pthread_cond_t *cond,
578 pthread_mutex_t *mutex,
579 const struct timespec *then,
580 pthread_conformance_t conforming)
581 {
582 uint32_t mtxgen, mtxugen, flags=0, updateval;
583 uint32_t lcntval, ucntval, scntval;
584 uint32_t nlval, ulval, savebits;
585 volatile uint64_t *c_lsseqaddr;
586 volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
587 uint64_t oldval64, newval64, mugen, cvlsgen;
588 uint32_t *npmtx = NULL;
589
590 COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
591
592 do {
593 lcntval = *c_lseqcnt;
594 ucntval = *c_useqcnt;
595 scntval = *c_sseqcnt;
596
597 oldval64 = (((uint64_t)scntval) << 32);
598 oldval64 |= lcntval;
599
600 /* remove c and p bits on S word */
601 savebits = scntval & PTH_RWS_CV_BITSALL;
602 ulval = (scntval & PTHRW_COUNT_MASK);
603 nlval = lcntval + PTHRW_INC;
604 newval64 = (((uint64_t)ulval) << 32);
605 newval64 |= nlval;
606 } while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));
607
608 cond->busy = mutex;
609
610 int res = _pthread_mutex_droplock(mutex, &flags, &npmtx, &mtxgen, &mtxugen);
611
612 /* TBD: cases are for normal (non owner for recursive mutex; error checking)*/
613 if (res != 0) {
614 return EINVAL;
615 }
616 if ((flags & _PTHREAD_MTX_OPT_NOTIFY) == 0) {
617 npmtx = NULL;
618 mugen = 0;
619 } else {
620 mugen = ((uint64_t)mtxugen << 32) | mtxgen;
621 }
622 flags &= ~_PTHREAD_MTX_OPT_MUTEX; /* reset the mutex bit as this is cvar */
623
624 cvlsgen = ((uint64_t)(ulval | savebits)<< 32) | nlval;
625
626 // SUSv3 requires pthread_cond_wait to be a cancellation point
627 if (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE) {
628 pthread_cleanup_push(_pthread_psynch_cond_cleanup, (void *)cond);
629 updateval = __psynch_cvwait(cond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)(then->tv_sec), (int32_t)(then->tv_nsec));
630 pthread_testcancel();
631 pthread_cleanup_pop(0);
632 } else {
633 updateval = __psynch_cvwait(cond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)(then->tv_sec), (int32_t)(then->tv_nsec));
634 }
635
636 if (updateval == (uint32_t)-1) {
637 int err = errno;
638 switch (err & 0xff) {
639 case ETIMEDOUT:
640 res = ETIMEDOUT;
641 break;
642 case EINTR:
643 // spurious wakeup (unless canceled)
644 res = 0;
645 break;
646 default:
647 res = EINVAL;
648 break;
649 }
650
651 // add unlock ref to show one less waiter
652 _pthread_cond_updateval(cond, mutex, err, 0);
653 } else if (updateval != 0) {
654 // Successful wait
655 		// The return is due to a prepost and might have bit states set;
656 		// update S and return for the prepost if needed
657 _pthread_cond_updateval(cond, mutex, 0, updateval);
658 }
659
660 pthread_mutex_lock(mutex);
661
662 return res;
663 }
664
665 struct pthread_ulock_cond_cancel_ctx_s {
666 pthread_cond_t *cond;
667 pthread_mutex_t *mutex;
668 };
669
670 static int
671 _pthread_ulock_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
672 const struct timespec *then, pthread_conformance_t conforming)
673 {
674 bool cancelable = (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE);
675
676 uint64_t timeout_ns = 0;
677 if (then->tv_sec || then->tv_nsec) {
678 // psynch compatibility: cast and bitwise-truncate tv_nsec
679 uint64_t fraction_ns = ((uint32_t)then->tv_nsec) & 0x3fffffff;
680 if (os_mul_and_add_overflow(then->tv_sec, NSEC_PER_SEC, fraction_ns,
681 &timeout_ns)) {
682 // saturate (can't wait longer than 584 years...)
683 timeout_ns = UINT64_MAX;
684 }
685 }
686
687 pthread_ulock_cond_state_u *state = _pthread_ulock_cond_state(cond);
688
689 pthread_ulock_cond_state_u origstate = {
690 .val = os_atomic_add(&state->val, _PTHREAD_COND_WAITERS_INC, relaxed)
691 };
692
693 int rc = _pthread_mutex_ulock_unlock(mutex);
694 if (rc) {
695 return _pthread_ulock_cond_wait_complete(state, NULL, rc);
696 }
697
698 PTHREAD_TRACE(ulcond_wait, cond, origstate.val, timeout_ns, 0);
699
700 do {
701 const uint32_t wait_op = UL_COMPARE_AND_WAIT | ULF_NO_ERRNO;
702 if (cancelable) {
703 struct pthread_ulock_cond_cancel_ctx_s ctx = {
704 .cond = cond,
705 .mutex = mutex,
706 };
707 pthread_cleanup_push(_pthread_ulock_cond_cleanup, &ctx);
708 rc = __ulock_wait2(wait_op | ULF_WAIT_CANCEL_POINT, &state->seq,
709 origstate.seq, timeout_ns, 0);
710 pthread_testcancel();
711 pthread_cleanup_pop(0);
712 } else {
713 rc = __ulock_wait2(wait_op, &state->seq, origstate.seq, timeout_ns, 0);
714 }
715 if (rc < 0) {
716 switch (-rc) {
717 case EFAULT:
718 continue;
719 case EINTR:
720 // "These functions shall not return an error code of [EINTR]."
721 // => promote to spurious wake-up
722 rc = 0;
723 goto out;
724 case ETIMEDOUT:
725 rc = ETIMEDOUT;
726 goto out;
727 default:
728 PTHREAD_INTERNAL_CRASH(-rc, "ulock_wait failure");
729 }
730 } else {
731 // XXX for now don't care about other waiters
732 rc = 0;
733 }
734 } while (os_atomic_load(&state->seq, relaxed) == origstate.seq);
735
736 out:
737 return _pthread_ulock_cond_wait_complete(state, mutex, rc);
738 }
739
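/*
 * Common tail for ulock waits (normal return and cancellation cleanup):
 * reacquire the mutex if one was supplied, then drop this thread from the
 * waiter count and consume one pending signal, if any.
 */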
740 static int
741 _pthread_ulock_cond_wait_complete(pthread_ulock_cond_state_u *state,
742 pthread_mutex_t *mutex, int rc)
743 {
744 if (mutex) {
745 // XXX Check this return value? Historically we haven't, but if rc == 0
746 // we could promote the return value to this one.
747 _pthread_mutex_ulock_lock(mutex, false);
748 }
749
750 pthread_ulock_cond_state_u oldstate, newstate;
751 // acquire to pair with release upon signal
752 os_atomic_rmw_loop(&state->val, oldstate.val, newstate.val, acquire, {
753 newstate = (pthread_ulock_cond_state_u){
754 .seq = oldstate.seq,
755 .waiters = oldstate.waiters - 1,
756 .signal = oldstate.signal ? oldstate.signal - 1 : 0,
757 };
758 });
759
760 return rc;
761 }
762
763 /*
764 * Suspend waiting for a condition variable.
765 * If conformance is not cancelable, we skip the pthread_testcancel(),
766 * but keep the remaining conforming behavior.
767 */
768 PTHREAD_NOEXPORT OS_NOINLINE
769 int
770 _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex,
771 const struct timespec *abstime, int isRelative,
772 pthread_conformance_t conforming)
773 {
774 int res;
775 struct timespec then = { 0, 0 };
776 bool timeout_elapsed = false;
777
778 if (!_pthread_mutex_check_signature(mutex) &&
779 !_pthread_mutex_check_signature_init(mutex)) {
780 return EINVAL;
781 }
782
783 bool ulock = _pthread_mutex_uses_ulock(mutex);
784 uint32_t sig = ulock ? _PTHREAD_COND_SIG_ulock : _PTHREAD_COND_SIG_psynch;
785 res = _pthread_cond_check_init(cond, &sig);
786 if (res != 0) {
787 return res;
788 }
789
790 if (conforming == PTHREAD_CONFORM_UNIX03_CANCELABLE) {
791 pthread_testcancel();
792 }
793
794 /* send relative time to kernel */
795 if (abstime) {
796 if (abstime->tv_nsec < 0 || abstime->tv_nsec >= NSEC_PER_SEC) {
797 // TODO: PTHREAD_STRICT candidate
798 return EINVAL;
799 }
800
801 if (isRelative == 0) {
802 struct timespec now;
803 struct timeval tv;
804 __gettimeofday(&tv, NULL);
805 TIMEVAL_TO_TIMESPEC(&tv, &now);
806
807 if ((abstime->tv_sec == now.tv_sec) ?
808 (abstime->tv_nsec <= now.tv_nsec) :
809 (abstime->tv_sec < now.tv_sec)) {
810 timeout_elapsed = true;
811 } else {
812 /* Compute relative time to sleep */
813 then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
814 then.tv_sec = abstime->tv_sec - now.tv_sec;
815 if (then.tv_nsec < 0) {
816 then.tv_nsec += NSEC_PER_SEC;
817 then.tv_sec--;
818 }
819 }
820 } else {
821 then.tv_sec = abstime->tv_sec;
822 then.tv_nsec = abstime->tv_nsec;
823 if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
824 timeout_elapsed = true;
825 }
826 }
827 }
828
829 if (!ulock && cond->busy != NULL && cond->busy != mutex) {
830 // TODO: PTHREAD_STRICT candidate
831 return EINVAL;
832 }
833
834 /*
835 * If timeout is known to have elapsed, we still need to unlock and
836 * relock the mutex to allow other waiters to get in line and
837 * modify the condition state.
838 */
839 if (timeout_elapsed) {
840 res = pthread_mutex_unlock(mutex);
841 if (res != 0) {
842 return res;
843 }
844 res = pthread_mutex_lock(mutex);
845 if (res != 0) {
846 return res;
847 }
848
849 return ETIMEDOUT;
850 }
851
852 if (ulock) {
853 return _pthread_ulock_cond_wait(cond, mutex, &then, conforming);
854 } else {
855 return _pthread_psynch_cond_wait(cond, mutex, &then, conforming);
856 }
857 }
858
859 static void
860 _pthread_ulock_cond_cleanup(void *arg)
861 {
862 struct pthread_ulock_cond_cancel_ctx_s *ctx = arg;
863 pthread_ulock_cond_state_u *state = _pthread_ulock_cond_state(ctx->cond);
864
865 (void)_pthread_ulock_cond_wait_complete(state, ctx->mutex, 0);
866
867 // "A thread that has been unblocked because it has been canceled while
868 // blocked in a call to pthread_cond_timedwait() or pthread_cond_wait()
869 // shall not consume any condition signal that may be directed concurrently
870 // at the condition variable if there are other threads blocked on the
871 // condition variable."
872 //
873 // Since we have no way to know if we've eaten somebody else's signal, just
874 // signal again pessimistically.
875 pthread_cond_signal(ctx->cond);
876 }
877
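/*
 * Cancellation cleanup handler installed around __psynch_cvwait(): if the
 * thread was actually canceled, account for one fewer waiter and reacquire
 * the mutex on the way out, as pthread_cond_[timed]wait() requires.
 */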
878 static void
879 _pthread_psynch_cond_cleanup(void *arg)
880 {
881 pthread_cond_t *cond = (pthread_cond_t *)arg;
882 pthread_t thread = pthread_self();
883 pthread_mutex_t *mutex;
884
885 // 4597450: begin
886 if (!thread->canceled) {
887 return;
888 }
889 // 4597450: end
890
891 mutex = cond->busy;
892
893 // add unlock ref to show one less waiter
894 _pthread_cond_updateval(cond, mutex, thread->cancel_error, 0);
895
896 /*
897 ** Can't do anything if this fails -- we're on the way out
898 */
899 if (mutex != NULL) {
900 (void)pthread_mutex_lock(mutex);
901 }
902 }
903
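/*
 * Fold a kernel-provided update (or an error from __psynch_cvwait()) into
 * the S word: advance S by the number of expected returns, merge the C/P
 * bits, and once all waiters have drained, clear any prepost state in the
 * kernel and detach the associated mutex.
 */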
904 static void
905 _pthread_cond_updateval(pthread_cond_t *cond, pthread_mutex_t *mutex,
906 int error, uint32_t updateval)
907 {
908 int needclearpre;
909
910 uint32_t diffgen, nsval;
911 uint64_t oldval64, newval64;
912 uint32_t lcntval, ucntval, scntval;
913 volatile uint64_t *c_lsseqaddr;
914 volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
915
916 if (error != 0) {
917 updateval = PTHRW_INC;
918 if (error & ECVCLEARED) {
919 updateval |= PTH_RWS_CV_CBIT;
920 }
921 if (error & ECVPREPOST) {
922 updateval |= PTH_RWS_CV_PBIT;
923 }
924 }
925
926 COND_GETSEQ_ADDR(cond, &c_lsseqaddr, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
927
928 do {
929 lcntval = *c_lseqcnt;
930 ucntval = *c_useqcnt;
931 scntval = *c_sseqcnt;
932 nsval = 0;
933 needclearpre = 0;
934
935 diffgen = diff_genseq(lcntval, scntval); // pending waiters
936
937 oldval64 = (((uint64_t)scntval) << 32);
938 oldval64 |= lcntval;
939
940 PTHREAD_TRACE(psynch_cvar_updateval | DBG_FUNC_START, cond, oldval64,
941 updateval, 0);
942
943 if (diffgen <= 0 && !is_rws_pbit_set(updateval)) {
944 /* TBD: Assert, should not be the case */
945 /* validate it is spurious and return */
946 newval64 = oldval64;
947 } else {
948 // update S by one
949
950 // update scntval with number of expected returns and bits
951 nsval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
952 // set bits
953 nsval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));
954
955 // if L==S and c&p bits are set, needs clearpre
956 if (((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) &&
957 ((nsval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
958 // reset p bit but retain c bit on the sword
959 nsval &= PTH_RWS_CV_RESET_PBIT;
960 needclearpre = 1;
961 }
962
963 newval64 = (((uint64_t)nsval) << 32);
964 newval64 |= lcntval;
965 }
966 } while (!os_atomic_cmpxchg(c_lsseqaddr, oldval64, newval64, seq_cst));
967
968 PTHREAD_TRACE(psynch_cvar_updateval | DBG_FUNC_END, cond, newval64,
969 (uint64_t)diffgen << 32 | needclearpre, 0);
970
971 if (diffgen > 0) {
972 // if L == S, then reset associated mutex
973 if ((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
974 cond->busy = NULL;
975 }
976 }
977
978 if (needclearpre) {
979 uint32_t flags = 0;
980 if (cond->pshared == PTHREAD_PROCESS_SHARED) {
981 flags |= _PTHREAD_MTX_OPT_PSHARED;
982 }
983 (void)__psynch_cvclrprepost(cond, lcntval, ucntval, nsval, 0, lcntval, flags);
984 }
985 }
986
987 #endif /* !BUILDING_VARIANT ] */
988
989 PTHREAD_NOEXPORT_VARIANT
990 int
991 pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
992 {
993 _pthread_lock_init(&cond->lock);
994 return _pthread_cond_init(cond, attr, _PTHREAD_COND_SIG_pristine);
995 }
996