/* pthreads/pthread_cond.c (apple/libc, Libc-594.9.1) */
/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#include "pthread_internals.h"
#include <sys/time.h>	/* For struct timespec and getclock(). */
#include <stdio.h>

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

extern int __semwait_signal(int, int, int, int, int64_t, int32_t);
extern int _pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *, int);
extern int __unix_conforming;

#ifdef PR_5243343
/* 5243343 - temporary hack to detect if we are running the conformance test */
extern int PR_5243343_flag;
#endif /* PR_5243343 */

#if defined(__i386__) || defined(__x86_64__)
__private_extern__ int __new_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int isRelative, int isconforming);
extern int _new_pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *, int);
extern int _new_pthread_cond_destroy(pthread_cond_t *);
extern int _new_pthread_cond_destroy_locked(pthread_cond_t *);
int _new_pthread_cond_broadcast(pthread_cond_t *cond);
int _new_pthread_cond_signal_thread_np(pthread_cond_t *cond, pthread_t thread);
int _new_pthread_cond_signal(pthread_cond_t *cond);
int _new_pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime);
int _new_pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex);
int _new_pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime);
static void _new_cond_cleanup(void *arg);
static void _new_cond_dropwait(npthread_cond_t *cond);

/*
 * The LP64 and ILP32 layouts of the sequence counters are identical, so a
 * single definition of this macro serves both ABIs.
 */
#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt) \
{ \
    if (cond->misalign != 0) { \
        c_lseqcnt = &cond->c_seq[1]; \
        c_useqcnt = &cond->c_seq[2]; \
    } else { \
        /* aligned */ \
        c_lseqcnt = &cond->c_seq[0]; \
        c_useqcnt = &cond->c_seq[1]; \
    } \
}
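
/*
 * Editor's note (a sketch of the rationale, inferred from this file rather
 * than stated in it): the load/unload counters selected here are later
 * compare-and-swapped as one 64-bit unit via OSAtomicCompareAndSwap64, so
 * the pair must sit on an 8-byte boundary.  A pthread_cond_t is only
 * guaranteed 4-byte alignment, hence the misalign flag set in
 * _new_pthread_cond_init:
 *
 *     cond 8-byte aligned:   pair is c_seq[0]/c_seq[1]
 *     cond misaligned by 4:  pair is c_seq[1]/c_seq[2]
 */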


#define _KSYN_TRACE_ 0

#if _KSYN_TRACE_
/* The Function qualifiers */
#define DBG_FUNC_START 1
#define DBG_FUNC_END 2
#define DBG_FUNC_NONE 0

int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);

#define _KSYN_TRACE_UM_LOCK 0x9000060
#define _KSYN_TRACE_UM_UNLOCK 0x9000064
#define _KSYN_TRACE_UM_MHOLD 0x9000068
#define _KSYN_TRACE_UM_MDROP 0x900006c
#define _KSYN_TRACE_UM_CVWAIT 0x9000070
#define _KSYN_TRACE_UM_CVSIG 0x9000074
#define _KSYN_TRACE_UM_CVBRD 0x9000078

#endif /* _KSYN_TRACE_ */
#endif /* __i386__ || __x86_64__ */


#ifndef BUILDING_VARIANT /* [ */

/*
 * Destroy a condition variable.
 */
int
pthread_cond_destroy(pthread_cond_t *cond)
{
    int ret;
    int sig = cond->sig;

    /* backwards compatibility: accept condition variables still carrying the static-initializer signature */
    if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
        return (EINVAL);

    LOCK(cond->lock);
    if (cond->sig == _PTHREAD_COND_SIG)
    {
#if defined(__i386__) || defined(__x86_64__)
        if (cond->pshared == PTHREAD_PROCESS_SHARED) {
            ret = _new_pthread_cond_destroy_locked(cond);
            UNLOCK(cond->lock);
            return (ret);
        }
#endif /* __i386__ || __x86_64__ */
        if (cond->busy == (pthread_mutex_t *)NULL)
        {
            cond->sig = _PTHREAD_NO_SIG;
            ret = 0;
        } else
            ret = EBUSY;
    } else
        ret = EINVAL;	/* Not an initialized condition variable structure */
    UNLOCK(cond->lock);
    return (ret);
}
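
/*
 * Illustrative usage (editor's sketch; the name cv is hypothetical, not from
 * this file): destroy succeeds only once no thread is still blocked on the
 * variable; otherwise the cond->busy check above yields EBUSY.
 *
 *     pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
 *     ...
 *     pthread_cond_broadcast(&cv);          // wake any remaining waiters
 *     int err = pthread_cond_destroy(&cv);  // 0, or EBUSY if still in use
 */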


/*
 * Signal a condition variable, waking up all threads waiting for it.
 */
int
pthread_cond_broadcast(pthread_cond_t *cond)
{
    kern_return_t kern_res;
    semaphore_t sem;
    int sig = cond->sig;

    /* backwards compatibility: accept condition variables still carrying the static-initializer signature */
    if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
        return (EINVAL);

    LOCK(cond->lock);
    if (cond->sig != _PTHREAD_COND_SIG)
    {
        int res;

        if (cond->sig == _PTHREAD_COND_SIG_init)
        {
            _pthread_cond_init(cond, NULL, 0);
            res = 0;
        } else
            res = EINVAL;	/* Not a condition variable */
        UNLOCK(cond->lock);
        return (res);
    }
#if defined(__i386__) || defined(__x86_64__)
    else if (cond->pshared == PTHREAD_PROCESS_SHARED) {
        UNLOCK(cond->lock);
        return (_new_pthread_cond_broadcast(cond));
    }
#endif /* __i386__ || __x86_64__ */
    else if ((sem = cond->sem) == SEMAPHORE_NULL)
    {
        /* Avoid kernel call since there are no waiters... */
        UNLOCK(cond->lock);
        return (0);
    }
    cond->sigspending++;
    UNLOCK(cond->lock);

    PTHREAD_MACH_CALL(semaphore_signal_all(sem), kern_res);

    LOCK(cond->lock);
    cond->sigspending--;
    if (cond->waiters == 0 && cond->sigspending == 0)
    {
        cond->sem = SEMAPHORE_NULL;
        restore_sem_to_pool(sem);
    }
    UNLOCK(cond->lock);
    if (kern_res != KERN_SUCCESS)
        return (EINVAL);
    return (0);
}
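
/*
 * Illustrative usage (editor's sketch; mtx, cv and the `done` predicate are
 * hypothetical): broadcast is normally issued while holding the associated
 * mutex, after changing the shared predicate, so every awakened waiter
 * re-evaluates the predicate under the lock.
 *
 *     pthread_mutex_lock(&mtx);
 *     done = 1;                      // update the shared predicate
 *     pthread_cond_broadcast(&cv);   // wake all waiters
 *     pthread_mutex_unlock(&mtx);
 */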

/*
 * Signal a condition variable, waking a specified thread.
 */
int
pthread_cond_signal_thread_np(pthread_cond_t *cond, pthread_t thread)
{
    kern_return_t kern_res;
    semaphore_t sem;
    int sig = cond->sig;

    /* backwards compatibility: accept condition variables still carrying the static-initializer signature */
    if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
        return (EINVAL);
    LOCK(cond->lock);
    if (cond->sig != _PTHREAD_COND_SIG)
    {
        int ret;

        if (cond->sig == _PTHREAD_COND_SIG_init)
        {
            _pthread_cond_init(cond, NULL, 0);
            ret = 0;
        } else
            ret = EINVAL;	/* Not a condition variable */
        UNLOCK(cond->lock);
        return (ret);
    }
#if defined(__i386__) || defined(__x86_64__)
    else if (cond->pshared == PTHREAD_PROCESS_SHARED) {
        UNLOCK(cond->lock);
        return (_new_pthread_cond_signal_thread_np(cond, thread));
    }
#endif /* __i386__ || __x86_64__ */
    else if ((sem = cond->sem) == SEMAPHORE_NULL)
    {
        /* Avoid kernel call since there are not enough waiters... */
        UNLOCK(cond->lock);
        return (0);
    }
    cond->sigspending++;
    UNLOCK(cond->lock);

    if (thread == (pthread_t)NULL)
    {
        kern_res = semaphore_signal_thread(sem, THREAD_NULL);
        if (kern_res == KERN_NOT_WAITING)
            kern_res = KERN_SUCCESS;
    }
    else if (thread->sig == _PTHREAD_SIG)
    {
        PTHREAD_MACH_CALL(semaphore_signal_thread(
            sem, pthread_mach_thread_np(thread)), kern_res);
    }
    else
        kern_res = KERN_FAILURE;

    LOCK(cond->lock);
    cond->sigspending--;
    if (cond->waiters == 0 && cond->sigspending == 0)
    {
        cond->sem = SEMAPHORE_NULL;
        restore_sem_to_pool(sem);
    }
    UNLOCK(cond->lock);
    if (kern_res != KERN_SUCCESS)
        return (EINVAL);
    return (0);
}

/*
 * Signal a condition variable, waking only one thread.
 */
int
pthread_cond_signal(pthread_cond_t *cond)
{
    return pthread_cond_signal_thread_np(cond, NULL);
}
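
/*
 * Illustrative usage (editor's sketch; the queue helper is hypothetical):
 * when exactly one waiter can make progress, pthread_cond_signal avoids the
 * thundering herd a broadcast would cause.
 *
 *     pthread_mutex_lock(&mtx);
 *     enqueue(&q, item);             // hypothetical queue insert
 *     pthread_cond_signal(&cv);      // one consumer suffices
 *     pthread_mutex_unlock(&mtx);
 */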

/*
 * Manage a list of condition variables associated with a mutex
 */

static void
_pthread_cond_add(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    pthread_cond_t *c;
    LOCK(mutex->lock);
    if ((c = mutex->busy) != (pthread_cond_t *)NULL)
    {
        c->prev = cond;
    }
    cond->next = c;
    cond->prev = (pthread_cond_t *)NULL;
    mutex->busy = cond;
    UNLOCK(mutex->lock);
    if (cond->sem == SEMAPHORE_NULL)
        cond->sem = new_sem_from_pool();
}

static void
_pthread_cond_remove(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    pthread_cond_t *n, *p;

    LOCK(mutex->lock);
    if ((n = cond->next) != (pthread_cond_t *)NULL)
    {
        n->prev = cond->prev;
    }
    if ((p = cond->prev) != (pthread_cond_t *)NULL)
    {
        p->next = cond->next;
    }
    else
    {	/* This is the first in the list */
        mutex->busy = n;
    }
    UNLOCK(mutex->lock);
    if (cond->sigspending == 0)
    {
        restore_sem_to_pool(cond->sem);
        cond->sem = SEMAPHORE_NULL;
    }
}

static void
cond_cleanup(void *arg)
{
    pthread_cond_t *cond = (pthread_cond_t *)arg;
    pthread_mutex_t *mutex;
    // 4597450: begin
    pthread_t thread = pthread_self();
    int thcanceled = 0;

    LOCK(thread->lock);
    thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
    UNLOCK(thread->lock);

    if (thcanceled == 0)
        return;

    // 4597450: end
    LOCK(cond->lock);
    mutex = cond->busy;
    cond->waiters--;
    if (cond->waiters == 0) {
        _pthread_cond_remove(cond, mutex);
        cond->busy = (pthread_mutex_t *)NULL;
    }
    UNLOCK(cond->lock);

    /*
    ** Can't do anything if this fails -- we're on the way out
    */
    (void)pthread_mutex_lock(mutex);
}

/*
 * Suspend waiting for a condition variable.
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
 * remaining conforming behavior.
 */
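
/*
 * Illustrative usage (editor's sketch; mtx, cv and `ready` are hypothetical):
 * since KERN_ABORTED/EINTR are surfaced as spurious wakeups below, callers
 * must re-check their predicate in a loop.
 *
 *     pthread_mutex_lock(&mtx);
 *     while (!ready)                        // guards against spurious wakeups
 *         pthread_cond_wait(&cv, &mtx);     // atomically unlocks, then relocks
 *     // ... consume under the lock ...
 *     pthread_mutex_unlock(&mtx);
 */
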
__private_extern__ int
_pthread_cond_wait(pthread_cond_t *cond,
                   pthread_mutex_t *mutex,
                   const struct timespec *abstime,
                   int isRelative,
                   int isconforming)
{
    int res;
    kern_return_t kern_res = KERN_SUCCESS;
    int wait_res = 0;
    pthread_mutex_t *busy;
    mach_timespec_t then = {0, 0};
    struct timespec cthen = {0, 0};
    int sig = cond->sig;
    int msig = mutex->sig;
    extern void _pthread_testcancel(pthread_t thread, int isconforming);

    /* backwards compatibility: accept condition variables still carrying the static-initializer signature */
    if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
        return (EINVAL);

    if (isconforming) {
        if ((msig != _PTHREAD_MUTEX_SIG) && (msig != _PTHREAD_MUTEX_SIG_init))
            return (EINVAL);
        if (isconforming > 0)
            _pthread_testcancel(pthread_self(), 1);
    }
    LOCK(cond->lock);
    if (cond->sig != _PTHREAD_COND_SIG)
    {
        if (cond->sig != _PTHREAD_COND_SIG_init)
        {
            UNLOCK(cond->lock);
            return (EINVAL);	/* Not a condition variable */
        }
        _pthread_cond_init(cond, NULL, 0);
    }
#if defined(__i386__) || defined(__x86_64__)
    else if (cond->pshared == PTHREAD_PROCESS_SHARED) {
        UNLOCK(cond->lock);
        return (__new_pthread_cond_wait(cond, mutex, abstime, isRelative, isconforming));
    }
#endif /* __i386__ || __x86_64__ */

    if (abstime) {
        if (!isconforming)
        {
            if (isRelative == 0) {
                struct timespec now;
                struct timeval tv;
                gettimeofday(&tv, NULL);
                TIMEVAL_TO_TIMESPEC(&tv, &now);

                /* Compute relative time to sleep */
                then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                then.tv_sec = abstime->tv_sec - now.tv_sec;
                if (then.tv_nsec < 0)
                {
                    then.tv_nsec += NSEC_PER_SEC;
                    then.tv_sec--;
                }
                if (((int)then.tv_sec < 0) ||
                    ((then.tv_sec == 0) && (then.tv_nsec == 0)))
                {
                    UNLOCK(cond->lock);
                    return ETIMEDOUT;
                }
            } else {
                then.tv_sec = abstime->tv_sec;
                then.tv_nsec = abstime->tv_nsec;
            }
            if (then.tv_nsec >= NSEC_PER_SEC) {
                UNLOCK(cond->lock);
                return EINVAL;
            }
        } else {
            if (isRelative == 0) {
                /* preflight the checks for failures */
                struct timespec now;
                struct timeval tv;
                gettimeofday(&tv, NULL);
                TIMEVAL_TO_TIMESPEC(&tv, &now);

                /* Compute relative time to sleep */
                then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
                then.tv_sec = abstime->tv_sec - now.tv_sec;
                if (then.tv_nsec < 0)
                {
                    then.tv_nsec += NSEC_PER_SEC;
                    then.tv_sec--;
                }
                if (((int)then.tv_sec < 0) ||
                    ((then.tv_sec == 0) && (then.tv_nsec == 0)))
                {
                    UNLOCK(cond->lock);
                    return ETIMEDOUT;
                }
                if (then.tv_nsec >= NSEC_PER_SEC) {
                    UNLOCK(cond->lock);
                    return EINVAL;
                }
            }
            /*
             * We could clean this up and pass the computed relative time to
             * the kernel, but the kernel would just repeat the computation.
             * Until the kernel changes, pass the absolute time through.
             */
            cthen.tv_sec = abstime->tv_sec;
            cthen.tv_nsec = abstime->tv_nsec;
            if ((cthen.tv_sec < 0) || (cthen.tv_nsec < 0)) {
                UNLOCK(cond->lock);
                return EINVAL;
            }
            if (cthen.tv_nsec >= NSEC_PER_SEC) {
                UNLOCK(cond->lock);
                return EINVAL;
            }
        }
    }

    if (++cond->waiters == 1)
    {
        _pthread_cond_add(cond, mutex);
        cond->busy = mutex;
    }
    else if ((busy = cond->busy) != mutex)
    {
        /* Must always specify the same mutex! */
        cond->waiters--;
        UNLOCK(cond->lock);
        return (EINVAL);
    }
    UNLOCK(cond->lock);

    LOCK(mutex->lock);
    if (--mutex->mtxopts.options.lock_count == 0)
    {
        PLOCKSTAT_MUTEX_RELEASE(mutex, (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) ? 1 : 0);

        if (mutex->sem == SEMAPHORE_NULL)
            mutex->sem = new_sem_from_pool();
        mutex->owner = _PTHREAD_MUTEX_OWNER_SWITCHING;
        UNLOCK(mutex->lock);

        if (!isconforming) {
            if (abstime) {
                kern_res = semaphore_timedwait_signal(cond->sem, mutex->sem, then);
            } else {
                PTHREAD_MACH_CALL(semaphore_wait_signal(cond->sem, mutex->sem), kern_res);
            }
        } else {
            pthread_cleanup_push(cond_cleanup, (void *)cond);
            wait_res = __semwait_signal(cond->sem, mutex->sem, abstime != NULL, isRelative,
                    (int64_t)cthen.tv_sec, (int32_t)cthen.tv_nsec);
            pthread_cleanup_pop(0);
        }
    } else {
        PLOCKSTAT_MUTEX_RELEASE(mutex, (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) ? 1 : 0);
        UNLOCK(mutex->lock);
        if (!isconforming) {
            if (abstime) {
                kern_res = semaphore_timedwait(cond->sem, then);
            } else {
                PTHREAD_MACH_CALL(semaphore_wait(cond->sem), kern_res);
            }
        } else {
            pthread_cleanup_push(cond_cleanup, (void *)cond);
            wait_res = __semwait_signal(cond->sem, 0, abstime != NULL, isRelative,
                    (int64_t)cthen.tv_sec, (int32_t)cthen.tv_nsec);
            pthread_cleanup_pop(0);
        }
    }

    LOCK(cond->lock);
    cond->waiters--;
    if (cond->waiters == 0)
    {
        _pthread_cond_remove(cond, mutex);
        cond->busy = (pthread_mutex_t *)NULL;
    }
    UNLOCK(cond->lock);
    if ((res = pthread_mutex_lock(mutex)) != 0)
        return (res);

    if (!isconforming) {
        /* KERN_ABORTED can be treated as a spurious wakeup */
        if ((kern_res == KERN_SUCCESS) || (kern_res == KERN_ABORTED))
            return (0);
        else if (kern_res == KERN_OPERATION_TIMED_OUT)
            return (ETIMEDOUT);
        return (EINVAL);
    } else {
        if (wait_res < 0) {
            if (errno == ETIMEDOUT) {
                return ETIMEDOUT;
            } else if (errno == EINTR) {
                /*
                ** EINTR can be treated as a spurious wakeup unless we were canceled.
                */
                return 0;
            }
            return EINVAL;
        }
        return 0;
    }
}
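
/*
 * Worked example of the relative-timeout arithmetic above (editor's sketch
 * with assumed numbers): abstime = {100, 200000000} and now = {99, 700000000}
 * give a raw difference of {1, -500000000}; the borrow step adds NSEC_PER_SEC
 * and decrements the seconds, yielding then = {0, 500000000}, i.e. a 0.5 s
 * sleep.  A zero or negative result is reported as ETIMEDOUT before any
 * kernel call is made.
 */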


int
pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
                   pthread_mutex_t *mutex,
                   const struct timespec *abstime)
{
    return (_pthread_cond_wait(cond, mutex, abstime, 1, 0));
}
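
/*
 * Illustrative usage (editor's sketch; mtx, cv and `ready` are hypothetical):
 * despite the parameter being named abstime, this np variant interprets the
 * timespec as a *relative* timeout (isRelative == 1 above).
 *
 *     struct timespec rel = { 0, 500 * 1000 * 1000 };    // 500 ms from now
 *     pthread_mutex_lock(&mtx);
 *     while (!ready) {
 *         if (pthread_cond_timedwait_relative_np(&cv, &mtx, &rel) == ETIMEDOUT)
 *             break;                                     // gave up waiting
 *     }
 *     pthread_mutex_unlock(&mtx);
 */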

int
pthread_condattr_init(pthread_condattr_t *attr)
{
    attr->sig = _PTHREAD_COND_ATTR_SIG;
    attr->pshared = _PTHREAD_DEFAULT_PSHARED;
    return (0);
}

int
pthread_condattr_destroy(pthread_condattr_t *attr)
{
    attr->sig = _PTHREAD_NO_SIG;	/* Uninitialized */
    return (0);
}

int
pthread_condattr_getpshared(const pthread_condattr_t *attr,
                            int *pshared)
{
    if (attr->sig == _PTHREAD_COND_ATTR_SIG)
    {
        *pshared = (int)attr->pshared;
        return (0);
    } else
    {
        return (EINVAL);	/* Not an initialized 'attribute' structure */
    }
}


__private_extern__ int
_pthread_cond_init(pthread_cond_t *cond,
                   const pthread_condattr_t *attr,
                   int conforming)
{
    cond->next = (pthread_cond_t *)NULL;
    cond->prev = (pthread_cond_t *)NULL;
    cond->busy = (pthread_mutex_t *)NULL;
    cond->waiters = 0;
    cond->sigspending = 0;
    if (conforming) {
        if (attr)
            cond->pshared = attr->pshared;
        else
            cond->pshared = _PTHREAD_DEFAULT_PSHARED;
    } else
        cond->pshared = _PTHREAD_DEFAULT_PSHARED;
    cond->sem = SEMAPHORE_NULL;
    cond->sig = _PTHREAD_COND_SIG;
    return (0);
}


/* temporary home until pshared support is fully fixed */
int
pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
{

    if (attr->sig == _PTHREAD_COND_ATTR_SIG)
    {
#if __DARWIN_UNIX03
#ifdef PR_5243343
        if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED && PR_5243343_flag))
#else /* !PR_5243343 */
        if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#endif /* PR_5243343 */
#else /* __DARWIN_UNIX03 */
        if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
        {
            attr->pshared = pshared;
            return (0);
        } else
        {
            return (EINVAL);	/* Invalid parameter */
        }
    } else
    {
        return (EINVAL);	/* Not an initialized 'attribute' structure */
    }

}
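
/*
 * Illustrative usage (editor's sketch; shared_cv is a hypothetical
 * pthread_cond_t placed in memory shared between processes, e.g. a
 * mapping created with mmap(..., MAP_SHARED, ...)):
 *
 *     pthread_condattr_t ca;
 *     pthread_condattr_init(&ca);
 *     if (pthread_condattr_setpshared(&ca, PTHREAD_PROCESS_SHARED) == 0)
 *         pthread_cond_init(shared_cv, &ca);
 *     pthread_condattr_destroy(&ca);
 */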

#if defined(__i386__) || defined(__x86_64__)

__private_extern__ int
_new_pthread_cond_init(pthread_cond_t *ocond,
                   const pthread_condattr_t *attr,
                   int conforming)
{
    npthread_cond_t *cond = (npthread_cond_t *)ocond;

    cond->busy = (npthread_mutex_t *)NULL;
    cond->c_seq[0] = 0;
    cond->c_seq[1] = 0;
    cond->c_seq[2] = 0;

    cond->rfu = 0;
    if (((uintptr_t)cond & 0x07) != 0) {
        cond->misalign = 1;
    } else {
        cond->misalign = 0;
    }
    if (conforming) {
        if (attr)
            cond->pshared = attr->pshared;
        else
            cond->pshared = _PTHREAD_DEFAULT_PSHARED;
    } else
        cond->pshared = _PTHREAD_DEFAULT_PSHARED;
    cond->sig = _PTHREAD_COND_SIG;
    return (0);
}

int
_new_pthread_cond_destroy(pthread_cond_t *ocond)
{
    npthread_cond_t *cond = (npthread_cond_t *)ocond;
    int ret;

    LOCK(cond->lock);
    ret = _new_pthread_cond_destroy_locked(ocond);
    UNLOCK(cond->lock);

    return (ret);
}

int
_new_pthread_cond_destroy_locked(pthread_cond_t *ocond)
{
    npthread_cond_t *cond = (npthread_cond_t *)ocond;
    int ret;
    int sig = cond->sig;
    uint32_t *c_lseqcnt;
    uint32_t *c_useqcnt;
    uint32_t lgenval, ugenval;

    /* backwards compatibility: accept condition variables still carrying the static-initializer signature */
    if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
        return (EINVAL);

    if (cond->sig == _PTHREAD_COND_SIG)
    {
        COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt);
        lgenval = *c_lseqcnt;
        ugenval = *c_useqcnt;
        if (lgenval == ugenval)
        {
            cond->sig = _PTHREAD_NO_SIG;
            ret = 0;
        } else
            ret = EBUSY;
    } else
        ret = EINVAL;	/* Not an initialized condition variable structure */
    return (ret);
}

/*
 * Signal a condition variable, waking up all threads waiting for it.
 */
int
_new_pthread_cond_broadcast(pthread_cond_t *ocond)
{
    npthread_cond_t *cond = (npthread_cond_t *)ocond;
    int sig = cond->sig;
    npthread_mutex_t *mutex;
    uint32_t lgenval, ugenval, mgen, ugen, flags, mtxgen, mtxugen, notify;
    int diffgen, retval, dropcount, mutexrefs;
    uint64_t oldval64, newval64;
    uint32_t *c_lseqcnt;
    uint32_t *c_useqcnt;
    uint32_t *pmtx = NULL;


    /* backwards compatibility: accept condition variables still carrying the static-initializer signature */
    if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
        return (EINVAL);

    if (sig != _PTHREAD_COND_SIG)
    {
        int res;

        LOCK(cond->lock);
        if (cond->sig == _PTHREAD_COND_SIG_init)
        {
            _new_pthread_cond_init(ocond, NULL, 0);
            res = 0;
        } else if (cond->sig != _PTHREAD_COND_SIG) {
            res = EINVAL;	/* Not a condition variable */
            UNLOCK(cond->lock);
            return (res);
        }
        UNLOCK(cond->lock);
    }

#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif

    COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt);
retry:
    lgenval = *c_lseqcnt;
    ugenval = *c_useqcnt;
    diffgen = lgenval - ugenval;	/* pending waiters */

    if (diffgen <= 0) {
#if _KSYN_TRACE_
        (void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
        return (0);
    }

    mutex = cond->busy;

    if (OSAtomicCompareAndSwap32(ugenval, ugenval + diffgen, (volatile int *)c_useqcnt) != TRUE)
        goto retry;

#ifdef COND_MTX_WAITQUEUEMOVE

    if ((mutex != NULL) && cond->pshared != PTHREAD_PROCESS_SHARED) {
#if _KSYN_TRACE_
        (void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, 1, diffgen, 0, 0);
#endif
        (void)__mtx_holdlock(mutex, diffgen, &flags, &pmtx, &mgen, &ugen);
        mutexrefs = 1;
    } else {
        if (cond->pshared != PTHREAD_PROCESS_SHARED)
            flags = _PTHREAD_MTX_OPT_NOHOLD;
        else
            flags = _PTHREAD_MTX_OPT_NOHOLD | _PTHREAD_MTX_OPT_PSHARED;
        mgen = ugen = 0;
        mutexrefs = 0;
        pmtx = NULL;
    }
#else /* COND_MTX_WAITQUEUEMOVE */

    if (cond->pshared != PTHREAD_PROCESS_SHARED)
        flags = _PTHREAD_MTX_OPT_NOHOLD;
    else
        flags = _PTHREAD_MTX_OPT_NOHOLD | _PTHREAD_MTX_OPT_PSHARED;
    pmtx = NULL;
    mgen = ugen = 0;
    mutexrefs = 0;
#endif /* COND_MTX_WAITQUEUEMOVE */

#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, 3, diffgen, 0, 0);
#endif
    retval = __psynch_cvbroad(ocond, lgenval, diffgen, (pthread_mutex_t *)pmtx, mgen, ugen, (uint64_t)0, flags);

#ifdef COND_MTX_WAITQUEUEMOVE
    if ((retval != -1) && (retval != 0)) {
        if ((mutexrefs != 0) && (retval <= PTHRW_MAX_READERS / 2)) {
            dropcount = retval;
#if _KSYN_TRACE_
            (void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, 2, dropcount, 0, 0);
#endif
            retval = __mtx_droplock(mutex, dropcount, &flags, &pmtx, &mtxgen, &mtxugen, &notify);
        }
    }
#endif /* COND_MTX_WAITQUEUEMOVE */

    oldval64 = (((uint64_t)(ugenval + diffgen)) << 32);
    oldval64 |= lgenval;
    newval64 = 0;

    OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt);

#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
    return (0);
}
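
/*
 * Worked example of the sequence counters above (editor's sketch with
 * assumed numbers): with three waiters arrived and none yet woken,
 * *c_lseqcnt == 3 and *c_useqcnt == 0, so diffgen == 3.  The 32-bit CAS
 * moves the unlock count 0 -> 3 and __psynch_cvbroad wakes three threads.
 * Once the two counters match, the 64-bit CAS collapses the pair {3, 3}
 * back to {0, 0} so the generation numbers do not grow without bound.
 */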


/*
 * Signal a condition variable, waking a specified thread.
 */
int
_new_pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
{
    npthread_cond_t *cond = (npthread_cond_t *)ocond;
    int sig = cond->sig;
    npthread_mutex_t *mutex;
    int retval, dropcount;
    uint32_t lgenval, ugenval, diffgen, mgen, ugen, flags, mtxgen, mtxugen, notify;
    uint32_t *c_lseqcnt;
    uint32_t *c_useqcnt;
    uint64_t oldval64, newval64;
    int mutexrefs;
    uint32_t *pmtx = NULL;

    /* backwards compatibility: accept condition variables still carrying the static-initializer signature */
    if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
        return (EINVAL);
    if (cond->sig != _PTHREAD_COND_SIG) {
        LOCK(cond->lock);
        if (cond->sig != _PTHREAD_COND_SIG) {
            if (cond->sig == _PTHREAD_COND_SIG_init) {
                _new_pthread_cond_init(ocond, NULL, 0);
            } else {
                UNLOCK(cond->lock);
                return (EINVAL);
            }
        }
        UNLOCK(cond->lock);
    }

#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif
    COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt);
retry:
    lgenval = *c_lseqcnt;
    ugenval = *c_useqcnt;
    diffgen = lgenval - ugenval;	/* pending waiters */
    if (diffgen <= 0) {
#if _KSYN_TRACE_
        (void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
        return (0);
    }

    mutex = cond->busy;

    if (OSAtomicCompareAndSwap32(ugenval, ugenval + 1, (volatile int *)c_useqcnt) != TRUE)
        goto retry;

#ifdef COND_MTX_WAITQUEUEMOVE
    if ((mutex != NULL) && (cond->pshared != PTHREAD_PROCESS_SHARED)) {
#if _KSYN_TRACE_
        (void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 1, 0, 0, 0);
#endif
        (void)__mtx_holdlock(mutex, 1, &flags, &pmtx, &mgen, &ugen);
        mutexrefs = 1;
    } else {
        if (cond->pshared != PTHREAD_PROCESS_SHARED)
            flags = _PTHREAD_MTX_OPT_NOHOLD;
        else
            flags = _PTHREAD_MTX_OPT_NOHOLD | _PTHREAD_MTX_OPT_PSHARED;
        mgen = ugen = 0;
        mutexrefs = 0;
    }
#else /* COND_MTX_WAITQUEUEMOVE */
    if (cond->pshared != PTHREAD_PROCESS_SHARED)
        flags = _PTHREAD_MTX_OPT_NOHOLD;
    else
        flags = _PTHREAD_MTX_OPT_NOHOLD | _PTHREAD_MTX_OPT_PSHARED;
    mgen = ugen = 0;
    mutexrefs = 0;

#endif /* COND_MTX_WAITQUEUEMOVE */

#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 3, lgenval, ugenval + 1, 0);
#endif
    retval = __psynch_cvsignal(ocond, lgenval, ugenval + 1, (pthread_mutex_t *)mutex, mgen, ugen, pthread_mach_thread_np(thread), flags);

#ifdef COND_MTX_WAITQUEUEMOVE
    if ((retval != -1) && (retval != 0) && (mutexrefs != 0)) {
        dropcount = retval;
#if _KSYN_TRACE_
        (void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 4, dropcount, 0, 0);
#endif
        retval = __mtx_droplock(mutex, dropcount, &flags, &pmtx, &mtxgen, &mtxugen, &notify);
    }
#endif /* COND_MTX_WAITQUEUEMOVE */

    if (lgenval == ugenval + 1) {
        oldval64 = (((uint64_t)(ugenval + 1)) << 32);
        oldval64 |= lgenval;
        newval64 = 0;
        OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt);
#if _KSYN_TRACE_
        (void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 5, 0, 0, 0);
#endif
    }

#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
    return (0);
}

/*
 * Signal a condition variable, waking only one thread.
 */
int
_new_pthread_cond_signal(pthread_cond_t *cond)
{
    return _new_pthread_cond_signal_thread_np(cond, NULL);
}

/*
 * Manage a list of condition variables associated with a mutex
 */


/*
 * Suspend waiting for a condition variable.
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
 * remaining conforming behavior.
 */
__private_extern__ int
__new_pthread_cond_wait(pthread_cond_t *ocond,
                   pthread_mutex_t *omutex,
                   const struct timespec *abstime,
                   int isRelative,
                   int isconforming)
{
    int retval;
    npthread_cond_t *cond = (npthread_cond_t *)ocond;
    npthread_mutex_t *mutex = (npthread_mutex_t *)omutex;
    mach_timespec_t then = {0, 0};
    struct timespec cthen = {0, 0};
    int sig = cond->sig;
    int msig = mutex->sig;
    int firstfit = 0;
    npthread_mutex_t *pmtx;
    uint32_t mtxgen, mtxugen, flags, updateval, notify;
    uint32_t lgenval, ugenval;
    uint32_t *c_lseqcnt;
    uint32_t *c_useqcnt;
    uint32_t *npmtx = NULL;

    extern void _pthread_testcancel(pthread_t thread, int isconforming);

    /* backwards compatibility: accept condition variables still carrying the static-initializer signature */
    if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
        return (EINVAL);

    if (isconforming) {
        if ((msig != _PTHREAD_MUTEX_SIG) && (msig != _PTHREAD_MUTEX_SIG_init))
            return (EINVAL);
        if (isconforming > 0)
            _pthread_testcancel(pthread_self(), 1);
    }
    if (cond->sig != _PTHREAD_COND_SIG)
    {
        LOCK(cond->lock);
        if (cond->sig != _PTHREAD_COND_SIG_init)
        {
            UNLOCK(cond->lock);
            return (EINVAL);	/* Not a condition variable */
        }
        _new_pthread_cond_init(ocond, NULL, 0);
        UNLOCK(cond->lock);
    }

#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, (uint32_t)cond, 0, 0, (uint32_t)abstime, 0);
#endif
    COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt);

    /* send relative time to kernel */
    /* cond->lock is not held on these early-return paths */
    if (abstime) {
        if (isRelative == 0) {
            struct timespec now;
            struct timeval tv;
            gettimeofday(&tv, NULL);
            TIMEVAL_TO_TIMESPEC(&tv, &now);

            /* Compute relative time to sleep */
            then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
            then.tv_sec = abstime->tv_sec - now.tv_sec;
            if (then.tv_nsec < 0)
            {
                then.tv_nsec += NSEC_PER_SEC;
                then.tv_sec--;
            }
            if (((int)then.tv_sec < 0) ||
                ((then.tv_sec == 0) && (then.tv_nsec == 0)))
            {
                return ETIMEDOUT;
            }
            if (isconforming != 0) {
                cthen.tv_sec = abstime->tv_sec;
                cthen.tv_nsec = abstime->tv_nsec;
                if ((cthen.tv_sec < 0) || (cthen.tv_nsec < 0)) {
                    return EINVAL;
                }
                if (cthen.tv_nsec >= NSEC_PER_SEC) {
                    return EINVAL;
                }
            }
        } else {
            then.tv_sec = abstime->tv_sec;
            then.tv_nsec = abstime->tv_nsec;
        }
        if (isconforming && ((then.tv_sec < 0) || (then.tv_nsec < 0))) {
            return EINVAL;
        }
        if (then.tv_nsec >= NSEC_PER_SEC) {
            return EINVAL;
        }
    }

    cond->busy = mutex;
    pmtx = mutex;

    ugenval = *c_useqcnt;
    lgenval = OSAtomicIncrement32((volatile int32_t *)c_lseqcnt);


#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 1, lgenval, ugenval, 0);
#endif
    notify = 0;
    retval = __mtx_droplock(pmtx, 1, &flags, &npmtx, &mtxgen, &mtxugen, &notify);
    if (retval != 0)
        return (EINVAL);
    if ((notify & 1) == 0) {
        npmtx = NULL;
    }
    if ((notify & 0xc0000000) != 0)
        then.tv_nsec |= (notify & 0xc0000000);

#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 3, (uint32_t)mutex, 0, 0);
#endif

    if (isconforming) {
        pthread_cleanup_push(_new_cond_cleanup, (void *)cond);
        updateval = __psynch_cvwait(ocond, lgenval, ugenval, (pthread_mutex_t *)npmtx, mtxgen, mtxugen, (uint64_t)then.tv_sec, (uint64_t)then.tv_nsec);
        pthread_cleanup_pop(0);
    } else {
        updateval = __psynch_cvwait(ocond, lgenval, ugenval, (pthread_mutex_t *)npmtx, mtxgen, mtxugen, (uint64_t)then.tv_sec, (uint64_t)then.tv_nsec);
    }

    retval = 0;

#ifdef COND_MTX_WAITQUEUEMOVE
    /* Needs to handle timedout */
    if (updateval == (uint32_t)-1) {
        retval = errno;
#if _KSYN_TRACE_
        (void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 4, retval, 0, 0);
#endif
        /* add unlock ref to show one less waiter */
        _new_cond_dropwait(cond);

        pthread_mutex_lock(omutex);

    } else if ((updateval & PTHRW_MTX_NONE) != 0) {
#if _KSYN_TRACE_
        (void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 5, updateval, 0, 0);
#endif
        pthread_mutex_lock(omutex);
    } else {
        /* on successful return mutex held */
        /* returns 0 on successful update */
#if _KSYN_TRACE_
        (void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 6, updateval, 0, 0);
#endif
        firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
        if (__mtx_updatebits(mutex, updateval, firstfit, 1) == 1) {
            /* not expected to be here */
            LIBC_ABORT("CONDWAIT mutex acquire mishap");
        }
        if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
            mutex->mtxopts.options.lock_count++;
    }
#else /* COND_MTX_WAITQUEUEMOVE */
    if (updateval == (uint32_t)-1) {
        if (errno == ETIMEDOUT) {
            retval = ETIMEDOUT;
        } else if (errno == EINTR) {
            /*
            ** EINTR can be treated as a spurious wakeup unless we were canceled.
            */
            retval = 0;
        } else
            retval = EINVAL;

        /* add unlock ref to show one less waiter */
        _new_cond_dropwait(cond);
    } else
        retval = 0;
#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 4, retval, 0, 0);
#endif
    pthread_mutex_lock(omutex);

#endif /* COND_MTX_WAITQUEUEMOVE */

#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0, 0, retval, 0);
#endif
    return (retval);
}

static void
_new_cond_cleanup(void *arg)
{
    npthread_cond_t *cond = (npthread_cond_t *)arg;
    pthread_mutex_t *mutex;

    // 4597450: begin
    pthread_t thread = pthread_self();
    int thcanceled = 0;

    LOCK(thread->lock);
    thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
    UNLOCK(thread->lock);

    if (thcanceled == 0)
        return;

    // 4597450: end
    mutex = cond->busy;

    /* add unlock ref to show one less waiter */
    _new_cond_dropwait(cond);

    /*
    ** Can't do anything if this fails -- we're on the way out
    */
    if (mutex != NULL)
        (void)pthread_mutex_lock(mutex);

}

static void
_new_cond_dropwait(npthread_cond_t *cond)
{
    int sig = cond->sig;
    int retval;
    uint32_t lgenval, ugenval, diffgen, mgen, ugen, flags;
    uint32_t *c_lseqcnt;
    uint32_t *c_useqcnt;
    uint64_t oldval64, newval64;

    /* nothing to drop if the variable was never fully initialized */
    if (sig != _PTHREAD_COND_SIG)
        return;

#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0xee, 0);
#endif
    COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt);
retry:
    lgenval = *c_lseqcnt;
    ugenval = *c_useqcnt;
    diffgen = lgenval - ugenval;	/* pending waiters */

    if (diffgen <= 0) {
#if _KSYN_TRACE_
        (void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 1, 0, 0xee, 0);
#endif
        return;
    }

    if (OSAtomicCompareAndSwap32(ugenval, ugenval + 1, (volatile int *)c_useqcnt) != TRUE)
        goto retry;

    if (lgenval == ugenval + 1) {
        /* last one */
        /* send last drop notify to erase pre post */
        flags = _PTHREAD_MTX_OPT_LASTDROP;

        if (cond->pshared == PTHREAD_PROCESS_SHARED)
            flags |= _PTHREAD_MTX_OPT_PSHARED;
        mgen = ugen = 0;

#if _KSYN_TRACE_
        (void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 1, 0, 0xee, 0);
#endif
        retval = __psynch_cvsignal((pthread_cond_t *)cond, lgenval, ugenval + 1, (pthread_mutex_t *)NULL, mgen, ugen, MACH_PORT_NULL, flags);

        oldval64 = (((uint64_t)(ugenval + 1)) << 32);
        oldval64 |= lgenval;
        newval64 = 0;
        OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt);
    }

#if _KSYN_TRACE_
    (void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 2, 0, 0xee, 0);
#endif
    return;
}


int
_new_pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
                   pthread_mutex_t *mutex,
                   const struct timespec *abstime)
{
    return (__new_pthread_cond_wait(cond, mutex, abstime, 1, 0));
}


int
_new_pthread_cond_wait(pthread_cond_t *cond,
                   pthread_mutex_t *mutex)
{
    return (__new_pthread_cond_wait(cond, mutex, 0, 0, 1));
}

int
_new_pthread_cond_timedwait(pthread_cond_t *cond,
                   pthread_mutex_t *mutex,
                   const struct timespec *abstime)
{
    return (__new_pthread_cond_wait(cond, mutex, abstime, 0, 1));
}

#endif /* __i386__ || __x86_64__ */

#else /* !BUILDING_VARIANT */

extern int _pthread_cond_wait(pthread_cond_t *cond,
            pthread_mutex_t *mutex,
            const struct timespec *abstime,
            int isRelative,
            int isconforming);

#endif /* !BUILDING_VARIANT ] */

/*
 * Initialize a condition variable.  This is the public interface.
 * We can't trust the lock, so initialize it first before taking it.
 */
int
pthread_cond_init(pthread_cond_t *cond,
                  const pthread_condattr_t *attr)
{
    int conforming;

#if __DARWIN_UNIX03
    conforming = 1;
#else /* __DARWIN_UNIX03 */
    conforming = 0;
#endif /* __DARWIN_UNIX03 */

    LOCK_INIT(cond->lock);
#if defined(__i386__) || defined(__x86_64__)
    if ((attr != NULL) && (attr->pshared == PTHREAD_PROCESS_SHARED)) {
        return (_new_pthread_cond_init(cond, attr, conforming));
    }
#endif /* __i386__ || __x86_64__ */

    return (_pthread_cond_init(cond, attr, conforming));
}
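
/*
 * Illustrative usage (editor's sketch): both initialization styles yield a
 * variable carrying the _PTHREAD_COND_SIG signature once first used; the
 * statically initialized form is completed lazily by the functions above.
 *
 *     pthread_cond_t a = PTHREAD_COND_INITIALIZER;  // static; completed lazily
 *     pthread_cond_t b;
 *     pthread_cond_init(&b, NULL);                  // dynamic; default attributes
 */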

/*
int
pthread_cond_wait(pthread_cond_t *cond,
        pthread_mutex_t *mutex)

int
pthread_cond_timedwait(pthread_cond_t *cond,
        pthread_mutex_t *mutex,
        const struct timespec *abstime)

moved to pthread_cancelable.c */