1 | /* | |
2 | * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. Please obtain a copy of the License at | |
10 | * http://www.opensource.apple.com/apsl/ and read it before using this | |
11 | * file. | |
12 | * | |
13 | * The Original Code and all software distributed under the License are | |
14 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
15 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
16 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
18 | * Please see the License for the specific language governing rights and | |
19 | * limitations under the License. | |
20 | * | |
21 | * @APPLE_LICENSE_HEADER_END@ | |
22 | */ | |
23 | /* | |
24 | * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991 | |
25 | * All Rights Reserved | |
26 | * | |
27 | * Permission to use, copy, modify, and distribute this software and | |
28 | * its documentation for any purpose and without fee is hereby granted, | |
29 | * provided that the above copyright notice appears in all copies and | |
30 | * that both the copyright notice and this permission notice appear in | |
31 | * supporting documentation. | |
32 | * | |
33 | * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE | |
34 | * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS | |
35 | * FOR A PARTICULAR PURPOSE. | |
36 | * | |
37 | * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR | |
38 | * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM | |
39 | * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, | |
40 | * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION | |
41 | * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | |
42 | */ | |
43 | /* | |
44 | * MkLinux | |
45 | */ | |
46 | ||
47 | /* | |
48 | * POSIX Pthread Library | |
49 | */ | |
50 | ||
51 | #include "internal.h" | |
52 | #include <sys/time.h> /* For struct timespec and getclock(). */ | |
53 | #include <stdio.h> | |
54 | ||
55 | #ifdef PLOCKSTAT | |
56 | #include "plockstat.h" | |
57 | #else /* !PLOCKSTAT */ | |
58 | #define PLOCKSTAT_MUTEX_RELEASE(x, y) | |
59 | #endif /* PLOCKSTAT */ | |
60 | ||
61 | __private_extern__ int _pthread_cond_init(_pthread_cond *, const pthread_condattr_t *, int); | |
62 | __private_extern__ int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int isRelative, int isconforming); | |
63 | ||
64 | extern int __gettimeofday(struct timeval *, struct timezone *); | |
65 | ||
66 | #ifndef BUILDING_VARIANT | |
67 | static void _pthread_cond_cleanup(void *arg); | |
68 | static void _pthread_cond_updateval(_pthread_cond * cond, int error, uint32_t updateval); | |
69 | #endif | |
70 | ||
71 | static void | |
72 | COND_GETSEQ_ADDR(_pthread_cond *cond, | |
73 | volatile uint32_t **c_lseqcnt, | |
74 | volatile uint32_t **c_useqcnt, | |
75 | volatile uint32_t **c_sseqcnt) | |
76 | { | |
77 | if (cond->misalign) { | |
78 | *c_lseqcnt = &cond->c_seq[1]; | |
79 | *c_sseqcnt = &cond->c_seq[2]; | |
80 | *c_useqcnt = &cond->c_seq[0]; | |
81 | } else { | |
82 | *c_lseqcnt = &cond->c_seq[0]; | |
83 | *c_sseqcnt = &cond->c_seq[1]; | |
84 | *c_useqcnt = &cond->c_seq[2]; | |
85 | } | |
86 | } | |
87 | ||
88 | #ifndef BUILDING_VARIANT /* [ */ | |
89 | ||
90 | int | |
91 | pthread_condattr_init(pthread_condattr_t *attr) | |
92 | { | |
93 | attr->sig = _PTHREAD_COND_ATTR_SIG; | |
94 | attr->pshared = _PTHREAD_DEFAULT_PSHARED; | |
95 | return 0; | |
96 | } | |
97 | ||
98 | int | |
99 | pthread_condattr_destroy(pthread_condattr_t *attr) | |
100 | { | |
101 | attr->sig = _PTHREAD_NO_SIG; | |
102 | return 0; | |
103 | } | |
104 | ||
105 | int | |
106 | pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared) | |
107 | { | |
108 | int res = EINVAL; | |
109 | if (attr->sig == _PTHREAD_COND_ATTR_SIG) { | |
110 | *pshared = (int)attr->pshared; | |
111 | res = 0; | |
112 | } | |
113 | return res; | |
114 | } | |
115 | ||
116 | int | |
117 | pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared) | |
118 | { | |
119 | int res = EINVAL; | |
120 | if (attr->sig == _PTHREAD_COND_ATTR_SIG) { | |
121 | #if __DARWIN_UNIX03 | |
122 | if (pshared == PTHREAD_PROCESS_PRIVATE || pshared == PTHREAD_PROCESS_SHARED) | |
123 | #else /* __DARWIN_UNIX03 */ | |
124 | if (pshared == PTHREAD_PROCESS_PRIVATE) | |
125 | #endif /* __DARWIN_UNIX03 */ | |
126 | { | |
127 | attr->pshared = pshared; | |
128 | res = 0; | |
129 | } | |
130 | } | |
131 | return res; | |
132 | } | |
133 | ||
/*
 * Internal condvar initializer, shared by pthread_cond_init() and the
 * lazy initialization of statically-initialized condvars
 * (_pthread_cond_check_init).  'conforming' selects whether the attr's
 * pshared setting is honored; otherwise the default is forced.
 * Always returns 0.
 */
__private_extern__ int
_pthread_cond_init(_pthread_cond *cond, const pthread_condattr_t *attr, int conforming)
{
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	cond->busy = NULL;
	cond->c_seq[0] = 0;
	cond->c_seq[1] = 0;
	cond->c_seq[2] = 0;
	cond->unused = 0;

	/* Record whether c_seq starts on an 8-byte boundary; this decides
	 * which slots hold the (L,S) pair used for 64-bit CAS updates
	 * (see COND_GETSEQ_ADDR). */
	cond->misalign = (((uintptr_t)&cond->c_seq[0]) & 0x7) != 0;
	COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
	*c_sseqcnt = PTH_RWS_CV_CBIT; // set Sword to 0c

	if (conforming) {
		if (attr) {
			cond->pshared = attr->pshared;
		} else {
			cond->pshared = _PTHREAD_DEFAULT_PSHARED;
		}
	} else {
		cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	}

	// Ensure all contents are properly set before setting signature.
	OSMemoryBarrier();
	cond->sig = _PTHREAD_COND_SIG;

	return 0;
}
165 | ||
/*
 * Validate that 'cond' carries a live signature, lazily initializing it
 * if it still carries the static-initializer signature.  The signature is
 * checked again under cond->lock so that only one thread performs the
 * initialization while others observe the completed state.  If non-NULL,
 * *inited is set to true only when this call performed the initialization.
 * Returns 0 on success, EINVAL for an unrecognized signature.
 */
static int
_pthread_cond_check_init(_pthread_cond *cond, bool *inited)
{
	int res = 0;
	if (cond->sig != _PTHREAD_COND_SIG) {
		res = EINVAL;
		if (cond->sig == _PTHREAD_COND_SIG_init) {
			LOCK(cond->lock);
			/* Re-check under the lock: another thread may have
			 * completed the initialization in the meantime. */
			if (cond->sig == _PTHREAD_COND_SIG_init) {
				res = _pthread_cond_init(cond, NULL, 0);
				if (inited) {
					*inited = true;
				}
			} else if (cond->sig == _PTHREAD_COND_SIG) {
				res = 0;
			}
			UNLOCK(cond->lock);
		}
	}
	return res;
}
187 | ||
/*
 * Destroy a condition variable.  The sequence words are snapshotted with
 * a confirming 64-bit CAS; if a kernel prepost is pending (P bit set in
 * the S word) it is cleared via __psynch_cvclrprepost() after the
 * signature has been invalidated.  A statically-initialized condvar that
 * was never used is simply marked destroyed (compatibility path).
 */
int
pthread_cond_destroy(pthread_cond_t *ocond)
{
	_pthread_cond *cond = (_pthread_cond *)ocond;
	int res = EINVAL;
	if (cond->sig == _PTHREAD_COND_SIG) {
		LOCK(cond->lock);

		uint64_t oldval64, newval64;
		uint32_t lcntval, ucntval, scntval;
		volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

		COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

		do {
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;

			// validate it is not busy
			if ((lcntval & PTHRW_COUNT_MASK) != (scntval & PTHRW_COUNT_MASK)) {
				// NOTE(review): EBUSY is deliberately not returned
				// here (see disabled assignment below); destruction
				// proceeds even with waiters outstanding.
				//res = EBUSY;
				break;
			}
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;
		} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE);

		// <rdar://problem/13782056> Need to clear preposts.
		uint32_t flags = 0;
		bool needclearpre = ((scntval & PTH_RWS_CV_PBIT) != 0);
		if (needclearpre && cond->pshared == PTHREAD_PROCESS_SHARED) {
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		}

		cond->sig = _PTHREAD_NO_SIG;
		res = 0;

		UNLOCK(cond->lock);

		if (needclearpre) {
			(void)__psynch_cvclrprepost(cond, lcntval, ucntval, scntval, 0, lcntval, flags);
		}
	} else if (cond->sig == _PTHREAD_COND_SIG_init) {
		// Compatibility for misbehaving applications that attempt to
		// destroy a statically initialized condition variable.
		cond->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	return res;
}
240 | ||
/*
 * Common implementation behind pthread_cond_signal(), _broadcast() and
 * _signal_thread_np().  Lock-free loop: snapshot the L/U/S sequence
 * words, filter out racy snapshots, then advance the unlock-sequence
 * word U to reserve the wakeup(s) before asking the kernel to wake
 * waiters via __psynch_cvbroad()/__psynch_cvsignal().
 *
 * 'broadcast' wakes all pending waiters; otherwise 'thread' (may be
 * MACH_PORT_NULL) optionally targets a specific waiter.  Returns 0 on
 * success (including the no-waiters case) or EAGAIN after excessive
 * retries on persistently inconsistent snapshots.
 */
static int
_pthread_cond_signal(pthread_cond_t *ocond, bool broadcast, mach_port_t thread)
{
	int res;
	_pthread_cond *cond = (_pthread_cond *)ocond;

	uint32_t updateval;
	uint32_t diffgen;
	uint32_t ulval;

	uint64_t oldval64, newval64;
	uint32_t lcntval, ucntval, scntval;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	int retry_count = 0, uretry_count = 0;
	int ucountreset = 0;

	bool inited = false;
	res = _pthread_cond_check_init(cond, &inited);
	if (res != 0 || inited == true) {
		/* Freshly (lazily) initialized condvar has no waiters. */
		return res;
	}

	COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	bool retry;
	do {
		retry = false;

		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		/* No pending waiters (L == S), or untargeted signal with
		 * everything already signaled (L == U): nothing to wake.
		 * Confirm the snapshot with a no-op CAS before returning. */
		if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
		    (thread == MACH_PORT_NULL && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
			/* validate it is spurious and return */
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;

			if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE) {
				retry = true;
				continue;
			} else {
				return 0;
			}
		}

		/* Targeted signal: skip the U-word bookkeeping, the kernel
		 * call below receives the raw ucntval. */
		if (thread) {
			break;
		}

		/* validate to eliminate spurious values, race snapshots */
		if (is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			retry_count++;
			if (retry_count > 8192) {
				return EAGAIN;
			} else {
				sched_yield();
				retry = true;
				continue;
			}
		} else if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
			/* since ucntval may be newer, just redo */
			uretry_count++;
			if (uretry_count > 8192) {
				/*
				 * U value if not used for a while can go out of sync
				 * set this to S value and try one more time.
				 */
				if (ucountreset != 0) {
					return EAGAIN;
				} else if (OSAtomicCompareAndSwap32Barrier(ucntval, (scntval & PTHRW_COUNT_MASK), (volatile int32_t *)c_useqcnt) == TRUE) {
					/* now the U is reset to S value */
					ucountreset = 1;
					uretry_count = 0;
				}
			}
			sched_yield();
			retry = true;
			continue;
		}

		if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
			/* If U < S, set U = S+diff due to intr's TO, etc */
			ulval = (scntval & PTHRW_COUNT_MASK);
		} else {
			/* If U >= S, set U = U+diff due to intr's TO, etc */
			ulval = (ucntval & PTHRW_COUNT_MASK);
		}

		if (broadcast) {
			diffgen = diff_genseq(lcntval, ulval);
			// Set U = L
			ulval = (lcntval & PTHRW_COUNT_MASK);
		} else {
			ulval += PTHRW_INC;
		}

	} while (retry || OSAtomicCompareAndSwap32Barrier(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE);

	uint32_t flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED) {
		flags |= _PTHREAD_MTX_OPT_PSHARED;
	}

	/* Pack the (S,L) snapshot for the kernel. */
	uint64_t cvlsgen = ((uint64_t)scntval << 32) | lcntval;

	if (broadcast) {
		// pass old U val so kernel will know the diffgen
		uint64_t cvudgen = ((uint64_t)ucntval << 32) | diffgen;
		updateval = __psynch_cvbroad(ocond, cvlsgen, cvudgen, flags, NULL, 0, 0);
	} else {
		updateval = __psynch_cvsignal(ocond, cvlsgen, ucntval, thread, NULL, 0, 0, flags);
	}

	/* Fold the kernel's reported wakeups back into the S word. */
	if (updateval != (uint32_t)-1 && updateval != 0) {
		_pthread_cond_updateval(cond, 0, updateval);
	}

	return 0;
}
364 | ||
365 | ||
366 | /* | |
367 | * Signal a condition variable, waking up all threads waiting for it. | |
368 | */ | |
369 | int | |
370 | pthread_cond_broadcast(pthread_cond_t *ocond) | |
371 | { | |
372 | return _pthread_cond_signal(ocond, true, MACH_PORT_NULL); | |
373 | } | |
374 | ||
375 | /* | |
376 | * Signal a condition variable, waking a specified thread. | |
377 | */ | |
378 | int | |
379 | pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread) | |
380 | { | |
381 | mach_port_t mp = MACH_PORT_NULL; | |
382 | if (thread) { | |
383 | mp = pthread_mach_thread_np(thread); | |
384 | } | |
385 | return _pthread_cond_signal(ocond, false, mp); | |
386 | } | |
387 | ||
388 | /* | |
389 | * Signal a condition variable, waking only one thread. | |
390 | */ | |
391 | int | |
392 | pthread_cond_signal(pthread_cond_t *cond) | |
393 | { | |
394 | return pthread_cond_signal_thread_np(cond, NULL); | |
395 | } | |
396 | ||
397 | /* | |
398 | * Manage a list of condition variables associated with a mutex | |
399 | */ | |
400 | ||
401 | ||
/*
 * Suspend waiting for a condition variable.
 *
 * Converts an absolute deadline to the relative timeout the kernel
 * expects, registers this thread as a waiter by bumping the L sequence
 * word, drops the mutex (possibly handing its ownership transfer info to
 * the kernel), and blocks in __psynch_cvwait().  On return the S word is
 * reconciled via _pthread_cond_updateval() and the mutex is re-acquired.
 *
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
 * remaining conforming behavior..
 *
 * Returns 0, ETIMEDOUT, or EINVAL.
 */
__private_extern__ int
_pthread_cond_wait(pthread_cond_t *ocond,
	 pthread_mutex_t *omutex,
	 const struct timespec *abstime,
	 int isRelative,
	 int isconforming)
{
	int res;
	_pthread_cond *cond = (_pthread_cond *)ocond;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	struct timespec then = { 0, 0 };
	uint32_t mtxgen, mtxugen, flags=0, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval, savebits;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen;
	uint32_t *npmtx = NULL;

	extern void _pthread_testcancel(pthread_t thread, int isconforming);

	res = _pthread_cond_check_init(cond, NULL);
	if (res != 0) {
		return res;
	}

	if (isconforming) {
		/* Conforming mode validates the mutex signature (live or
		 * static-initializer form) and honors pending cancellation. */
		if (mutex->sig != _PTHREAD_MUTEX_SIG && (mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP) {
			return EINVAL;
		}
		if (isconforming > 0) {
			_pthread_testcancel(pthread_self(), 1);
		}
	}

	/* send relative time to kernel */
	if (abstime) {
		if (isRelative == 0) {
			struct timespec now;
			struct timeval tv;
			__gettimeofday(&tv, NULL);
			TIMEVAL_TO_TIMESPEC(&tv, &now);

			/* Compute relative time to sleep */
			then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
			then.tv_sec = abstime->tv_sec - now.tv_sec;
			if (then.tv_nsec < 0) {
				then.tv_nsec += NSEC_PER_SEC;
				then.tv_sec--;
			}
			/* Deadline already passed. */
			if (then.tv_sec < 0 || (then.tv_sec == 0 && then.tv_nsec == 0)) {
				return ETIMEDOUT;
			}
			if (isconforming &&
			    (abstime->tv_sec < 0 ||
			     abstime->tv_nsec < 0 ||
			     abstime->tv_nsec >= NSEC_PER_SEC)) {
				return EINVAL;
			}
		} else {
			then.tv_sec = abstime->tv_sec;
			then.tv_nsec = abstime->tv_nsec;
			if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
				return ETIMEDOUT;
			}
		}
		if (isconforming && (then.tv_sec < 0 || then.tv_nsec < 0)) {
			return EINVAL;
		}
		if (then.tv_nsec >= NSEC_PER_SEC) {
			return EINVAL;
		}
	}

	/* A condvar may only be associated with one mutex at a time. */
	if (cond->busy != NULL && cond->busy != mutex) {
		return EINVAL;
	}

	COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	/* Atomically bump L by one waiter while stripping the C/P bits off
	 * the S word; the saved bits are re-applied in the value handed to
	 * the kernel below. */
	do {
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;

		/* remove c and p bits on S word */
		savebits = scntval & PTH_RWS_CV_BITSALL;
		ulval = (scntval & PTHRW_COUNT_MASK);
		nlval = lcntval + PTHRW_INC;
		newval64 = (((uint64_t)ulval) << 32);
		newval64 |= nlval;
	} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE);

	cond->busy = mutex;

	res = __mtx_droplock(mutex, &flags, &npmtx, &mtxgen, &mtxugen);

	/* TBD: cases are for normal (non owner for recursive mutex; error checking)*/
	if (res != 0) {
		return EINVAL;
	}
	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) == 0) {
		npmtx = NULL;
		mugen = 0;
	} else {
		mugen = ((uint64_t)mtxugen << 32) | mtxgen;
	}
	flags &= ~_PTHREAD_MTX_OPT_MUTEX; /* reset the mutex bit as this is cvar */

	cvlsgen = ((uint64_t)(ulval | savebits)<< 32) | nlval;

	// SUSv3 requires pthread_cond_wait to be a cancellation point
	if (isconforming) {
		pthread_cleanup_push(_pthread_cond_cleanup, (void *)cond);
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
		_pthread_testcancel(pthread_self(), isconforming);
		pthread_cleanup_pop(0);
	} else {
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
	}

	if (updateval == (uint32_t)-1) {
		/* Kernel wait failed; decode the reason from errno. */
		int err = errno;
		switch (err & 0xff) {
		case ETIMEDOUT:
			res = ETIMEDOUT;
			break;
		case EINTR:
			// spurious wakeup (unless canceled)
			res = 0;
			break;
		default:
			res = EINVAL;
			break;
		}

		// add unlock ref to show one less waiter
		_pthread_cond_updateval(cond, err, 0);
	} else if (updateval != 0) {
		// Successful wait
		// The return due to prepost and might have bit states
		// update S and return for prepo if needed
		_pthread_cond_updateval(cond, 0, updateval);
	}

	/* Re-acquire the mutex before returning, as required by POSIX. */
	pthread_mutex_lock(omutex);

	return res;
}
560 | ||
/*
 * Cancellation cleanup handler installed around __psynch_cvwait() in
 * _pthread_cond_wait().  Acts only when the thread was actually canceled
 * (rdar 4597450): it credits the condvar with one fewer waiter and
 * re-acquires the associated mutex, as cancellation of a condition wait
 * must leave the mutex held.
 */
static void
_pthread_cond_cleanup(void *arg)
{
	_pthread_cond *cond = (_pthread_cond *)arg;
	pthread_mutex_t *mutex;

	// 4597450: begin
	pthread_t thread = pthread_self();
	int thcanceled = 0;

	LOCK(thread->lock);
	thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
	UNLOCK(thread->lock);

	/* Not a cancellation; the normal wait path does the bookkeeping. */
	if (thcanceled == 0) {
		return;
	}

	// 4597450: end
	mutex = (pthread_mutex_t *)cond->busy;

	// add unlock ref to show one less waiter
	_pthread_cond_updateval(cond, thread->cancel_error, 0);

	/*
	** Can't do anything if this fails -- we're on the way out
	*/
	if (mutex != NULL) {
		(void)pthread_mutex_lock(mutex);
	}
}
592 | ||
/* Error bits folded into the error code passed from the wait path:
 * C-bit / P-bit indicators from the kernel (see _pthread_cond_wait). */
#define ECVCERORR 256
#define ECVPERORR 512

/*
 * Reconcile the signal-sequence word (S) after waiters return from the
 * kernel.  'updateval' carries the count of returned waiters plus C/P
 * bits; when 'error' is non-zero it instead encodes a single returned
 * waiter (PTHRW_INC) with C/P bits decoded from ECVCERORR/ECVPERORR.
 * When all waiters have returned (L == S) the mutex association is
 * dropped, and a pending prepost (both C and P bits set at L == S) is
 * cleared in the kernel via __psynch_cvclrprepost().
 */
static void
_pthread_cond_updateval(_pthread_cond *cond, int error, uint32_t updateval)
{
	int needclearpre;

	uint32_t diffgen, nsval;
	uint64_t oldval64, newval64;
	uint32_t lcntval, ucntval, scntval;
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	if (error != 0) {
		updateval = PTHRW_INC;
		if ((error & ECVCERORR) != 0) {
			updateval |= PTH_RWS_CV_CBIT;
		}
		if ((error & ECVPERORR) != 0) {
			updateval |= PTH_RWS_CV_PBIT;
		}
	}

	COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);

	do {
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		diffgen = diff_genseq(lcntval, scntval); // pending waiters

		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;

		/* diffgen is unsigned, so this condition is effectively
		 * diffgen == 0: no pending waiters recorded. */
		if (diffgen <= 0) {
			/* TBD: Assert, should not be the case */
			/* validate it is spurious and return */
			newval64 = oldval64;
		} else {
			// update S by one

			// update scntval with number of expected returns and bits
			nsval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			// set bits
			nsval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

			// if L==S and c&p bits are set, needs clearpre
			if (((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) &&
			    ((nsval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				// reset p bit but retain c bit on the sword
				nsval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			} else {
				needclearpre = 0;
			}

			newval64 = (((uint64_t)nsval) << 32);
			newval64 |= lcntval;
		}
	} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE);

	if (diffgen > 0) {
		// if L == S, then reset associated mutex
		if ((nsval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
			cond->busy = NULL;
		}

		if (needclearpre != 0) {
			uint32_t flags = 0;
			if (cond->pshared == PTHREAD_PROCESS_SHARED) {
				flags |= _PTHREAD_MTX_OPT_PSHARED;
			}
			(void)__psynch_cvclrprepost(cond, lcntval, ucntval, nsval, 0, lcntval, flags);
		}
	}
}
670 | ||
671 | ||
672 | int | |
673 | pthread_cond_timedwait_relative_np(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime) | |
674 | { | |
675 | return _pthread_cond_wait(cond, mutex, abstime, 1, 0); | |
676 | } | |
677 | ||
678 | #endif /* !BUILDING_VARIANT ] */ | |
679 | ||
680 | int | |
681 | pthread_cond_init(pthread_cond_t *ocond, const pthread_condattr_t *attr) | |
682 | { | |
683 | int conforming; | |
684 | ||
685 | #if __DARWIN_UNIX03 | |
686 | conforming = 1; | |
687 | #else /* __DARWIN_UNIX03 */ | |
688 | conforming = 0; | |
689 | #endif /* __DARWIN_UNIX03 */ | |
690 | ||
691 | _pthread_cond *cond = (_pthread_cond *)ocond; | |
692 | LOCK_INIT(cond->lock); | |
693 | return _pthread_cond_init(cond, attr, conforming); | |
694 | } |