/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 * All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * MkLinux
 */

/*
 * POSIX Pthread Library
 */

#include "pthread_internals.h"
#include <sys/time.h>	/* For struct timespec and getclock(). */
#include <stdio.h>

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define	PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

extern int __semwait_signal(int, int, int, int, int64_t, int32_t);
extern int _pthread_cond_init(pthread_cond_t *, const pthread_condattr_t *, int);
extern int __unix_conforming;
extern int usenew_mtximpl;

#ifdef PR_5243343
/* 5243343 - temporary hack to detect if we are running the conformance test */
extern int PR_5243343_flag;
#endif /* PR_5243343 */

__private_extern__ int _pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, const struct timespec *abstime, int isRelative, int isconforming);
#ifndef BUILDING_VARIANT
static void cond_cleanup(void *arg);
static void cond_dropwait(npthread_cond_t * cond, int error, uint32_t updateval);
static void __pthread_cond_set_signature(npthread_cond_t * cond);
static int _pthread_cond_destroy_locked(pthread_cond_t *cond);
#endif

#if defined(__LP64__)
#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt) \
{ \
	if (cond->misalign != 0) { \
		c_lseqcnt = &cond->c_seq[1]; \
		c_sseqcnt = &cond->c_seq[2]; \
		c_useqcnt = &cond->c_seq[0]; \
	} else { \
		/* aligned */ \
		c_lseqcnt = &cond->c_seq[0]; \
		c_sseqcnt = &cond->c_seq[1]; \
		c_useqcnt = &cond->c_seq[2]; \
	} \
}
#else /* __LP64__ */
#define COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt) \
{ \
	if (cond->misalign != 0) { \
		c_lseqcnt = &cond->c_seq[1]; \
		c_sseqcnt = &cond->c_seq[2]; \
		c_useqcnt = &cond->c_seq[0]; \
	} else { \
		/* aligned */ \
		c_lseqcnt = &cond->c_seq[0]; \
		c_sseqcnt = &cond->c_seq[1]; \
		c_useqcnt = &cond->c_seq[2]; \
	} \
}
#endif /* __LP64__ */

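/*
 * Illustrative sketch (not part of the library build): how the functions in
 * this file typically use COND_GETSEQ_ADDR.  Roughly, the three words are a
 * lock-sequence (L, waiters entering the wait), an unlock/signal-sequence (U,
 * wakeups requested) and an S word (waiters accounted for, plus the C/P bits);
 * callers snapshot them, compute new values, and publish with a 64-bit CAS
 * over the L/S pair, retrying on contention.  The helper name below is
 * hypothetical.
 */
#if 0
static void example_snapshot_seq_words(npthread_cond_t *cond)
{
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t lcntval, ucntval, scntval;

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
	lcntval = *c_lseqcnt;	/* L word */
	ucntval = *c_useqcnt;	/* U word */
	scntval = *c_sseqcnt;	/* S word (carries the C/P bits) */
	/* ... compute new L/S values, CAS them back, retry on failure ... */
	(void)lcntval; (void)ucntval; (void)scntval;
}
#endif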

#define _KSYN_TRACE_ 0

#if _KSYN_TRACE_
/* The Function qualifiers */
#define DBG_FUNC_START		1
#define DBG_FUNC_END		2
#define DBG_FUNC_NONE		0

int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);

#define _KSYN_TRACE_UM_LOCK	0x9000060
#define _KSYN_TRACE_UM_UNLOCK	0x9000064
#define _KSYN_TRACE_UM_MHOLD	0x9000068
#define _KSYN_TRACE_UM_MDROP	0x900006c
#define _KSYN_TRACE_UM_CVWAIT	0x9000070
#define _KSYN_TRACE_UM_CVSIG	0x9000074
#define _KSYN_TRACE_UM_CVBRD	0x9000078
#define _KSYN_TRACE_UM_CDROPWT	0x90000a0
#define _KSYN_TRACE_UM_CVCLRPRE	0x90000a4

#endif /* _KSYN_TRACE_ */


#ifndef BUILDING_VARIANT /* [ */

int
pthread_condattr_init(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_COND_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return (0);
}

int
pthread_condattr_destroy(pthread_condattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;	/* Uninitialized */
	return (0);
}

int
pthread_condattr_getpshared(const pthread_condattr_t *attr,
		int *pshared)
{
	if (attr->sig == _PTHREAD_COND_ATTR_SIG)
	{
		*pshared = (int)attr->pshared;
		return (0);
	} else
	{
		return (EINVAL);	/* Not an initialized 'attribute' structure */
	}
}


/* temp home till pshared is fixed correctly */
int
pthread_condattr_setpshared(pthread_condattr_t * attr, int pshared)
{
	if (attr->sig == _PTHREAD_COND_ATTR_SIG)
	{
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			return (0);
		} else {
			return (EINVAL);	/* Invalid parameter */
		}
	} else
	{
		return (EINVAL);	/* Not an initialized 'attribute' structure */
	}
}

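/*
 * Usage sketch (illustrative only, not compiled into the library): standard
 * POSIX calls against the condattr interface above.  Note that under
 * __DARWIN_UNIX03 the setter accepts PTHREAD_PROCESS_SHARED as well as
 * PTHREAD_PROCESS_PRIVATE; otherwise only PTHREAD_PROCESS_PRIVATE is valid.
 */
#if 0
#include <pthread.h>
#include <assert.h>

static void example_condattr_usage(void)
{
	pthread_condattr_t attr;
	int pshared;

	assert(pthread_condattr_init(&attr) == 0);
	assert(pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE) == 0);
	assert(pthread_condattr_getpshared(&attr, &pshared) == 0);
	assert(pshared == PTHREAD_PROCESS_PRIVATE);
	assert(pthread_condattr_destroy(&attr) == 0);
}
#endif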
__private_extern__ int
_pthread_cond_init(pthread_cond_t *ocond,
		const pthread_condattr_t *attr,
		int conforming)
{
	npthread_cond_t * cond = (npthread_cond_t *)ocond;

	cond->busy = (npthread_mutex_t *)NULL;
	cond->c_seq[0] = 0;
	cond->c_seq[1] = 0;
	cond->c_seq[2] = 0;
	cond->rfu = 0;

	if (((uintptr_t)cond & 0x07) != 0) {
		cond->misalign = 1;
		cond->c_seq[2] = PTH_RWS_CV_CBIT;
	} else {
		cond->misalign = 0;
		cond->c_seq[1] = PTH_RWS_CV_CBIT;	/* set S word to 0c */
	}
	if (conforming) {
		if (attr)
			cond->pshared = attr->pshared;
		else
			cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	} else
		cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	/*
	 * For the new-style mutex, interlocks are not held all the time,
	 * so the signature must be set last, and the store must be
	 * protected against being reordered by the compiler:
	 * cond->sig = _PTHREAD_COND_SIG;
	 */
	__pthread_cond_set_signature(cond);
	return (0);
}

int
pthread_cond_destroy(pthread_cond_t * ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int ret;

	/* provide backwards compatibility for apps using statically initialized cond vars */
	if ((cond->sig != _PTHREAD_COND_SIG) && (cond->sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	LOCK(cond->lock);
	ret = _pthread_cond_destroy_locked(ocond);
	UNLOCK(cond->lock);

	return (ret);
}
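/*
 * Usage note (illustrative sketch, not part of the build): with this
 * implementation, pthread_cond_destroy() returns EBUSY while the L and S
 * sequence words disagree, i.e. while waiters are still blocked.  A safe
 * teardown therefore wakes every waiter and lets them return first; the
 * names below are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t example_m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  example_c = PTHREAD_COND_INITIALIZER;
static int example_done;

static void example_teardown(void)
{
	pthread_mutex_lock(&example_m);
	example_done = 1;
	pthread_cond_broadcast(&example_c);	/* wake every waiter first */
	pthread_mutex_unlock(&example_m);
	/* ... join the waiting threads ... */
	pthread_cond_destroy(&example_c);	/* no longer returns EBUSY once no one waits */
	pthread_mutex_destroy(&example_m);
}
#endif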

static int
_pthread_cond_destroy_locked(pthread_cond_t * ocond)
{
	npthread_cond_t *cond = (npthread_cond_t *)ocond;
	int ret;
	volatile uint32_t * c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t lcntval, ucntval, scntval;
	uint64_t oldval64, newval64;

retry:
	if (cond->sig == _PTHREAD_COND_SIG)
	{
		COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
		lcntval = *c_lseqcnt;
		ucntval = *c_useqcnt;
		scntval = *c_sseqcnt;

		if ((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) {
			/* validate it is not busy */
			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = oldval64;

			if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry;
			cond->sig = _PTHREAD_NO_SIG;
			ret = 0;
		} else
			ret = EBUSY;
	} else if (cond->sig == _PTHREAD_COND_SIG_init) {
		cond->sig = _PTHREAD_NO_SIG;
		ret = 0;
	} else
		ret = EINVAL;	/* Not an initialized condition variable structure */
	return (ret);
}

/*
 * Signal a condition variable, waking up all threads waiting for it.
 */
int
pthread_cond_broadcast(pthread_cond_t *ocond)
{
	npthread_cond_t * cond = (npthread_cond_t *)ocond;
	int sig = cond->sig;
	uint32_t flags, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint64_t oldval64, newval64, mugen, cvlsgen, cvudgen, mtid = 0;
	int diffgen, error = 0;
	volatile uint32_t * c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint32_t * pmtx = NULL;
	uint32_t nlval, ulval;
	int needclearpre = 0, retry_count = 0;

	/* provide backwards compatibility for apps using statically initialized cond vars */
	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (sig != _PTHREAD_COND_SIG)
	{
		LOCK(cond->lock);
		if (cond->sig == _PTHREAD_COND_SIG_init)
		{
			_pthread_cond_init(ocond, NULL, 0);
			/* just initialized; nothing to post */
			UNLOCK(cond->lock);
			return (0);
		} else if (cond->sig != _PTHREAD_COND_SIG) {
			/* Not a condition variable */
			UNLOCK(cond->lock);
			return (EINVAL);
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
	    ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK))) {
		/* validate it is spurious and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;

		if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, 0xf1f1f1f1, 0);
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, scntval, 0, 0xf1f1f1f1, 0);
#endif
		return (0);
	}

	if (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK)) || is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
		/* since ucntval may be newer, just redo */
		retry_count++;
		if (retry_count > 8192) {
			return (EAGAIN);
		} else {
			sched_yield();
			goto retry;
		}
	}

	if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
		/* If U < S, set U = S + diff (due to interrupts, timeouts, etc.) */
		ulval = (scntval & PTHRW_COUNT_MASK);
	} else {
		/* If U >= S, set U = U + diff (due to interrupts, timeouts, etc.) */
		ulval = (ucntval & PTHRW_COUNT_MASK);
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, lcntval, ucntval, scntval, diffgen, 0);
#endif

	diffgen = diff_genseq((lcntval & PTHRW_COUNT_MASK), (ulval & PTHRW_COUNT_MASK));

	/* set U = L */
	ulval = (lcntval & PTHRW_COUNT_MASK);
	if (OSAtomicCompareAndSwap32(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE) {
		goto retry;
	}

	flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED)
		flags |= _PTHREAD_MTX_OPT_PSHARED;
	pmtx = NULL;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, (uint32_t)cond, 3, diffgen, flags, 0);
#endif
	nlval = lcntval;

	/* pass the old U value so the kernel will know the diffgen */
	mugen = 0;
	cvlsgen = ((uint64_t)scntval << 32) | nlval;
	cvudgen = ((uint64_t)ucntval << 32) | diffgen;

	updateval = __psynch_cvbroad(ocond, cvlsgen, cvudgen, flags, (pthread_mutex_t *)pmtx, mugen, mtid);

	if (updateval != (uint32_t)-1) {

		/* if the kernel woke some threads, update S for them, as they will not access the cv on their way out */
		/* Were any threads woken or bits to be set? */
		if (updateval != 0) {
retry2:
			needclearpre = 0;
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;
			/* update scntval with number of expected returns and bits */
			nlval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			/* set bits */
			nlval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, 0x25, lcntval, scntval, updateval, 0);
#endif
			/* if L==S and the C and P bits are set, needs clearpre */
			if (((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK))
			    && ((nlval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				/* reset the P bit but retain the C bit on the S word */
				nlval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			}

			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = (((uint64_t)nlval) << 32);
			newval64 |= lcntval;

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_NONE, 0x25, nlval, scntval, updateval, 0);
#endif

			if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry2;

			/* if L == S, then reset the associated mutex */
			if ((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
				cond->busy = (npthread_mutex_t *)NULL;
			}

			if (needclearpre != 0) {
				(void)__psynch_cvclrprepost(ocond, lcntval, ucntval, nlval, 0, lcntval, flags);
			}
		}

	}
	error = 0;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVBRD | DBG_FUNC_END, (uint32_t)cond, 0, error, 0, 0);
#endif
	return (error);
}
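/*
 * Usage sketch (illustrative only, not compiled into the library): the
 * conventional predicate pattern for pthread_cond_broadcast().  Every waiter
 * re-checks the predicate in a loop, which also absorbs the spurious wakeups
 * this implementation allows.  The names are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  state_cv   = PTHREAD_COND_INITIALIZER;
static int state_ready;

static void example_publisher(void)
{
	pthread_mutex_lock(&state_lock);
	state_ready = 1;
	pthread_cond_broadcast(&state_cv);	/* wake all waiters */
	pthread_mutex_unlock(&state_lock);
}

static void example_waiter(void)
{
	pthread_mutex_lock(&state_lock);
	while (state_ready == 0)		/* loop: wakeups may be spurious */
		pthread_cond_wait(&state_cv, &state_lock);
	pthread_mutex_unlock(&state_lock);
}
#endif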


/*
 * Signal a condition variable, waking a specified thread.
 */

int
pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
{
	npthread_cond_t * cond = (npthread_cond_t *)ocond;
	int sig = cond->sig;
	uint32_t flags, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval = 0;
	volatile uint32_t * c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen, mtid = 0;
	int needclearpre = 0, retry_count = 0;
	int error;

	/* provide backwards compatibility for apps using statically initialized cond vars */

	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (cond->sig != _PTHREAD_COND_SIG) {
		LOCK(cond->lock);
		if (cond->sig != _PTHREAD_COND_SIG) {
			if (cond->sig == _PTHREAD_COND_SIG_init) {
				_pthread_cond_init(ocond, NULL, 0);
				/* just initialized, nothing to post yet */
				UNLOCK(cond->lock);
				return (0);
			} else {
				UNLOCK(cond->lock);
				return (EINVAL);
			}
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_START, (uint32_t)cond, 0, 0, 0, 0);
#endif
	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif

	if (((lcntval & PTHRW_COUNT_MASK) == (scntval & PTHRW_COUNT_MASK)) ||
	    ((thread == 0) && ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK)))) {
		/* If L <= S+U, the signal is spurious */
		/* validate it is spurious and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;

		if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, 0xf1f1f1f1, 0);
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, scntval, 0, 0xf1f1f1f1, 0);
#endif
		return (0);
	}

	if (((thread == 0) && (is_seqhigher((ucntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK)))) || is_seqhigher((scntval & PTHRW_COUNT_MASK), (lcntval & PTHRW_COUNT_MASK))) {
		/* since ucntval may be newer, just redo */
		retry_count++;
		if (retry_count > 8192) {
			return (EAGAIN);
		} else {
			sched_yield();
			goto retry;
		}
	}

	if (thread == 0) {
		/*
		 * Skip manipulating the U count, as ESRCH from the kernel cannot be handled
		 * properly.  The S count will cover the imbalance, and the next signal without
		 * a thread, or a broadcast, will correct it.  But we need to send the right U
		 * to the kernel so it can look for the appropriate sequence, so ulval is
		 * computed anyway.
		 */

		if (is_seqlower(ucntval & PTHRW_COUNT_MASK, scntval & PTHRW_COUNT_MASK) != 0) {
			/* If U < S, set U = S + 1 (due to interrupts, timeouts, etc.) */
			ulval = (scntval & PTHRW_COUNT_MASK) + PTHRW_INC;
		} else {
			/* If U >= S, set U = U + 1 (due to interrupts, timeouts, etc.) */
			ulval = (ucntval & PTHRW_COUNT_MASK) + PTHRW_INC;
		}

		if (OSAtomicCompareAndSwap32(ucntval, ulval, (volatile int32_t *)c_useqcnt) != TRUE) {
			goto retry;
		}
	}

	flags = 0;
	if (cond->pshared == PTHREAD_PROCESS_SHARED)
		flags |= _PTHREAD_MTX_OPT_PSHARED;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, (uint32_t)cond, 3, nlval, ulval, 0);
#endif
	nlval = lcntval;
	/* pass the old U value so the kernel will know the diffgen */
	mugen = 0;
	cvlsgen = ((uint64_t)scntval << 32) | nlval;

	updateval = __psynch_cvsignal(ocond, cvlsgen, ucntval, pthread_mach_thread_np(thread), (pthread_mutex_t *)0, mugen, mtid, flags);


	if (updateval != (uint32_t)-1) {

		/* if the kernel woke some threads, update S for them, as they will not access the cv on their way out */
		/* Were any threads woken or bits to be set? */
		if (updateval != 0) {
retry2:
			lcntval = *c_lseqcnt;
			ucntval = *c_useqcnt;
			scntval = *c_sseqcnt;
			/* update scntval with number of expected returns and bits */
			nlval = (scntval & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			/* set bits */
			nlval |= ((scntval & PTH_RWS_CV_BITSALL) | (updateval & PTH_RWS_CV_BITSALL));

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, 0x25, 0, 0, updateval, 0);
#endif
			/* if L==S and the C and P bits are set, needs clearpre */
			if (((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK))
			    && ((nlval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL)) {
				/* reset the P bit but retain the C bit on the S word */
				nlval &= PTH_RWS_CV_RESET_PBIT;
				needclearpre = 1;
			} else
				needclearpre = 0;

			oldval64 = (((uint64_t)scntval) << 32);
			oldval64 |= lcntval;
			newval64 = (((uint64_t)nlval) << 32);
			newval64 |= lcntval;

#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_NONE, 0x25, nlval, ulval, updateval, 0);
#endif

			if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
				goto retry2;

			/* if L == S, then reset the associated mutex */
			if ((nlval & PTHRW_COUNT_MASK) == (lcntval & PTHRW_COUNT_MASK)) {
				cond->busy = (npthread_mutex_t *)NULL;
			}

			if (needclearpre != 0) {
				(void)__psynch_cvclrprepost(ocond, lcntval, ucntval, nlval, 0, lcntval, flags);
			}
		}
	}

	error = 0;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVSIG | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
	return (error);
}

/*
 * Signal a condition variable, waking only one thread.
 */
int
pthread_cond_signal(pthread_cond_t *cond)
{
	return pthread_cond_signal_thread_np(cond, NULL);
}
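/*
 * Usage sketch (illustrative only, not compiled into the library):
 * pthread_cond_signal() wakes at most one waiter, so it suits hand-off of a
 * single unit of work; each consumer still re-checks the predicate under the
 * mutex.  The names are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_cv   = PTHREAD_COND_INITIALIZER;
static int q_items;

static void example_produce_one(void)
{
	pthread_mutex_lock(&q_lock);
	q_items++;
	pthread_cond_signal(&q_cv);	/* one item posted, one waiter woken */
	pthread_mutex_unlock(&q_lock);
}

static void example_consume_one(void)
{
	pthread_mutex_lock(&q_lock);
	while (q_items == 0)
		pthread_cond_wait(&q_cv, &q_lock);
	q_items--;
	pthread_mutex_unlock(&q_lock);
}
#endif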

/*
 * Manage a list of condition variables associated with a mutex
 */


/*
 * Suspend waiting for a condition variable.
 * Note: we have to keep a list of condition variables which are using
 * this same mutex variable so we can detect invalid 'destroy' sequences.
 * If isconforming < 0, we skip the _pthread_testcancel(), but keep the
 * remaining conforming behavior.
 */
__private_extern__ int
_pthread_cond_wait(pthread_cond_t *ocond,
		pthread_mutex_t *omutex,
		const struct timespec *abstime,
		int isRelative,
		int isconforming)
{
	int retval;
	npthread_cond_t * cond = (npthread_cond_t *)ocond;
	npthread_mutex_t * mutex = (npthread_mutex_t *)omutex;
	mach_timespec_t then = {0, 0};
	struct timespec cthen = {0, 0};
	int sig = cond->sig;
	int msig = mutex->sig;
	npthread_mutex_t * pmtx;
	uint32_t mtxgen, mtxugen, flags = 0, updateval;
	uint32_t lcntval, ucntval, scntval;
	uint32_t nlval, ulval, savebits;
	volatile uint32_t * c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64, mugen, cvlsgen;
	uint32_t * npmtx = NULL;
	int error, local_error;

	extern void _pthread_testcancel(pthread_t thread, int isconforming);

	/* provide backwards compatibility for apps using statically initialized cond vars */
	if ((sig != _PTHREAD_COND_SIG) && (sig != _PTHREAD_COND_SIG_init))
		return (EINVAL);

	if (isconforming) {
		if ((msig != _PTHREAD_MUTEX_SIG) && ((msig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP))
			return (EINVAL);
		if (isconforming > 0)
			_pthread_testcancel(pthread_self(), 1);
	}

	if (cond->sig != _PTHREAD_COND_SIG)
	{
		LOCK(cond->lock);
		if (cond->sig != _PTHREAD_COND_SIG) {
			if (cond->sig == _PTHREAD_COND_SIG_init) {
				_pthread_cond_init(ocond, NULL, 0);
			} else {
				UNLOCK(cond->lock);
				return (EINVAL);
			}
		}
		UNLOCK(cond->lock);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, (uint32_t)cond, isRelative, 0, (uint32_t)abstime, 0);
#endif
	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	/* send relative time to kernel */
	if (abstime) {
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_START, 0x11111111, abstime->tv_nsec, abstime->tv_sec, 0, 0);
#endif
		if (isRelative == 0) {
			struct timespec now;
			struct timeval tv;
			gettimeofday(&tv, NULL);
			TIMEVAL_TO_TIMESPEC(&tv, &now);

			/* Compute relative time to sleep */
			then.tv_nsec = abstime->tv_nsec - now.tv_nsec;
			then.tv_sec = abstime->tv_sec - now.tv_sec;
			if (then.tv_nsec < 0)
			{
				then.tv_nsec += NSEC_PER_SEC;
				then.tv_sec--;
			}
			if (((int)then.tv_sec < 0) ||
			    ((then.tv_sec == 0) && (then.tv_nsec == 0)))
			{
				return ETIMEDOUT;
			}
			if (isconforming != 0) {
				cthen.tv_sec = abstime->tv_sec;
				cthen.tv_nsec = abstime->tv_nsec;
				if ((cthen.tv_sec < 0) || (cthen.tv_nsec < 0)) {
					return EINVAL;
				}
				if (cthen.tv_nsec >= NSEC_PER_SEC) {
					return EINVAL;
				}
			}
		} else {
			then.tv_sec = abstime->tv_sec;
			then.tv_nsec = abstime->tv_nsec;
			if ((then.tv_sec == 0) && (then.tv_nsec == 0)) {
				return ETIMEDOUT;
			}
		}
		if (isconforming && ((then.tv_sec < 0) || (then.tv_nsec < 0))) {
			return EINVAL;
		}
		if (then.tv_nsec >= NSEC_PER_SEC) {
			return EINVAL;
		}
	}

	if ((cond->busy != (npthread_mutex_t *)NULL) && (cond->busy != mutex))
		return (EINVAL);

	pmtx = mutex;
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

	oldval64 = (((uint64_t)scntval) << 32);
	oldval64 |= lcntval;

	/* remove the C and P bits from the S word */
	savebits = scntval & PTH_RWS_CV_BITSALL;
	ulval = (scntval & PTHRW_COUNT_MASK);
	nlval = lcntval + PTHRW_INC;
	newval64 = (((uint64_t)ulval) << 32);
	newval64 |= nlval;

	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
		goto retry;

	cond->busy = mutex;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, lcntval, ucntval, scntval, 0);
#endif
	retval = __mtx_droplock(pmtx, PTHRW_INC, &flags, &npmtx, &mtxgen, &mtxugen);

	/* TBD: cases are for normal (non-owner for recursive mutex; error checking) */
	if (retval != 0)
		return (EINVAL);
	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) == 0) {
		npmtx = NULL;
		mugen = 0;
	} else
		mugen = ((uint64_t)mtxugen << 32) | mtxgen;
	flags &= ~_PTHREAD_MTX_OPT_MUTEX;	/* reset the mutex bit as this is a cvar */

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 3, (uint32_t)mutex, flags, 0);
#endif


	cvlsgen = ((uint64_t)(ulval | savebits) << 32) | nlval;

	if (isconforming) {
		pthread_cleanup_push(cond_cleanup, (void *)cond);
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
		pthread_cleanup_pop(0);
	} else {
		updateval = __psynch_cvwait(ocond, cvlsgen, ucntval, (pthread_mutex_t *)npmtx, mugen, flags, (int64_t)then.tv_sec, (int32_t)then.tv_nsec);
	}

	retval = 0;

	if (updateval == (uint32_t)-1) {
		local_error = errno;
		error = local_error & 0xff;
		if (error == ETIMEDOUT) {
			retval = ETIMEDOUT;
		} else if (error == EINTR) {
			/*
			 * EINTR can be treated as a spurious wakeup unless we were canceled.
			 */
			retval = 0;
		} else
			retval = EINVAL;
//#if _KSYN_TRACE_
//		(void)__kdebug_trace(0x9000070 | 0, (uint32_t)cond, 0xf1f1f2f2, local_error, error, 0);
//#endif

		/* add unlock ref to show one less waiter */
		cond_dropwait(cond, local_error, 0);
	} else {
//#if _KSYN_TRACE_
//		(void)__kdebug_trace(0x9000070 | 0, (uint32_t)cond, 0xf3f3f4f4, updateval, 0, 0);
//#endif
		/* successful wait */
		if (updateval != 0) {
			/* the return is due to a prepost and might carry bit states */
			/* update S and return for prepost if needed */
			cond_dropwait(cond, 0, updateval);
		}
		retval = 0;
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_NONE, (uint32_t)cond, 4, retval, 0, 0);
#endif
	pthread_mutex_lock(omutex);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CVWAIT | DBG_FUNC_END, (uint32_t)cond, 0, 0, retval, 0);
#endif
	return (retval);
}

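/*
 * Worked sketch (illustrative only, not compiled into the library): the
 * conforming timedwait path above converts the caller's absolute deadline
 * into the relative interval handed to __psynch_cvwait(), using
 * gettimeofday() as the time base.  The same arithmetic in isolation; the
 * helper name is hypothetical.
 */
#if 0
#include <sys/time.h>
#include <errno.h>

static int example_abstime_to_relative(const struct timespec *abstime,
		struct timespec *rel)
{
	struct timeval tv;
	struct timespec now;

	gettimeofday(&tv, NULL);
	TIMEVAL_TO_TIMESPEC(&tv, &now);

	rel->tv_nsec = abstime->tv_nsec - now.tv_nsec;
	rel->tv_sec  = abstime->tv_sec  - now.tv_sec;
	if (rel->tv_nsec < 0) {			/* borrow a second */
		rel->tv_nsec += NSEC_PER_SEC;
		rel->tv_sec--;
	}
	/* a deadline already in the past times out immediately */
	if ((rel->tv_sec < 0) || ((rel->tv_sec == 0) && (rel->tv_nsec == 0)))
		return ETIMEDOUT;
	return 0;
}
#endif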
/*
 * For the new-style mutex, interlocks are not held all the time,
 * so the signature must be set last, and the store must be
 * protected against being reordered by the compiler.
 */
static void
__pthread_cond_set_signature(npthread_cond_t * cond)
{
	cond->sig = _PTHREAD_COND_SIG;
}


static void
cond_cleanup(void *arg)
{
	npthread_cond_t *cond = (npthread_cond_t *)arg;
	pthread_mutex_t *mutex;

	// 4597450: begin
	pthread_t thread = pthread_self();
	int thcanceled = 0;

	LOCK(thread->lock);
	thcanceled = (thread->detached & _PTHREAD_WASCANCEL);
	UNLOCK(thread->lock);

	if (thcanceled == 0)
		return;

	// 4597450: end
	mutex = (pthread_mutex_t *)cond->busy;

	/* add unlock ref to show one less waiter */
	cond_dropwait(cond, thread->cancel_error, 0);

	/*
	 * Can't do anything if this fails -- we're on the way out
	 */
	if (mutex != NULL)
		(void)pthread_mutex_lock(mutex);
}

#define ECVCERORR	256
#define ECVPERORR	512

void
cond_dropwait(npthread_cond_t * cond, int error, uint32_t updateval)
{
	int sig = cond->sig;
	pthread_cond_t * ocond = (pthread_cond_t *)cond;
	int needclearpre = 0;
	uint32_t diffgen, nlval, ulval, flags;
	uint32_t lcntval, ucntval, scntval, lval;
	volatile uint32_t * c_lseqcnt, *c_useqcnt, *c_sseqcnt;
	uint64_t oldval64, newval64;

	/* provide backwards compatibility for apps using statically initialized cond vars */

	if (sig != _PTHREAD_COND_SIG)
		return;

	COND_GETSEQ_ADDR(cond, c_lseqcnt, c_useqcnt, c_sseqcnt);

	if (error != 0) {
		lval = PTHRW_INC;
		if ((error & ECVCERORR) != 0)
			lval |= PTH_RWS_CV_CBIT;
		if ((error & ECVPERORR) != 0)
			lval |= PTH_RWS_CV_PBIT;
	} else {
		lval = updateval;
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_START, (uint32_t)cond, error, updateval, 0xee, 0);
#endif
retry:
	lcntval = *c_lseqcnt;
	ucntval = *c_useqcnt;
	scntval = *c_sseqcnt;

	diffgen = diff_genseq((lcntval & PTHRW_COUNT_MASK), (scntval & PTHRW_COUNT_MASK));	/* pending waiters */
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, lcntval, scntval, diffgen, 0);
#endif
	if (diffgen <= 0) {
		/* TBD: assert; this should not be the case */
		/* validate it is spurious and return */
		oldval64 = (((uint64_t)scntval) << 32);
		oldval64 |= lcntval;
		newval64 = oldval64;
		if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
			goto retry;
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_END, (uint32_t)cond, 0, 0, 0, 0);
#endif
		return;
	}

	/* update S by one */
	oldval64 = (((uint64_t)scntval) << 32);
	oldval64 |= lcntval;

	/* update scntval with number of expected returns and bits */
	ulval = (scntval & PTHRW_COUNT_MASK) + (lval & PTHRW_COUNT_MASK);
	/* set bits */
	ulval |= ((scntval & PTH_RWS_CV_BITSALL) | (lval & PTH_RWS_CV_BITSALL));

	nlval = lcntval;

	needclearpre = 0;

	/* If L==S, need to return to kernel */
	if ((nlval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
		if ((ulval & PTH_RWS_CV_BITSALL) == PTH_RWS_CV_BITSALL) {
			/* reset the P bit but retain the C bit on the S word */
			needclearpre = 1;
			ulval &= PTH_RWS_CV_RESET_PBIT;
		}
	}

	newval64 = (((uint64_t)ulval) << 32);
	newval64 |= nlval;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, 0xffff, nlval, ulval, 0);
#endif
	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)c_lseqcnt) != TRUE)
		goto retry;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, (uint32_t)cond, 2, 0, 0xee, 0);
#endif
	if ((nlval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
		/* last user; remove the mutex */
		cond->busy = NULL;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_NONE, nlval, ucntval, ulval, PTHRW_INC, 0);
#endif
	if (needclearpre != 0) {
		flags = 0;
		if (cond->pshared == PTHREAD_PROCESS_SHARED)
			flags |= _PTHREAD_MTX_OPT_PSHARED;
		/* reset prepost */
		(void)__psynch_cvclrprepost(ocond, nlval, ucntval, ulval, 0, nlval, flags);
	}
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_UM_CDROPWT | DBG_FUNC_END, nlval, ucntval, ulval, PTHRW_INC, 0);
#endif
	return;
}


int
pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
		pthread_mutex_t *mutex,
		const struct timespec *abstime)
{
	return (_pthread_cond_wait(cond, mutex, abstime, 1, 0));
}
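/*
 * Usage sketch (illustrative only, not compiled into the library):
 * pthread_cond_timedwait_relative_np() takes a relative interval rather than
 * an absolute deadline, i.e. "how long to wait from now".  Note that in the
 * loop below the interval re-arms after every spurious wakeup; the helper
 * name is hypothetical.
 */
#if 0
#include <pthread.h>
#include <errno.h>

static int example_wait_up_to_one_second(pthread_cond_t *cv,
		pthread_mutex_t *mtx, int *predicate)
{
	struct timespec interval = { 1, 0 };	/* one second from now */
	int rc = 0;

	pthread_mutex_lock(mtx);
	while (*predicate == 0 && rc != ETIMEDOUT)
		rc = pthread_cond_timedwait_relative_np(cv, mtx, &interval);
	pthread_mutex_unlock(mtx);
	return rc;
}
#endif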



#else /* !BUILDING_VARIANT */

extern int _pthread_cond_wait(pthread_cond_t *cond,
			pthread_mutex_t *mutex,
			const struct timespec *abstime,
			int isRelative,
			int isconforming);

#endif /* !BUILDING_VARIANT ] */
/*
 * Initialize a condition variable. This is the public interface.
 * We can't trust the lock, so initialize it first before taking
 * it.
 */
int
pthread_cond_init(pthread_cond_t *cond,
		const pthread_condattr_t *attr)
{
	int conforming;

#if __DARWIN_UNIX03
	conforming = 1;
#else /* __DARWIN_UNIX03 */
	conforming = 0;
#endif /* __DARWIN_UNIX03 */

	/* lock is at the same offset in both structures */
	LOCK_INIT(cond->lock);

	return (_pthread_cond_init(cond, attr, conforming));
}
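/*
 * Usage sketch (illustrative only, not compiled into the library): both
 * initialization styles this file accepts.  A statically initialized cond var
 * carries _PTHREAD_COND_SIG_init and is lazily upgraded by the first
 * wait/signal/broadcast, while pthread_cond_init() sets the full signature
 * immediately.  The names are hypothetical.
 */
#if 0
#include <pthread.h>

static pthread_cond_t example_static_cv = PTHREAD_COND_INITIALIZER;	/* lazy init */

static void example_dynamic_init(void)
{
	pthread_cond_t cv;
	pthread_condattr_t attr;

	pthread_condattr_init(&attr);
	pthread_cond_init(&cv, &attr);		/* eager init with attributes */
	pthread_condattr_destroy(&attr);
	/* ... use cv ..., then: */
	pthread_cond_destroy(&cv);
}
#endif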

/*
int
pthread_cond_wait(pthread_cond_t *cond,
		pthread_mutex_t *mutex)

int
pthread_cond_timedwait(pthread_cond_t *cond,
		pthread_mutex_t *mutex,
		const struct timespec *abstime)

moved to pthread_cancelable.c */