]> git.saurik.com Git - apple/libc.git/blob - pthreads/pthread_rwlock.c
75c6a39dba41c1b82cfdad3d31dc569f2811be3d
[apple/libc.git] / pthreads / pthread_rwlock.c
1 /*
2 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*-
24 * Copyright (c) 1998 Alex Nash
25 * All rights reserved.
26 *
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
29 * are met:
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
35 *
36 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
37 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
38 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
39 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
40 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
42 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
43 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
44 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
45 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
46 * SUCH DAMAGE.
47 *
48 * $FreeBSD: src/lib/libc_r/uthread/uthread_rwlock.c,v 1.6 2001/04/10 04:19:20 deischen Exp $
49 */
50
51 /*
52 * POSIX Pthread Library
53 * -- Read Write Lock support
54 * 4/24/02: A. Ramesh
55 * Ported from FreeBSD
56 */
57
58 #include "pthread_internals.h"
59 #include <stdio.h> /* For printf(). */
60
61 extern int __unix_conforming;
62
63 #ifdef PLOCKSTAT
64 #include "plockstat.h"
65 #else /* !PLOCKSTAT */
66 #define PLOCKSTAT_RW_ERROR(x, y, z)
67 #define PLOCKSTAT_RW_BLOCK(x, y)
68 #define PLOCKSTAT_RW_BLOCKED(x, y, z)
69 #define PLOCKSTAT_RW_ACQUIRE(x, y)
70 #define PLOCKSTAT_RW_RELEASE(x, y)
71 #endif /* PLOCKSTAT */
72
73 #define READ_LOCK_PLOCKSTAT 0
74 #define WRITE_LOCK_PLOCKSTAT 1
75
76 #define BLOCK_FAIL_PLOCKSTAT 0
77 #define BLOCK_SUCCESS_PLOCKSTAT 1
78
79 /* maximum number of times a read lock may be obtained */
80 #define MAX_READ_LOCKS (INT_MAX - 1)
81
82
83 #ifndef BUILDING_VARIANT /* [ */
84 __private_extern__ int usenew_impl = 1;
85 #else /* BUILDING_VARIANT */
86 extern int usenew_impl;
87 #endif /* BUILDING_VARIANT */
88
89 extern int PR_5243343_flag;
90
/*
 * Compute the addresses of the lock-count (L), sequence (S) and
 * unlock-count (U) words of a rwlock.  When the structure is not
 * 8-byte aligned, all three words shift up one slot so the L/S pair
 * stays in a naturally aligned 64-bit word (required for the 64-bit
 * CAS over the pair).  The layout is identical for LP64 and ILP32 —
 * the original had two byte-identical #if __LP64__ arms, collapsed
 * here into a single definition.
 */
#define RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr) \
{ \
	if (rwlock->misalign != 0) { \
		lcntaddr = &rwlock->rw_seq[1]; \
		seqaddr = &rwlock->rw_seq[2]; \
		ucntaddr = &rwlock->rw_seq[3]; \
	} else { \
		lcntaddr = &rwlock->rw_seq[0]; \
		seqaddr = &rwlock->rw_seq[1]; \
		ucntaddr = &rwlock->rw_seq[2]; \
	} \
}
118
119 __private_extern__ int __pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
120
121
122 #define _KSYN_TRACE_ 0
123
124 #if _KSYN_TRACE_
125 #include <sys/sysctl.h>
126 #ifndef BUILDING_VARIANT /* [ */
127 static void set_enable(int);
128 #endif /* !BUILDING_VARIANT ] */
129
130 /* The Function qualifiers */
131 #define DBG_FUNC_START 1
132 #define DBG_FUNC_END 2
133 #define DBG_FUNC_NONE 0
134
135 int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
136
137 #define _KSYN_TRACE_RW_RDLOCK 0x9000080
138 #define _KSYN_TRACE_RW_WRLOCK 0x9000084
139 #define _KSYN_TRACE_RW_UNLOCK 0x9000088
140 #define _KSYN_TRACE_RW_UNACT1 0x900808c
141 #define _KSYN_TRACE_RW_UNACT2 0x9008090
142 #define _KSYN_TRACE_RW_UNACTK 0x9008094
143 #define _KSYN_TRACE_RW_UNACTE 0x9008098
144 #define _KSYN_TRACE_RW_UNACTR 0x900809c
145 #define _KSYN_TRACE_RW_TOOMANY 0x90080a0
146 #define _KSYN_TRACE_RW_TRYWRLOCK 0x90080a4
147 #define _KSYN_TRACE_RW_TRYRDLOCK 0x90080a8
148 #endif /* _KSYN_TRACE_ */
149
150 __private_extern__ void rwlock_action_onreturn(pthread_rwlock_t * rwlock, uint32_t updateval);
151 __private_extern__ int rw_diffgenseq(uint32_t x, uint32_t y);
152
153 #ifndef BUILDING_VARIANT /* [ */
154 static uint32_t modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits);
155
156 int
157 pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
158 {
159 attr->sig = _PTHREAD_RWLOCK_ATTR_SIG;
160 attr->pshared = _PTHREAD_DEFAULT_PSHARED;
161 return (0);
162 }
163
164 int
165 pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
166 {
167 attr->sig = _PTHREAD_NO_SIG; /* Uninitialized */
168 attr->pshared = 0;
169 return (0);
170 }
171
172 int
173 pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr,
174 int *pshared)
175 {
176 if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG)
177 {
178 *pshared = (int)attr->pshared;
179 return (0);
180 } else
181 {
182 return (EINVAL); /* Not an initialized 'attribute' structure */
183 }
184 }
185
186
187 int
188 pthread_rwlockattr_setpshared(pthread_rwlockattr_t * attr, int pshared)
189 {
190 if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG)
191 {
192 #if __DARWIN_UNIX03
193 if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
194 #else /* __DARWIN_UNIX03 */
195 if ( pshared == PTHREAD_PROCESS_PRIVATE)
196 #endif /* __DARWIN_UNIX03 */
197 {
198 attr->pshared = pshared ;
199 return (0);
200 } else
201 {
202 return (EINVAL); /* Invalid parameter */
203 }
204 } else
205 {
206 return (EINVAL); /* Not an initialized 'attribute' structure */
207 }
208
209 }
210
211 __private_extern__ int
212 __pthread_rwlock_init(pthread_rwlock_t * orwlock, const pthread_rwlockattr_t *attr)
213 {
214 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
215
216 if ((attr != NULL) && (attr->pshared == PTHREAD_PROCESS_SHARED)) {
217 rwlock->pshared = PTHREAD_PROCESS_SHARED;
218 rwlock->rw_flags = PTHRW_KERN_PROCESS_SHARED;
219 } else {
220 rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
221 rwlock->rw_flags = PTHRW_KERN_PROCESS_PRIVATE;
222 }
223
224 if (((uintptr_t)rwlock & 0x07) != 0) {
225 rwlock->misalign = 1;
226 #if defined(__LP64__)
227 rwlock->rw_lcntaddr = &rwlock->rw_seq[1];
228 rwlock->rw_seqaddr = &rwlock->rw_seq[2];
229 rwlock->rw_ucntaddr = &rwlock->rw_seq[3];
230 rwlock->rw_seq[1]= PTHRW_RWLOCK_INIT;
231 rwlock->rw_seq[2]= PTHRW_RWS_INIT;
232 rwlock->rw_seq[3]= 0;
233 #else /* __LP64__ */
234 rwlock->rw_lcntaddr = &rwlock->rw_seq[1];
235 rwlock->rw_seqaddr = &rwlock->rw_seq[2];
236 rwlock->rw_ucntaddr = &rwlock->rw_seq[3];
237 rwlock->rw_seq[1]= PTHRW_RWLOCK_INIT;
238 rwlock->rw_seq[2]= PTHRW_RWS_INIT;
239 rwlock->rw_seq[3]= 0;
240 #endif /* __LP64__ */
241
242 } else {
243 rwlock->misalign = 0;
244 #if defined(__LP64__)
245 rwlock->rw_lcntaddr = &rwlock->rw_seq[0];
246 rwlock->rw_seqaddr = &rwlock->rw_seq[1];
247 rwlock->rw_ucntaddr = &rwlock->rw_seq[2];
248 rwlock->rw_seq[0]= PTHRW_RWLOCK_INIT;
249 rwlock->rw_seq[1]= PTHRW_RWS_INIT;
250 rwlock->rw_seq[2]= 0;
251 #else /* __LP64__ */
252 rwlock->rw_lcntaddr = &rwlock->rw_seq[0];
253 rwlock->rw_seqaddr = &rwlock->rw_seq[1];
254 rwlock->rw_ucntaddr = &rwlock->rw_seq[2];
255 rwlock->rw_seq[0]= PTHRW_RWLOCK_INIT;
256 rwlock->rw_seq[1]= PTHRW_RWS_INIT;
257 rwlock->rw_seq[2]= 0;
258 #endif /* __LP64__ */
259
260 }
261
262 rwlock->reserv = 0;
263 rwlock->rw_owner = NULL;
264 #if defined(__LP64__)
265 memset(rwlock->rfu, 0, PTHRW_RFU_64BIT);
266 #else
267 memset(rwlock->rfu, 0, PTHRW_RFU_32BIT);
268 #endif
269
270 rwlock->sig = _PTHREAD_RWLOCK_SIG;
271
272 return(0);
273 }
274
#if _KSYN_TRACE_
/*
 * Toggle the kernel kdebug trace facility (debug builds only), used by
 * the abort paths below to freeze the trace buffer before LIBC_ABORT.
 * NOTE(review): mib[4] and mib[5] are initialized but the sysctl call
 * passes a namelen of 4, so only mib[0..3] are consumed — presumably
 * intentional for KERN_KDENABLE; confirm against the kdebug interface.
 */
static void
set_enable(int val)
{
	int mib[6];
	size_t needed = 0;

	mib[0] = CTL_KERN;
	mib[1] = KERN_KDEBUG;
	mib[2] = KERN_KDENABLE;
	mib[3] = val;
	mib[4] = 0;
	mib[5] = 0;
	/* best effort to stop the trace */
	(void)sysctl(mib, 4, NULL, &needed, NULL, 0);
}
#endif
292
/*
 * Merge the state bits the kernel reported back (updateval) into the
 * current L word (lgenval), preserving the count portion and
 * re-applying any waiter bits that were parked on the S word at
 * downgrade time (savebits).  Returns the new L word value.
 */
static uint32_t
modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits)
{
	uint32_t lval = lgenval & PTHRW_BIT_MASK;	/* current state bits */
	uint32_t uval = updateval & PTHRW_BIT_MASK;	/* bits from kernel */
	uint32_t rval, nlval;

	/* union of both bit sets, minus the overlap (M) marker */
	nlval = (lval | uval) & ~(PTH_RWL_MBIT);

	/* reconcile bits on the lock with what kernel needs to set */
	if ((uval & PTH_RWL_LBIT) != 0)
		nlval &= ~PTH_RWL_KBIT;
	else if (((uval & PTH_RWL_KBIT) == 0) && ((lval & PTH_RWL_WBIT) == 0))
		nlval &= ~PTH_RWL_KBIT;

	if (savebits !=0 ) {
		/* saved writer-waiting bit: restore unless a writer or the
		 * exclusive bit is already present */
		if (((savebits & PTH_RWS_WSVBIT) != 0) && ((nlval & PTH_RWL_WBIT) == 0) &&
			((nlval & PTH_RWL_EBIT) == 0)) {
			if ((nlval & PTH_RWL_LBIT) == 0)
				nlval |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
			else
				nlval |= PTH_RWL_WBIT;
		}
		/* saved yielding-writer bit */
		if (((savebits & PTH_RWS_YSVBIT) != 0) && ((nlval & PTH_RWL_YBIT) == 0) &&
			((nlval & PTH_RWL_EBIT) == 0)) {
			nlval |= PTH_RWL_YBIT;
		}
		/* saved upgrade bit */
		if (((savebits & PTH_RWS_USVBIT) != 0) && ((nlval & PTH_RWL_EBIT) == 0)) {
			if ((nlval & PTH_RWL_LBIT) == 0)
				nlval |= (PTH_RWL_UBIT | PTH_RWL_KBIT);
			else
				nlval |= PTH_RWL_UBIT;
		}
	}
	/* keep the count portion of the original L word, apply new bits */
	rval = (lgenval & PTHRW_COUNT_MASK) | nlval;
	return(rval);
}
330
331
/*
 * Post-syscall fixup: after a __psynch_rw_* call returns, fold the
 * kernel's view (updateval) back into the user-space L and S words.
 * The update is applied with a 64-bit CAS over the adjacent L/S pair,
 * retrying from 'loop' until it lands.
 */
__private_extern__ void
rwlock_action_onreturn(pthread_rwlock_t * orwlock, uint32_t updateval)
{

	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t lcntval, rw_seq, newval = 0, newsval, lval, uval;
	volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
	uint64_t oldval64, newval64;
	int setbits = 0;	/* nonzero => L word must be rewritten via modbits() */
	int overlap = 0;
	uint32_t savebits = 0;	/* waiter bits parked on the S word, to restore */
	int isoverlap = 0;	/* kernel flagged an overlapping (M bit) return */

	/* TBD: restore U bit */
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
	} else {
		lcntaddr = rwlock->rw_lcntaddr;
		seqaddr = rwlock->rw_seqaddr;
	}

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_START, updateval, 0, 0, 0, 0);
#endif

	isoverlap = updateval & PTH_RWL_MBIT;

loop:
	setbits = 0;
	lcntval = *lcntaddr;
	rw_seq = *seqaddr;
	savebits = 0;

	if (isoverlap != 0) {
		/* overlap return, just increment and inspect bits */
		setbits = 1;
		overlap = 1;
		/* set s word, increment by specified value */
		newsval = rw_seq + (updateval & PTHRW_COUNT_MASK);
		if ((newsval & PTHRW_RWS_SAVEMASK) != 0) {
			savebits = newsval & PTHRW_RWS_SAVEMASK;
			newsval &= ~PTHRW_RWS_SAVEMASK;
		}
	} else {
		/* normal return */
		if (is_rws_setunlockinit(rw_seq) != 0) {
			setbits = 1;
			/* set s word to passed in value */
			newsval = (rw_seq & PTHRW_COUNT_MASK) + (updateval & PTHRW_COUNT_MASK);
			if ((rw_seq & PTHRW_RWS_SAVEMASK) != 0) {
				savebits = rw_seq & PTHRW_RWS_SAVEMASK;
				newsval &= ~PTHRW_RWS_SAVEMASK;
			}
		} else {
			/* nothing to change; keep current values */
			newval = lcntval;
			newsval = rw_seq;
		}
	}
	if (setbits != 0) {
		newval = modbits(lcntval, updateval, savebits);

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, rw_seq, newsval, 0xeeeeeeee, updateval, 0);
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, lcntval, newval, 0xeeeeeeee, updateval, 0);
#endif
		/* CAS the L/S pair as one 64-bit quantity; retry on contention */
		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;
		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;

		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
			goto loop;
		/* Check for consistency */
		/* NOTE(review): lval/uval are computed here but never compared
		 * against anything — the consistency check looks unfinished. */
		lval = lcntval & PTHRW_BIT_MASK;
		uval = updateval & PTHRW_BIT_MASK;
	}

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_END, rw_seq, newsval, 0xffffffff, 0, 0);
#endif
	return;
}
418
/* returns are not bit shifted */
/*
 * Distance between two generation sequence numbers, accounting for
 * wraparound at PTHRW_MAX_READERS.  Callers use it to bound the number
 * of outstanding read locks.
 * NOTE(review): the wraparound branch subtracts the unmasked 'y'
 * rather than the masked 'ly'; if 'y' can carry state bits the result
 * is skewed — confirm callers always pass a count-only value.
 */
__private_extern__ int
rw_diffgenseq(uint32_t x, uint32_t y)
{
	uint32_t lx = (x & PTHRW_COUNT_MASK);
	uint32_t ly = (y &PTHRW_COUNT_MASK);

	if (lx > ly) {
		return(lx-ly);
	} else {
		/* y has wrapped past x */
		return((PTHRW_MAX_READERS - y) + lx + PTHRW_INC);
	}

}
433
434 #ifdef NOTYET
435 /********************************************************** */
436 static int pthread_rwlock_upgrade_internal(pthread_rwlock_t * orwlock, int trylock);
437
438 int
439 pthread_rwlock_longrdlock_np(pthread_rwlock_t * orwlock)
440 {
441 pthread_t self;
442 uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
443 int error = 0, retry_count = 0;
444 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
445 uint64_t oldval64, newval64;
446 volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
447 uint64_t myid = 0;
448
449 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
450 LOCK(rwlock->lock);
451 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
452 if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
453 UNLOCK(rwlock->lock);
454 PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
455 return(error);
456 }
457 } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
458 UNLOCK(rwlock->lock);
459 PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
460 return(EINVAL);
461 }
462 UNLOCK(rwlock->lock);
463 }
464
465 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
466 RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
467 } else {
468 lcntaddr = rwlock->rw_lcntaddr;
469 ucntaddr = rwlock->rw_ucntaddr;
470 seqaddr = rwlock->rw_seqaddr;
471 }
472
473 loop:
474 lcntval = *lcntaddr;
475 ucntval = *ucntaddr;
476 rw_seq = *seqaddr;
477
478 if (can_rwl_longreadinuser(lcntval))
479 goto gotlock;
480
481 #if __DARWIN_UNIX03
482 if (is_rwl_ebit_set(lcntval)) {
483 self = pthread_self();
484 if(rwlock->rw_owner == self) {
485 error = EDEADLK;
486 goto out;
487 }
488 }
489 #endif /* __DARWIN_UNIX03 */
490
491 /* need to block in kernel */
492 newval = (lcntval + PTHRW_INC);
493
494 newsval = rw_seq;
495 if (is_rws_setseq(rw_seq)) {
496 newsval &= PTHRW_SW_Reset_BIT_MASK;
497 newsval |= (newval & PTHRW_COUNT_MASK);
498 }
499
500 /* update lock seq and block in kernel */
501
502 oldval64 = (((uint64_t)rw_seq) << 32);
503 oldval64 |= lcntval;
504
505 newval64 = (((uint64_t)(newsval)) << 32);
506 newval64 |= newval;
507
508 if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
509 goto loop;
510 kblock:
511 updateval = __psynch_rw_longrdlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
512 if (updateval == (uint32_t)-1) {
513 error = errno;
514 } else
515 error = 0;
516
517 if (error == EINTR)
518 goto kblock;
519
520 if (error == 0) {
521 rwlock_action_onreturn(orwlock, updateval);
522 if ( is_rwl_lbit_clear(updateval)) {
523 #if _KSYN_TRACE_
524 set_enable(2);
525 #endif /* _KSYN_TRACE_ */
526 (void)pthread_threadid_np(pthread_self(), &myid);
527 LIBC_ABORT("yieldwrlock from kernel without EBit %x: tid %x\n", updateval, (uint32_t)myid);
528 /* kernel cannot wakeup without granting E bit */
529 }
530 goto successout;
531 } else {
532 #if _KSYN_TRACE_
533 set_enable(2);
534 #endif /* _KSYN_TRACE_ */
535 (void)pthread_threadid_np(pthread_self(), &myid);
536 LIBC_ABORT("yieldwrlock from kernel with unknown error %x: tid %x\n", updateval, (uint32_t)myid);
537 goto out;
538 }
539
540 gotlock:
541 if (rw_diffgenseq(lcntval, ucntval) >= PTHRW_MAX_READERS) {
542 /* since ucntval may be newer, just redo */
543 retry_count++;
544 if (retry_count > 1024) {
545
546 #if _KSYN_TRACE_
547 if (__pthread_lock_debug != 0)
548 (void)__kdebug_trace(_KSYN_TRACE_RW_TOOMANY | DBG_FUNC_NONE, (uint32_t)rwlock, 0XEEEEEEEE, lcntval, ucntval, 0);
549 #endif
550 error = EAGAIN;
551 goto out;
552 } else {
553 sched_yield();
554 goto loop;
555 }
556 }
557
558 /* Need to update L and S word */
559 newval = (lcntval + PTHRW_INC) | PTH_RWL_LBIT;
560 newsval = (rw_seq + PTHRW_INC);
561
562 oldval64 = (((uint64_t)rw_seq) << 32);
563 oldval64 |= lcntval;
564 newval64 = (((uint64_t)newsval) << 32);
565 newval64 |= newval;
566
567 if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
568 goto loop;
569
570 successout:
571 PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
572 return(0);
573 out:
574 PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
575 return(error);
576 }
577
/*
 * Writer lock that "yields": registers with the Y bit so the kernel can
 * prefer other waiters; otherwise behaves as a blocking write lock.
 * NOTE(review): on the user-space fast path (lcntval == PTHRW_RWL_INIT)
 * the code jumps to 'gotit' and then calls rwlock_action_onreturn()
 * with 'updateval' never assigned — a latent uninitialized read.
 * This sits inside #ifdef NOTYET; confirm before enabling.
 */
int
pthread_rwlock_yieldwrlock_np(pthread_rwlock_t * orwlock)
{
	uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
	int error = 0;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint64_t oldval64, newval64;
	volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
	uint64_t myid = 0;

	/* lazy-init statically initialized locks; reject bad signatures */
	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		LOCK(rwlock->lock);
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
				UNLOCK(rwlock->lock);
				PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
				return(error);
			}
		} else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
			UNLOCK(rwlock->lock);
			PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
			return(EINVAL);
		}
		UNLOCK(rwlock->lock);
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
	} else {
		lcntaddr = rwlock->rw_lcntaddr;
		ucntaddr = rwlock->rw_ucntaddr;
		seqaddr = rwlock->rw_seqaddr;
	}

loop:
	lcntval = *lcntaddr;
	ucntval = *ucntaddr;
	rw_seq = *seqaddr;

#if __DARWIN_UNIX03
	/* deadlock detection: caller already holds it exclusively */
	if (is_rwl_ebit_set(lcntval)) {
		if (rwlock->rw_owner == self) {
			error = EDEADLK;
			goto out;
		}
	}
#endif /* __DARWIN_UNIX03 */

	if (lcntval == PTHRW_RWL_INIT) {
		/* if we can acquire set L and S word */
		lcntval = PTHRW_RWL_INIT;
		newval = PTHRW_RWL_INIT | PTHRW_INC | PTH_RWL_KBIT| PTH_RWL_EBIT;
		newsval = rw_seq + PTHRW_INC;

		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;

		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;

		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) == TRUE) {
			goto gotit;
		} else
			goto loop;
	}

	/* contended: bump the count and advertise a yielding writer (Y bit) */
	newval = (lcntval + PTHRW_INC)| PTH_RWL_YBIT;

	newsval = rw_seq;
	if (is_rws_setseq(rw_seq)) {
		newsval &= PTHRW_SW_Reset_BIT_MASK;
		newsval |= (newval & PTHRW_COUNT_MASK);
	}

	oldval64 = (((uint64_t)rw_seq) << 32);
	oldval64 |= lcntval;

	newval64 = (((uint64_t)(newsval)) << 32);
	newval64 |= newval;

	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
		goto loop;

	PLOCKSTAT_RW_BLOCK(orwlock, WRITE_LOCK_PLOCKSTAT);
retry:
	updateval = __psynch_rw_yieldwrlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	if (error == EINTR)
		goto retry;


	PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
	if (error != 0) {
#if _KSYN_TRACE_
		set_enable(2);
#endif /* _KSYN_TRACE_ */
		(void)pthread_threadid_np(pthread_self(), &myid);
		LIBC_ABORT("yieldwrlock from kernel with unknown error %x: tid %x\n", updateval, (uint32_t)myid);
	}


out:
	if (error == 0) {
gotit:
		/* lock granted: fold the kernel state back into L/S words */
		rwlock_action_onreturn(orwlock, updateval);
		if ( is_rwl_ebit_clear(updateval)) {
#if _KSYN_TRACE_
			set_enable(2);
#endif /* _KSYN_TRACE_ */
			(void)pthread_threadid_np(pthread_self(), &myid);
			LIBC_ABORT("yieldwrlock from kernel without EBit %x: tid %x\n", updateval, (uint32_t)myid);
		}
#if __DARWIN_UNIX03
		rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
		return(0);
	} else {
		PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
		return(error);
	}
}
/*
 * Downgrade an exclusively (write) held lock to a read hold.  When no
 * other thread is waiting the conversion happens entirely in user
 * space; otherwise the waiter bits (W/U/Y) are parked on the S word as
 * "save" bits and __psynch_rw_downgrade() lets the kernel complete the
 * handoff.  Returns 0 on success, EINVAL if the caller does not hold
 * the lock exclusively.
 */
int
pthread_rwlock_downgrade_np(pthread_rwlock_t * orwlock)
{
	uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
	int error = 0, haswbit = 0, hasubit = 0, hasybit = 0;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint64_t oldval64, newval64;
	volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
	uint64_t myid = 0;

	/* lazy-init statically initialized locks; reject bad signatures */
	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		LOCK(rwlock->lock);
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
				UNLOCK(rwlock->lock);
				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
				return(error);
			}
		} else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
			UNLOCK(rwlock->lock);
			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
			return(EINVAL);
		}
		UNLOCK(rwlock->lock);
	}
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
	} else {
		lcntaddr = rwlock->rw_lcntaddr;
		ucntaddr = rwlock->rw_ucntaddr;
		seqaddr = rwlock->rw_seqaddr;
	}

loop:
	lcntval = *lcntaddr;
	ucntval = *ucntaddr;
	rw_seq = *seqaddr;


	/* if not holding exclusive lock, return */
	if ((is_rwl_ebit_set(lcntval )== 0) || (rwlock->rw_owner != self)) {
		return(EINVAL);
	}

	/* no other waiters and be granted in user space? ? */
	if ((lcntval & PTHRW_COUNT_MASK) == (ucntval + PTHRW_INC)) {
#if 0
		/* should have no write waiters pending */
		if (is_rwl_wbit_set(lcntval) != 0) {
#if _KSYN_TRACE_
			set_enable(2);
#endif /* _KSYN_TRACE_ */
			(void)pthread_threadid_np(pthread_self(), &myid);
			LIBC_ABORT("downgrade in user mode but W bit set %x: tid %x\n", lcntval, (uint32_t)myid);
		}
#endif
		/* preserve count and remove ke bits */
		newval = lcntval & ~(PTH_RWL_EBIT | PTH_RWL_KBIT);
		/* if we can acquire set L and S word */
		newsval = rw_seq;

		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;

		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;

		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) == TRUE) {
#if __DARWIN_UNIX03
			rwlock->rw_owner = (pthread_t)0;
#endif /* __DARWIN_UNIX03 */
			return(0);
		} else
			goto loop;
	} else {
		/* waiters present: remember which waiter bits were up */
		haswbit = lcntval & PTH_RWL_WBIT;
		hasubit = lcntval & PTH_RWL_UBIT;
		hasybit = lcntval & PTH_RWL_YBIT;

		/* reset all bits and set k */
		newval = (lcntval & PTHRW_COUNT_MASK) | PTH_RWL_KBIT;
		/* set I bit on S word */
		newsval = rw_seq | PTH_RWS_IBIT;
		if (haswbit != 0)
			newsval |= PTH_RWS_WSVBIT;
		if (hasubit != 0)
			newsval |= PTH_RWS_USVBIT;
		if (hasybit != 0)
			newsval |= PTH_RWS_YSVBIT;

		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;

		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;

		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
			goto loop;

#if __DARWIN_UNIX03
		rwlock->rw_owner = 0;
#endif /* __DARWIN_UNIX03 */

retry:
		/* hand off to the kernel to wake/convert the waiters */
		updateval = __psynch_rw_downgrade(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
		if (updateval == (uint32_t)-1) {
			error = errno;
		} else
			error = 0;

		/* TBD: what to do with the error, EINTR ?? */
		if (error == EINTR)
			goto retry;

		if (error == 0) {
			rwlock_action_onreturn(orwlock, updateval);
			return(0);
		} else {
#if _KSYN_TRACE_
			set_enable(1);
#endif /* _KSYN_TRACE_ */
			(void)pthread_threadid_np(pthread_self(), &myid);
			LIBC_ABORT("downgrade from kernel with unknown error %x with tid %x\n", updateval, (uint32_t)myid);
		}
		/* Not reached */
	}
	return(EINVAL);
}
839
840 int
841 pthread_rwlock_upgrade_np(pthread_rwlock_t * orwlock)
842 {
843 return(pthread_rwlock_upgrade_internal(orwlock, 0));
844 }
845
846 int
847 pthread_rwlock_tryupgrade_np(pthread_rwlock_t *orwlock)
848 {
849 return(pthread_rwlock_upgrade_internal(orwlock, 1));
850 }
851
852 static int
853 pthread_rwlock_upgrade_internal(pthread_rwlock_t * orwlock, int trylock)
854 {
855 uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
856 int error = 0, flags ;
857 #if __DARWIN_UNIX03
858 pthread_t self = pthread_self();
859 #endif /* __DARWIN_UNIX03 */
860 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
861 uint64_t oldval64, newval64;
862 volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
863 uint64_t myid = 0;
864
865 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
866 /* check for static initialization */
867 LOCK(rwlock->lock);
868 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
869 if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
870 UNLOCK(rwlock->lock);
871 return(error);
872 }
873 } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
874 UNLOCK(rwlock->lock);
875 return(EINVAL);
876 }
877 UNLOCK(rwlock->lock);
878 }
879 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
880 RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
881 } else {
882 lcntaddr = rwlock->rw_lcntaddr;
883 ucntaddr = rwlock->rw_ucntaddr;
884 seqaddr = rwlock->rw_seqaddr;
885 }
886
887 loop:
888 lcntval = *lcntaddr;
889 ucntval = *ucntaddr;
890 rw_seq = *seqaddr;
891
892 if (is_rwl_eubit_set(lcntval) !=0) {
893 return(EBUSY);
894 }
895
896 /* set U and K bit and go to kernel */
897 newval = (lcntval | (PTH_RWL_UBIT | PTH_RWL_KBIT));
898 newsval = rw_seq;
899 #if 0
900 if (is_rws_setseq(rw_seq)) {
901 newsval &= PTHRW_SW_Reset_BIT_MASK;
902 newsval |= (newval & PTHRW_COUNT_MASK);
903 }
904 #endif
905
906 /* update lock seq and block in kernel */
907
908 oldval64 = (((uint64_t)rw_seq) << 32);
909 oldval64 |= lcntval;
910
911 newval64 = (((uint64_t)(newsval)) << 32);
912 newval64 |= newval;
913
914 if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
915 goto loop;
916 flags = rwlock->rw_flags;
917 if (trylock != 0) {
918 flags |= _PTHREAD_RWLOCK_UPGRADE_TRY;
919 }
920 retry:
921 updateval = __psynch_rw_upgrade(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
922 if (updateval == (uint32_t)-1) {
923 error = errno;
924 } else
925 error = 0;
926
927 if (error == EINTR)
928 goto retry;
929
930
931 if (error == 0) {
932 rwlock_action_onreturn(orwlock, updateval);
933 if ( is_rwl_ebit_clear(updateval)) {
934 #if _KSYN_TRACE_
935 set_enable(2);
936 #endif /* _KSYN_TRACE_ */
937 (void)pthread_threadid_np(pthread_self(), &myid);
938 LIBC_ABORT("upgrade from kernel without EBit %x: tid %x\n", updateval, (uint32_t)myid);
939 }
940 #if __DARWIN_UNIX03
941 rwlock->rw_owner = self;
942 #endif /* __DARWIN_UNIX03 */
943 return(0);
944 } else {
945 if (trylock != 0) {
946 return (EBUSY);
947 }
948 }
949
950 return(error);
951 }
952
953 /* Returns true if the rwlock is held for reading by any thread or held for writing by the current thread */
954 int
955 pthread_rwlock_held_np(pthread_rwlock_t * orwlock)
956 {
957 uint32_t lcntval, ucntval, rw_seq;
958 int error = 0;
959 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
960 volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
961
962 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
963 LOCK(rwlock->lock);
964 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
965 if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
966 UNLOCK(rwlock->lock);
967 return(0);
968 }
969 } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
970 UNLOCK(rwlock->lock);
971 return(-1);
972 }
973 UNLOCK(rwlock->lock);
974 }
975
976 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
977 RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
978 } else {
979 lcntaddr = rwlock->rw_lcntaddr;
980 ucntaddr = rwlock->rw_ucntaddr;
981 seqaddr = rwlock->rw_seqaddr;
982 }
983
984 lcntval = *lcntaddr;
985 ucntval = *ucntaddr;
986 rw_seq = *seqaddr;
987
988 if ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK))
989 return(0);
990
991 return(1);
992 }
993
994 /* Returns true if the rwlock is held for reading by any thread */
995 int
996 pthread_rwlock_rdheld_np(pthread_rwlock_t * orwlock)
997 {
998 uint32_t lcntval, ucntval, rw_seq;
999 int error = 0;
1000 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1001 volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
1002
1003 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
1004 LOCK(rwlock->lock);
1005 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
1006 if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
1007 UNLOCK(rwlock->lock);
1008 return(0);
1009 }
1010 } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
1011 UNLOCK(rwlock->lock);
1012 return(-1);
1013 }
1014 UNLOCK(rwlock->lock);
1015 }
1016
1017
1018 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1019 RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
1020 } else {
1021 lcntaddr = rwlock->rw_lcntaddr;
1022 ucntaddr = rwlock->rw_ucntaddr;
1023 seqaddr = rwlock->rw_seqaddr;
1024 }
1025
1026 lcntval = *lcntaddr;
1027 ucntval = *ucntaddr;
1028 rw_seq = *seqaddr;
1029
1030 if ((lcntval & PTHRW_COUNT_MASK) == (ucntval & PTHRW_COUNT_MASK))
1031 return(0);
1032
1033 if (is_rwl_ebit_set(lcntval) !=0) {
1034 return(0);
1035 }
1036 return(1);
1037 }
1038
1039 /* Returns true if the rwlock is held for writing by the current thread */
1040 int
1041 pthread_rwlock_wrheld_np(pthread_rwlock_t * orwlock)
1042 {
1043 uint32_t lcntval, ucntval, rw_seq;
1044 pthread_t self = pthread_self();
1045 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1046 volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
1047 int error = 0;
1048
1049 if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
1050 LOCK(rwlock->lock);
1051 if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
1052 if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
1053 UNLOCK(rwlock->lock);
1054 return(0);
1055 }
1056 } else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
1057 UNLOCK(rwlock->lock);
1058 return(-1);
1059 }
1060 UNLOCK(rwlock->lock);
1061 }
1062
1063 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1064 RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
1065 } else {
1066 lcntaddr = rwlock->rw_lcntaddr;
1067 ucntaddr = rwlock->rw_ucntaddr;
1068 seqaddr = rwlock->rw_seqaddr;
1069 }
1070
1071 lcntval = *lcntaddr;
1072 ucntval = *ucntaddr;
1073 rw_seq = *seqaddr;
1074
1075 if ((is_rwl_ebit_set(lcntval)) && (rwlock->rw_owner == self)) {
1076 return(1);
1077 }
1078 return(0);
1079 }
1080 /******************************************************/
1081 #endif /* NOTYET */
1082
1083
1084 #endif /* !BUILDING_VARIANT ] */
1085
1086 int
1087 pthread_rwlock_destroy(pthread_rwlock_t *orwlock)
1088 {
1089 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1090 #if __DARWIN_UNIX03
1091 uint32_t rw_lcnt, rw_ucnt;
1092 volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
1093 #endif /* __DARWIN_UNIX03 */
1094
1095 if (rwlock->sig != _PTHREAD_RWLOCK_SIG && rwlock->sig != _PTHREAD_RWLOCK_SIG_init)
1096 return(EINVAL);
1097 if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
1098 #if __DARWIN_UNIX03
1099 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1100 RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
1101 } else {
1102 lcntaddr = rwlock->rw_lcntaddr;
1103 ucntaddr = rwlock->rw_ucntaddr;
1104 seqaddr = rwlock->rw_seqaddr;
1105 }
1106
1107 rw_lcnt = *lcntaddr;
1108 rw_ucnt = *ucntaddr;
1109
1110 if((rw_lcnt & PTHRW_COUNT_MASK) != rw_ucnt)
1111 return(EBUSY);
1112
1113 #endif /* __DARWIN_UNIX03 */
1114 //bzero(rwlock, sizeof(npthread_rwlock_t));
1115 rwlock->sig = _PTHREAD_NO_SIG;
1116 return(0);
1117 } else if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
1118 rwlock->sig = _PTHREAD_NO_SIG;
1119 return(0);
1120 } else
1121 return(EINVAL);
1122 }
1123
1124
1125 int
1126 pthread_rwlock_init(pthread_rwlock_t * orwlock, const pthread_rwlockattr_t *attr)
1127 {
1128 npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
1129 #if __DARWIN_UNIX03
1130 uint32_t rw_lcnt, rw_ucnt;
1131 volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
1132 #endif /* __DARWIN_UNIX03 */
1133
1134 #if __DARWIN_UNIX03
1135 if (attr && (attr->sig != _PTHREAD_RWLOCK_ATTR_SIG)) {
1136 return(EINVAL);
1137 }
1138
1139 if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
1140 if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
1141 RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
1142 } else {
1143 lcntaddr = rwlock->rw_lcntaddr;
1144 ucntaddr = rwlock->rw_ucntaddr;
1145 seqaddr = rwlock->rw_seqaddr;
1146 }
1147
1148 rw_lcnt = *lcntaddr;
1149 rw_ucnt = *ucntaddr;
1150
1151 if ((rw_lcnt & PTHRW_COUNT_MASK) != rw_ucnt)
1152 return(EBUSY);
1153
1154 }
1155 #endif
1156 LOCK_INIT(rwlock->lock);
1157 return(__pthread_rwlock_init(orwlock, attr));
1158
1159 }
1160
/*
 * pthread_rwlock_rdlock: acquire the lock for reading.  Tries a userland
 * fast path (64-bit CAS on the adjacent L and S words); if a writer holds
 * or is queued on the lock, registers as a waiter and blocks in the kernel
 * via __psynch_rw_rdlock.  Returns 0 on success or an errno value
 * (EINVAL, EDEADLK, EAGAIN).
 */
int
pthread_rwlock_rdlock(pthread_rwlock_t * orwlock)
{
#if __DARWIN_UNIX03
	pthread_t self;
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
	int error = 0, retry_count = 0;
	uint64_t oldval64, newval64;
	volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
	uint64_t myid = 0;

	/* Lazily initialize statically-initialized locks; reject bad signatures. */
	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		LOCK(rwlock->lock);
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
				UNLOCK(rwlock->lock);
				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
				return(error);
			}
		} else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
			UNLOCK(rwlock->lock);
			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
			return(EINVAL);
		}
		UNLOCK(rwlock->lock);
	}

	/* Locate the L (lock count), U (unlock count) and S (sequence) words. */
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
	} else {
		lcntaddr = rwlock->rw_lcntaddr;
		ucntaddr = rwlock->rw_ucntaddr;
		seqaddr = rwlock->rw_seqaddr;
	}

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
#endif
loop:
	/* Snapshot the three words; any CAS failure below restarts here. */
	lcntval = *lcntaddr;
	ucntval = *ucntaddr;
	rw_seq = *seqaddr;
#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lcntval, (ucntval | 0xee), rw_seq, 0);
#endif

	/* if l bit is on or u and k bit is clear, acquire lock in userland */
	if (can_rwl_readinuser(lcntval))
		goto gotlock;

#if __DARWIN_UNIX03
	/* Read-locking a lock we already hold exclusively would self-deadlock. */
	if (is_rwl_ebit_set(lcntval)) {
		self = pthread_self();
		if(rwlock->rw_owner == self) {
			error = EDEADLK;
			goto out;
		}
	}
#endif /* __DARWIN_UNIX03 */


	/* Need to block in kernel , remove Rbit */
	newval = (lcntval + PTHRW_INC) & PTH_RWLOCK_RESET_RBIT;

	newsval = rw_seq;
	if (is_rws_setseq(rw_seq)) {
		newsval &= PTHRW_SW_Reset_BIT_MASK;
		newsval |= (newval & PTHRW_COUNT_MASK);
	}

	/* CAS the adjacent L and S words as one 64-bit unit (S in high half). */
	oldval64 = (((uint64_t)rw_seq) << 32);
	oldval64 |= lcntval;

	newval64 = (((uint64_t)newsval) << 32);
	newval64 |= newval;

	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
		goto loop;

	/* give writers priority over readers */
	PLOCKSTAT_RW_BLOCK(orwlock, READ_LOCK_PLOCKSTAT);

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lcntval, newval, newsval, 0);
#endif

retry:
	/* Block in the kernel until a read grant (or error) comes back. */
	updateval = __psynch_rw_rdlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);

	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	/* Interrupted waits are simply restarted. */
	if (error == EINTR)
		goto retry;

	if (error == 0) {
		rwlock_action_onreturn(orwlock, updateval);
		PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
		PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
		return(0);
	} else {
		/* Any other kernel error leaves the lock state unrecoverable: abort. */
		PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
#if _KSYN_TRACE_
		set_enable(1);
#endif /* _KSYN_TRACE_ */
		(void)pthread_threadid_np(pthread_self(), &myid);
		LIBC_ABORT("rdlock from kernel with unknown error %x with tid %x\n", updateval, (uint32_t)myid);
		goto out;
	}
	/* Not reached */

gotlock:
	/* Userland fast path: cap the number of concurrent readers first. */
	if (rw_diffgenseq(lcntval, ucntval) >= PTHRW_MAX_READERS) {
		/* since ucntval may be newer, just redo */
		retry_count++;
		if (retry_count > 1024) {

#if _KSYN_TRACE_
			if (__pthread_lock_debug != 0)
				(void)__kdebug_trace(_KSYN_TRACE_RW_TOOMANY | DBG_FUNC_NONE, (uint32_t)rwlock, 0XEEEEEEEE, lcntval, ucntval, 0);
#endif
			error = EAGAIN;
			goto out;
		} else {
			sched_yield();
			goto loop;
		}
	}

	/* Need to update L (remove R bit) and S word */
	newval = (lcntval + PTHRW_INC) & PTH_RWLOCK_RESET_RBIT;
	newsval = (rw_seq + PTHRW_INC);

	oldval64 = (((uint64_t)rw_seq) << 32);
	oldval64 |= lcntval;
	newval64 = (((uint64_t)newsval) << 32);
	newval64 |= newval;

	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
		goto loop;

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lcntval, newval, 0);
#endif

	PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, 0, 0, 0);
#endif
	return(0);
out:
	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif
	return(error);
}
1328
/*
 * pthread_rwlock_tryrdlock: non-blocking read acquisition.  Same userland
 * CAS fast path as pthread_rwlock_rdlock, but returns EBUSY instead of
 * transitioning into the kernel when the lock cannot be taken immediately.
 * Returns 0, EINVAL, EBUSY or EAGAIN.
 */
int
pthread_rwlock_tryrdlock(pthread_rwlock_t * orwlock)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t lcntval, ucntval, rw_seq, newval, newsval;
	int error = 0, retry_count = 0;
	uint64_t oldval64, newval64;
	volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;

	/* Lazily initialize statically-initialized locks; reject bad signatures. */
	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		LOCK(rwlock->lock);
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
				UNLOCK(rwlock->lock);
				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
				return(error);
			}
		} else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
			UNLOCK(rwlock->lock);
			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
			return(EINVAL);
		}
		UNLOCK(rwlock->lock);
	}

	/* Locate the L (lock count), U (unlock count) and S (sequence) words. */
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
	} else {
		lcntaddr = rwlock->rw_lcntaddr;
		ucntaddr = rwlock->rw_ucntaddr;
		seqaddr = rwlock->rw_seqaddr;
	}

loop:
	/* Snapshot the three words; any CAS failure below restarts here. */
	lcntval = *lcntaddr;
	ucntval = *ucntaddr;
	rw_seq = *seqaddr;
#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_TRYRDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lcntval, ucntval, rw_seq, 0);
#endif

	/* if l bit is on or u and k bit is clear, acquire lock in userland */
	if (can_rwl_readinuser(lcntval))
		goto gotlock;

	/* Would have to block: a trylock reports EBUSY instead. */
	error = EBUSY;
	goto out;

gotlock:
	/* Cap the number of concurrent readers before committing. */
	if (rw_diffgenseq(lcntval, ucntval) >= PTHRW_MAX_READERS) {
		/* since ucntval may be newer, just redo */
		retry_count++;
		if (retry_count > 1024) {

#if _KSYN_TRACE_
			if (__pthread_lock_debug != 0)
				(void)__kdebug_trace(_KSYN_TRACE_RW_TOOMANY | DBG_FUNC_NONE, (uint32_t)rwlock, 0XEEEEEEEE, lcntval, ucntval, 0);
#endif
			error = EAGAIN;
			goto out;
		} else {
			sched_yield();
			goto loop;
		}
	}

	/* Need to update L(remove Rbit ) and S word */
	newval = (lcntval + PTHRW_INC) & PTH_RWLOCK_RESET_RBIT;
	newsval = (rw_seq + PTHRW_INC);

	/* CAS the adjacent L and S words as one 64-bit unit (S in high half). */
	oldval64 = (((uint64_t)rw_seq) << 32);
	oldval64 |= lcntval;
	newval64 = (((uint64_t)newsval) << 32);
	newval64 |= newval;

	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
		goto loop;

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_TRYRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lcntval, newval, 0);
#endif

	PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
	return(0);

out:
	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_TRYRDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, error, 0, 0);
#endif
	return(error);
}
1424
/*
 * pthread_rwlock_trywrlock: non-blocking write acquisition.  Succeeds only
 * when the lock is in the "restart" state (R bit set, i.e. completely
 * free); otherwise returns EBUSY without entering the kernel.
 */
int
pthread_rwlock_trywrlock(pthread_rwlock_t * orwlock)
{
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t lcntval, rw_seq, newval, newsval;
	int error = 0, gotlock = 0;
	uint64_t oldval64, newval64;
	volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;

	/* Lazily initialize statically-initialized locks; reject bad signatures. */
	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		LOCK(rwlock->lock);
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
				UNLOCK(rwlock->lock);
				PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
				return(error);
			}
		} else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
			UNLOCK(rwlock->lock);
			PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
			return(EINVAL);
		}
		UNLOCK(rwlock->lock);
	}

	/* Locate the L (lock count), U (unlock count) and S (sequence) words. */
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
	} else {
		lcntaddr = rwlock->rw_lcntaddr;
		ucntaddr = rwlock->rw_ucntaddr;
		seqaddr = rwlock->rw_seqaddr;
	}

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_TRYWRLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
#endif
loop:
	lcntval = *lcntaddr;
	rw_seq = *seqaddr;

	/* can we acquire in userland? */
	if ((lcntval & PTH_RWL_RBIT) != 0) {
		/* Free lock: claim it with interlock, kernel-pending and exclusive bits. */
		newval = ((lcntval + PTHRW_INC) & PTHRW_COUNT_MASK) | PTH_RWL_IBIT | PTH_RWL_KBIT| PTH_RWL_EBIT;
		newsval = rw_seq + PTHRW_INC;
		gotlock = 1;
	} else
		gotlock = 0;


	oldval64 = (((uint64_t)rw_seq) << 32);
	oldval64 |= lcntval;

	/* When not acquiring, CAS with an unchanged value just validates the
	 * snapshot (and provides the barrier) before reporting EBUSY. */
	if (gotlock != 0) {
		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;
	} else
		newval64 = oldval64;

	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE) {
		goto loop;
	}
	if (gotlock == 1) {
#if __DARWIN_UNIX03
		rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
#if _KSYN_TRACE_
		if (__pthread_lock_debug != 0)
			(void)__kdebug_trace(_KSYN_TRACE_RW_TRYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0, 0, 0, 0);
#endif
		return(0);
	} else {
#if _KSYN_TRACE_
		if (__pthread_lock_debug != 0)
			(void)__kdebug_trace(_KSYN_TRACE_RW_TRYWRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, EBUSY, 0, 0);
#endif

		PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EBUSY);
		return(EBUSY);
	}
}
1510
/*
 * pthread_rwlock_wrlock: acquire the lock for writing.  Takes the lock in
 * userland when it is in the free/restart state (R bit set); otherwise
 * records a pending writer (W bit) and blocks in the kernel via
 * __psynch_rw_wrlock.  Returns 0 on success or an errno value
 * (EINVAL, EDEADLK).
 */
int
pthread_rwlock_wrlock(pthread_rwlock_t * orwlock)
{
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval;
	int error = 0, gotlock = 0;
	uint64_t oldval64, newval64;
	volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
	uint64_t myid = 0;

	/* Lazily initialize statically-initialized locks; reject bad signatures. */
	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		LOCK(rwlock->lock);
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
				UNLOCK(rwlock->lock);
				PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
				return(error);
			}
		} else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
			UNLOCK(rwlock->lock);
			PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
			return(EINVAL);
		}
		UNLOCK(rwlock->lock);
	}

	/* Locate the L (lock count), U (unlock count) and S (sequence) words. */
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
	} else {
		lcntaddr = rwlock->rw_lcntaddr;
		ucntaddr = rwlock->rw_ucntaddr;
		seqaddr = rwlock->rw_seqaddr;
	}

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
#endif
loop:
	/* Snapshot the three words; any CAS failure below restarts here. */
	lcntval = *lcntaddr;
	ucntval = *ucntaddr;
	rw_seq = *seqaddr;

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lcntval, ucntval, rw_seq, 0);
#endif

#if __DARWIN_UNIX03
	/* Write-locking a lock we already hold exclusively would self-deadlock. */
	if (is_rwl_ebit_set(lcntval)) {
		if(rwlock->rw_owner == self) {
			error = EDEADLK;
			goto out;
		}
	}
#endif /* __DARWIN_UNIX03 */


	if ((lcntval & PTH_RWL_RBIT) != 0) {
		/* lock is restart state, writer can acquire the lock */
		newval = ((lcntval + PTHRW_INC) & PTHRW_COUNT_MASK) | PTH_RWL_IBIT | PTH_RWL_KBIT| PTH_RWL_EBIT;

		newsval = rw_seq + PTHRW_INC;
		gotlock = 1;

	} else {
		/* Lock busy: register as a pending writer (W bit; K bit too unless
		 * the L bit is already set) and prepare to block in the kernel. */
		if (is_rwl_lbit_set(lcntval))
			newval = (lcntval + PTHRW_INC)| PTH_RWL_WBIT;
		else
			newval = (lcntval + PTHRW_INC) | PTH_RWL_KBIT| PTH_RWL_WBIT;

		newsval = rw_seq;
		if (is_rws_setseq(rw_seq)) {
			newsval &= PTHRW_SW_Reset_BIT_MASK;
			newsval |= (newval & PTHRW_COUNT_MASK);
		}
		gotlock = 0;
	}

	/* update lock seq */
	oldval64 = (((uint64_t)rw_seq) << 32);
	oldval64 |= lcntval;

	newval64 = (((uint64_t)newsval) << 32);
	newval64 |= newval;

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lcntval, newval, 0);
#endif
	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
		goto loop;

	/* lock acquired in userland itself? */
	if (gotlock != 0)
		goto gotit;

	/* unable to acquire in userland, transition to kernel */

	PLOCKSTAT_RW_BLOCK(orwlock, WRITE_LOCK_PLOCKSTAT);
retry:
	/* Block until the kernel grants exclusive ownership. */
	updateval = __psynch_rw_wrlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	/* Interrupted waits are simply restarted. */
	if (error == EINTR) {
		goto retry;
	}

	if (error != 0) {
		/* Any other kernel error leaves the lock state unrecoverable: abort. */
#if _KSYN_TRACE_
		set_enable(2);
#endif /* _KSYN_TRACE_ */
		(void)pthread_threadid_np(pthread_self(), &myid);
		LIBC_ABORT("wrlock from kernel with unknown error %x: tid %x\n", updateval, (uint32_t)myid);
	}

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x33333333, newval, updateval, 0);
#endif
	PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
	if (error == 0) {
		rwlock_action_onreturn(orwlock, updateval);
gotit:
		/* Both the userland and kernel paths converge here as owner. */
#if __DARWIN_UNIX03
		rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
#if _KSYN_TRACE_
		if (__pthread_lock_debug != 0)
			(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif
		return(0);
	}
#if __DARWIN_UNIX03
out:
#endif /* __DARWIN_UNIX03 */
	PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif
	return(error);
}
1661
1662
/*
 * pthread_rwlock_unlock: release a read or write hold.  Detects spurious
 * unlocks (lock already free), bumps the U (unlock count) word, and when
 * this is the last outstanding unlock either resets L/S to their free
 * state in userland or hands pending-writer/yield state to the kernel via
 * __psynch_rw_unlock.  Always returns 0 (see NOTE at succout).
 */
int
pthread_rwlock_unlock(pthread_rwlock_t * orwlock)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t lcntval, ucntval, rw_seq, newval, newsval, updateval, ulval;
	int error = 0, wrlock = 0, haswbit = 0, hasubit = 0, hasybit = 0;
	uint64_t oldval64, newval64;
	volatile uint32_t * lcntaddr, *ucntaddr, *seqaddr;
	uint64_t myid = 0;

	/* Lazily initialize statically-initialized locks; reject bad signatures. */
	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		LOCK(rwlock->lock);
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = __pthread_rwlock_init(orwlock, NULL)) != 0) {
				UNLOCK(rwlock->lock);
				PLOCKSTAT_RW_ERROR(orwlock, wrlock, error);
				return(error);
			}
		} else if (rwlock->sig != _PTHREAD_RWLOCK_SIG){
			UNLOCK(rwlock->lock);
			PLOCKSTAT_RW_ERROR(orwlock, wrlock, EINVAL);
			return(EINVAL);
		}
		UNLOCK(rwlock->lock);
	}

	/* Locate the L (lock count), U (unlock count) and S (sequence) words. */
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr);
	} else {
		lcntaddr = rwlock->rw_lcntaddr;
		ucntaddr = rwlock->rw_ucntaddr;
		seqaddr = rwlock->rw_seqaddr;
	}

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
#endif
loop:
	lcntval = *lcntaddr;
	ucntval = *ucntaddr;
	rw_seq = *seqaddr;



#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x51515151, lcntval, ucntval, 0);
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x51515151, rw_seq, 0, 0);
#endif
	/* check for spurious unlocks */
	if ((lcntval & PTH_RWL_RBIT) != 0) {
		/* R bit set means the lock is free — a no-change CAS just
		 * validates the snapshot before reporting the spurious unlock. */
		newval = lcntval ;
		newsval = rw_seq;

		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;

		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;

		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) == TRUE) {
			/* spurious unlock, return */
			error = EINVAL;
#if _KSYN_TRACE_
			if (__pthread_lock_debug != 0)
				(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x1a1b1c1d, lcntval, ucntval, 0);
#endif
			goto succout;
		} else
			goto loop;
	}

	/* Exclusive holder releasing: clear ownership for diagnostics. */
	if (is_rwl_ebit_set(lcntval)) {
		wrlock = 1;
#if __DARWIN_UNIX03
		rwlock->rw_owner = (pthread_t)0;
#endif /* __DARWIN_UNIX03 */
	}

	/* update U */

	ulval = (ucntval + PTHRW_INC);

	if (OSAtomicCompareAndSwap32Barrier(ucntval, ulval, (volatile int32_t *)ucntaddr) != TRUE)
		goto loop;

lp11:
	/* just validate the l and S values */
	oldval64 = (((uint64_t)rw_seq) << 32);
	oldval64 |= lcntval;

	newval64 = oldval64;

	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE) {
		/* L/S moved under us: refresh the snapshot and revalidate.
		 * Note U is NOT re-read; ulval stays our committed unlock count. */
		lcntval = *lcntaddr;
		rw_seq = *seqaddr;
		goto lp11;
	}

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xd1d2d3d4, lcntval, rw_seq, 0);
	/* NOTE(review): the trace below is NOT guarded by __pthread_lock_debug
	 * (missing brace?) and fires whenever _KSYN_TRACE_ is on — confirm intent. */
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xd1d2d3d4, ulval, 0, 0);
#endif

	/* last unlock, note U is already updated ? */
	if((lcntval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {

#if _KSYN_TRACE_
		if (__pthread_lock_debug != 0)
			(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xbbbbbbbb, lcntval, ucntval, 0);
#endif
		/* Set L with R and init bits and set S to L */
		newval = (lcntval & PTHRW_COUNT_MASK)| PTHRW_RWLOCK_INIT;
		newsval = (lcntval & PTHRW_COUNT_MASK)| PTHRW_RWS_INIT;

		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;

		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;

		if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE) {
#if _KSYN_TRACE_
			if (__pthread_lock_debug != 0)
				(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xcccccccc, 0, 0, 0);
#endif
			lcntval = *lcntaddr;
			rw_seq = *seqaddr;
			goto lp11;
		}
#if _KSYN_TRACE_
		if (__pthread_lock_debug != 0)
			(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0xdddddddd, lcntval, ucntval, 0);
#endif
		goto succout;
	}

	/* if it is not exclusive or no Writer/yield pending, skip */
	if ((lcntval & (PTH_RWL_EBIT | PTH_RWL_WBIT | PTH_RWL_YBIT | PTH_RWL_KBIT)) == 0) {
		goto succout;
	}

	/* kernel transition needed? */
	/* U+1 == S? */
	if ((ulval + PTHRW_INC) != (rw_seq & PTHRW_COUNT_MASK)) {
		if ((lcntval & PTH_RWL_UBIT) != 0) {
			/* if U bit is set U + 2 == S ? */
			if ((ulval + PTHRW_INC + PTHRW_INC) != (rw_seq & PTHRW_COUNT_MASK))
				goto succout;
		} else
			goto succout;
	}

	/* Preserve pending-writer/update/yield bits for the kernel via S word. */
	haswbit = lcntval & PTH_RWL_WBIT;
	hasubit = lcntval & PTH_RWL_UBIT;
	hasybit = lcntval & PTH_RWL_YBIT;

	/* reset all bits and set k */
	newval = (lcntval & PTHRW_COUNT_MASK) | PTH_RWL_KBIT;
	/* set I bit on S word */
	newsval = rw_seq | PTH_RWS_IBIT;
	if (haswbit != 0)
		newsval |= PTH_RWS_WSVBIT;
	if (hasubit != 0)
		newsval |= PTH_RWS_USVBIT;
	if (hasybit != 0)
		newsval |= PTH_RWS_YSVBIT;

	oldval64 = (((uint64_t)rw_seq) << 32);
	oldval64 |= lcntval;

	newval64 = (((uint64_t)newsval) << 32);
	newval64 |= newval;

	if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE)
		goto lp11;

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555511, 1, ulval, 0);
#endif
	/* Hand off to the kernel to wake pending waiters. */
	updateval = __psynch_rw_unlock(orwlock, lcntval, ulval, newsval, rwlock->rw_flags);
	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	if(error != 0) {

		/* not sure what is the scenario */
		if(error != EINTR) {
#if _KSYN_TRACE_
			set_enable(4);
#endif /* _KSYN_TRACE_ */
			(void)pthread_threadid_np(pthread_self(), &myid);
			LIBC_ABORT("rwunlock from kernel with unknown error %x: tid %x\n", error, (uint32_t)myid);
			goto succout;
		}
		error = 0;
	}

#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555522, 3, lcntval, 0);
#endif

succout:
	/* NOTE(review): returns 0 unconditionally — even the spurious-unlock
	 * path that set error = EINVAL ends here, so EINVAL is never reported
	 * to the caller.  Confirm this is intentional. */
	PLOCKSTAT_RW_RELEASE(orwlock, wrlock);
#if _KSYN_TRACE_
	if (__pthread_lock_debug != 0)
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif
	return(0);
}
1880