/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libc_r/uthread/uthread_rwlock.c,v 1.6 2001/04/10 04:19:20 deischen Exp $
 */
/*
 * POSIX Pthread Library
 * -- Read Write Lock support
 */

#include "pthread_internals.h"
#include <stdio.h>	/* For printf(). */

extern int __unix_conforming;
#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define	PLOCKSTAT_RW_ERROR(x, y, z)
#define	PLOCKSTAT_RW_BLOCK(x, y)
#define	PLOCKSTAT_RW_BLOCKED(x, y, z)
#define	PLOCKSTAT_RW_ACQUIRE(x, y)
#define	PLOCKSTAT_RW_RELEASE(x, y)
#endif /* PLOCKSTAT */
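
/*
 * Note: the PLOCKSTAT_RW_* macros are plockstat(1)/DTrace lock-statistics
 * probe points.  When PLOCKSTAT support is not compiled in they expand to
 * nothing, so the probe calls sprinkled through this file cost nothing.
 */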
#define	READ_LOCK_PLOCKSTAT	0
#define	WRITE_LOCK_PLOCKSTAT	1

#define	BLOCK_FAIL_PLOCKSTAT	0
#define	BLOCK_SUCCESS_PLOCKSTAT	1

/* maximum number of times a read lock may be obtained */
#define	MAX_READ_LOCKS		(INT_MAX - 1)
#if defined(__i386__) || defined(__x86_64__)

#ifndef BUILDING_VARIANT /* [ */
__private_extern__ int usenew_impl = 1;
#else /* BUILDING_VARIANT */
extern int usenew_impl;
#endif /* BUILDING_VARIANT */
#if defined(__LP64__)
#define RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr) \
{ \
	if (rwlock->misalign != 0) { \
		lseqaddr = &rwlock->rw_seq[1]; \
		wcaddr = &rwlock->rw_seq[2]; \
		useqaddr = &rwlock->rw_seq[3]; \
	} else { \
		lseqaddr = &rwlock->rw_seq[0]; \
		wcaddr = &rwlock->rw_seq[1]; \
		useqaddr = &rwlock->rw_seq[2]; \
	} \
}
#else /* __LP64__ */
#define RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr) \
{ \
	if (rwlock->misalign != 0) { \
		lseqaddr = &rwlock->rw_seq[0]; \
		wcaddr = &rwlock->rw_seq[1]; \
		useqaddr = &rwlock->rw_seq[2]; \
	} else { \
		lseqaddr = &rwlock->rw_seq[1]; \
		wcaddr = &rwlock->rw_seq[2]; \
		useqaddr = &rwlock->rw_seq[3]; \
	} \
}
#endif /* __LP64__ */
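
/*
 * Layout note (inferred from the macro above and its use below): rw_seq[]
 * holds three 32-bit words -- the lock generation sequence (L), a waiter
 * count (W) and the unlock generation sequence (U).  The index selection
 * keeps the L word and the W word that follows it on an 8-byte boundary,
 * because the slow paths update the (L, W) pair with a single 64-bit
 * OSAtomicCompareAndSwap64 on lseqaddr, which needs that alignment.
 *
 *	LP64, aligned lock:	rw_seq[0]=L  rw_seq[1]=W  rw_seq[2]=U
 *	LP64, misaligned lock:	rw_seq[1]=L  rw_seq[2]=W  rw_seq[3]=U
 */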
int _new_pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
int _new_pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
int _new_pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
int _new_pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
int _new_pthread_rwlock_longrdlock_np(pthread_rwlock_t *rwlock);
int _new_pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
int _new_pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
int _new_pthread_rwlock_yieldwrlock_np(pthread_rwlock_t *rwlock);
int _new_pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
int _new_pthread_rwlock_downgrade_np(pthread_rwlock_t *rwlock);
int _new_pthread_rwlock_upgrade_np(pthread_rwlock_t *rwlock);
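
/*
 * Rough usage sketch of the public API these _new_* entry points back
 * (illustrative only; error handling elided):
 *
 *	pthread_rwlock_t rw;
 *	pthread_rwlock_init(&rw, NULL);
 *	pthread_rwlock_rdlock(&rw);	// shared read
 *	pthread_rwlock_unlock(&rw);
 *	pthread_rwlock_wrlock(&rw);	// exclusive write
 *	pthread_rwlock_unlock(&rw);
 *	pthread_rwlock_destroy(&rw);
 */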
#define _KSYN_TRACE_ 0

#if _KSYN_TRACE_
/* The Function qualifiers */
#define DBG_FUNC_START	1
#define DBG_FUNC_END	2
#define DBG_FUNC_NONE	0

int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);

#define _KSYN_TRACE_RW_RDLOCK	0x9000080
#define _KSYN_TRACE_RW_WRLOCK	0x9000084
#define _KSYN_TRACE_RW_UNLOCK	0x9000088
#define _KSYN_TRACE_RW_UNACT1	0x900808c
#define _KSYN_TRACE_RW_UNACT2	0x9008090
#define _KSYN_TRACE_RW_UNACTK	0x9008094
#define _KSYN_TRACE_RW_UNACTE	0x9008098
#endif /* _KSYN_TRACE_ */

#endif /* __i386__ || __x86_64__ */
#ifndef BUILDING_VARIANT /* [ */

#if defined(__i386__) || defined(__x86_64__)
static int rwlock_unlock_action_onread(pthread_rwlock_t * rwlock, uint32_t updateval);
static int rwlock_unlock_action1(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
static int rwlock_unlock_action2(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
static uint32_t modbits(uint32_t lgenval, uint32_t updateval);
static int rwlock_unlock_action_k(pthread_rwlock_t * rwlock, uint32_t lgenval, uint32_t updateval);
static int rwlock_exclusive_lockreturn(pthread_rwlock_t * rwlock, uint32_t updateval);
static int rw_diffgenseq(uint32_t x, uint32_t y);
#endif /* __i386__ || __x86_64__ */
int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
	attr->sig = _PTHREAD_RWLOCK_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return (0);
}
int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;  /* Uninitialized */
	return (0);
}
int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr,
			      int *pshared)
{
	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		return (0);
	} else {
		return (EINVAL); /* Not an initialized 'attribute' structure */
	}
}
int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t * attr, int pshared)
{
	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			return (0);
		} else {
			return (EINVAL); /* Invalid parameter */
		}
	} else {
		return (EINVAL); /* Not an initialized 'attribute' structure */
	}
}
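
/*
 * Illustrative use of the pshared attribute (sketch only; placing the lock
 * in memory mapped into both processes is the caller's responsibility):
 *
 *	pthread_rwlockattr_t a;
 *	pthread_rwlockattr_init(&a);
 *	pthread_rwlockattr_setpshared(&a, PTHREAD_PROCESS_SHARED);
 *	pthread_rwlock_init(rw_in_shared_memory, &a);
 *	pthread_rwlockattr_destroy(&a);
 */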
#if defined(__i386__) || defined(__x86_64__) /* [ */

int
_new_pthread_rwlock_destroy(pthread_rwlock_t *orwlock)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
#if __DARWIN_UNIX03
	uint32_t rw_lseqcnt, rw_useqcnt;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
#endif /* __DARWIN_UNIX03 */

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		return (EINVAL);
	} else {
#if __DARWIN_UNIX03
		if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
			RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
		} else {
			lseqaddr = rwlock->rw_lseqaddr;
			useqaddr = rwlock->rw_useqaddr;
			wcaddr = rwlock->rw_wcaddr;
		}

		rw_lseqcnt = *lseqaddr;
		rw_useqcnt = *useqaddr;

		if ((rw_lseqcnt & PTHRW_COUNT_MASK) != rw_useqcnt)
			return (EBUSY);
#endif /* __DARWIN_UNIX03 */
		//bzero(rwlock, sizeof(npthread_rwlock_t));
		rwlock->sig = _PTHREAD_NO_SIG;
		return (0);
	}
}
int
_new_pthread_rwlock_init(pthread_rwlock_t * orwlock, const pthread_rwlockattr_t *attr)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
#if __DARWIN_UNIX03
	uint32_t rw_lseqcnt, rw_useqcnt;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
#endif /* __DARWIN_UNIX03 */

#if __DARWIN_UNIX03
	if (attr && (attr->sig != _PTHREAD_RWLOCK_ATTR_SIG)) {
		return (EINVAL);
	}

	/* if already inited check whether it is in use, then return EBUSY */
	if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
		if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
			RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
		} else {
			lseqaddr = rwlock->rw_lseqaddr;
			useqaddr = rwlock->rw_useqaddr;
			wcaddr = rwlock->rw_wcaddr;
		}

		rw_lseqcnt = *lseqaddr;
		rw_useqcnt = *useqaddr;

		if ((rw_lseqcnt & PTHRW_COUNT_MASK) != rw_useqcnt)
			return (EBUSY);
	}
#endif /* __DARWIN_UNIX03 */

	/* initialize the lock */
	bzero(rwlock, sizeof(pthread_rwlock_t));

	if ((attr != NULL) && (attr->pshared == PTHREAD_PROCESS_SHARED)) {
		rwlock->pshared = PTHREAD_PROCESS_SHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_SHARED;
	} else {
		rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_PRIVATE;
	}

	if (((uintptr_t)rwlock & 0x07) != 0) {
		rwlock->misalign = 1;
#if defined(__LP64__)
		rwlock->rw_lseqaddr = &rwlock->rw_seq[1];
		rwlock->rw_wcaddr = &rwlock->rw_seq[2];
		rwlock->rw_useqaddr = &rwlock->rw_seq[3];
		rwlock->rw_seq[1] = PTHRW_RW_INIT;
#else /* __LP64__ */
		rwlock->rw_lseqaddr = &rwlock->rw_seq[0];
		rwlock->rw_wcaddr = &rwlock->rw_seq[1];
		rwlock->rw_useqaddr = &rwlock->rw_seq[2];
		rwlock->rw_seq[0] = PTHRW_RW_INIT;
#endif /* __LP64__ */
	} else {
		rwlock->misalign = 0;
#if defined(__LP64__)
		rwlock->rw_lseqaddr = &rwlock->rw_seq[0];
		rwlock->rw_wcaddr = &rwlock->rw_seq[1];
		rwlock->rw_useqaddr = &rwlock->rw_seq[2];
		rwlock->rw_seq[0] = PTHRW_RW_INIT;
#else /* __LP64__ */
		rwlock->rw_lseqaddr = &rwlock->rw_seq[1];
		rwlock->rw_wcaddr = &rwlock->rw_seq[2];
		rwlock->rw_useqaddr = &rwlock->rw_seq[3];
		rwlock->rw_seq[1] = PTHRW_RW_INIT;
#endif /* __LP64__ */
	}

	rwlock->sig = _PTHREAD_RWLOCK_SIG;
	return (0);
}
int
_new_pthread_rwlock_rdlock(pthread_rwlock_t * orwlock)
{
#if __DARWIN_UNIX03
	pthread_t self;
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t lgenval, ugenval, rw_wc, newval, updateval;
	int error = 0, ret;
	uint64_t oldval64, newval64;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
				return (error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
			return (EINVAL);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

loop:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_START, (uint32_t)rwlock, lgenval, newval, rw_wc, 0);
#endif /* _KSYN_TRACE_ */

	/* if the L bit is on, or the E/W/U bits are all clear, acquire in userland */
	if (is_rw_lbit_set(lgenval))
		goto gotlock;

	if (is_rw_ewubit_clear(lgenval))
		goto gotlock;

#if __DARWIN_UNIX03
	if (is_rw_ebit_set(lgenval)) {
		self = pthread_self();
		if (rwlock->rw_owner == self) {
			error = EDEADLK;
			goto out;
		}
	}
#endif /* __DARWIN_UNIX03 */

	/* a writer-related bit is set and the lock cannot be taken here; block in kernel */
	newval = (lgenval + PTHRW_INC);

	oldval64 = (((uint64_t)rw_wc) << 32);
	oldval64 |= lgenval;

	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
	newval64 |= newval;

	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
		goto loop;

	/* give writers priority over readers */
	PLOCKSTAT_RW_BLOCK(orwlock, READ_LOCK_PLOCKSTAT);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lgenval, newval, rw_wc+1, 0);
#endif /* _KSYN_TRACE_ */

retry:
	updateval = __psynch_rw_rdlock(orwlock, (newval & ~PTHRW_RW_INIT), ugenval, rw_wc, rwlock->rw_flags);

	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	if (error == EINTR)
		goto retry;

	OSAtomicDecrement32((volatile int32_t *)wcaddr);

	if (error == 0) {
		if ((updateval & PTHRW_RW_HUNLOCK) != 0) {
			ret = rwlock_unlock_action_onread(orwlock, (updateval & ~PTHRW_RW_HUNLOCK));
			if (ret != 0)
				LIBC_ABORT("rdlock_unlock handling failed");
		}
		PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
		PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
		return (0);
	} else {
		PLOCKSTAT_RW_BLOCKED(orwlock, READ_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
		goto out;
	}

gotlock:
	/* check for max readers */
	if (rw_diffgenseq(lgenval, ugenval) >= PTHRW_MAX_READERS) {
		error = EAGAIN;
		goto out;
	}

	newval = (lgenval + PTHRW_INC);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, newval, 0);
#endif /* _KSYN_TRACE_ */

	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
		PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, 0, 0, 0);
#endif /* _KSYN_TRACE_ */
		return (0);
	} else
		goto loop;

out:
	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif /* _KSYN_TRACE_ */
	return (error);
}
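
/*
 * Reader path in brief (summary of the function above): when no
 * exclusive/writer/upgrade bits are set, a reader just bumps the lock
 * generation word by PTHRW_INC with a 32-bit CAS.  Otherwise it bumps the
 * (lock seq, waiter count) pair with a 64-bit CAS and parks in the kernel
 * via __psynch_rw_rdlock() until the writer traffic has drained.
 */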
int
_new_pthread_rwlock_tryrdlock(pthread_rwlock_t * orwlock)
{
	uint32_t lgenval, newval, ugenval;
	int error = 0;
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* check for static initialization */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
				return (error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
			return (EINVAL);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

loop:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;

	/* if the L bit is on, or the E/W/U bits are all clear, acquire in userland */
	if (is_rw_lbit_set(lgenval))
		goto gotlock;

	if (is_rw_ewubit_clear(lgenval))
		goto gotlock;

	error = EBUSY;
	goto out;

gotlock:
	/* check for max readers */
	if (rw_diffgenseq(lgenval, ugenval) >= PTHRW_MAX_READERS) {
		error = EAGAIN;
		goto out;
	}

	newval = (lgenval + PTHRW_INC);
	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
		PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
		return (0);
	} else
		goto loop;

out:
	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
	return (error);
}
/*****************************************************************************/
/* TBD: long-read locks need to count toward MAX_READERS */
int
_new_pthread_rwlock_longrdlock_np(pthread_rwlock_t * orwlock)
{
	pthread_t self;
	uint32_t lgenval, ugenval, rw_wc, newval, updateval;
	int error = 0, ret;
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint64_t oldval64, newval64;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
				return (error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, EINVAL);
			return (EINVAL);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

loop:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;

	if (is_rw_ewuybit_clear(lgenval))
		goto gotlock;

#if __DARWIN_UNIX03
	/* if the E bit is set, ensure there is no deadlock */
	if (is_rw_ebit_set(lgenval)) {
		self = pthread_self();
		if (rwlock->rw_owner == self) {
			error = EDEADLK;
			goto out;
		}
	}
#endif /* __DARWIN_UNIX03 */

	newval = (lgenval + PTHRW_INC);
	/* update lock seq and block in kernel */

	oldval64 = (((uint64_t)rw_wc) << 32);
	oldval64 |= lgenval;

	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
	newval64 |= newval;

	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
		goto loop;

retry:
	updateval = __psynch_rw_longrdlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	if (error == EINTR)
		goto retry;

	OSAtomicDecrement32((volatile int32_t *)wcaddr);

	if (error == 0) {
		if ((updateval & PTHRW_RW_HUNLOCK) != 0) {
			ret = rwlock_unlock_action_onread(orwlock, (updateval & ~PTHRW_RW_HUNLOCK));
			if (ret != 0)
				LIBC_ABORT("rdlock_unlock handling failed");
		}

		/* set the L bit to keep the long-read mode */
		error = FALSE;
		while (error == FALSE) {
			lgenval = *lseqaddr;
			newval = lgenval | PTHRW_LBIT;
			error = OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr);
		}
		goto successout;
	} else
		goto out;

gotlock:
	newval = ((lgenval + PTHRW_INC) | PTHRW_LBIT);
	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
		goto loop;

successout:
	PLOCKSTAT_RW_ACQUIRE(orwlock, READ_LOCK_PLOCKSTAT);
	return (0);
out:
	PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, error);
	return (error);
}
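
/*
 * Note: a successful long-read lock leaves PTHRW_LBIT set on the lock
 * word; _new_pthread_rwlock_rdlock() above treats a set L bit as "grab the
 * lock in userland", so subsequent readers keep taking the fast path until
 * the long-read mode is dropped on unlock.
 */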
/**************************************************************/

int
_new_pthread_rwlock_trywrlock(pthread_rwlock_t * orwlock)
{
	uint32_t lgenval, newval;
	int error = 0;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* check for static initialization */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
				return (error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
			return (EINVAL);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

	lgenval = PTHRW_RW_INIT;
	newval = PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT;
	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
#if __DARWIN_UNIX03
		rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
		return (0);
	}

	PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EBUSY);
	return (EBUSY);
}
int
_new_pthread_rwlock_wrlock(pthread_rwlock_t * orwlock)
{
	uint32_t lgenval, newval, ugenval, updateval, rw_wc;
	int error = 0;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint64_t oldval64, newval64;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* check for static initialization */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
				return (error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
			return (EINVAL);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
#endif /* _KSYN_TRACE_ */
loop:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, lgenval, ugenval, rw_wc, 0);
#endif /* _KSYN_TRACE_ */

#if __DARWIN_UNIX03
	if (is_rw_ebit_set(lgenval)) {
		if (rwlock->rw_owner == self) {
			error = EDEADLK;
			goto out;
		}
	}
#endif /* __DARWIN_UNIX03 */

	if (lgenval == PTHRW_RW_INIT) {
		newval = (PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT);
		if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
			goto gotit;
		}
	}

	newval = (lgenval + PTHRW_INC) | PTHRW_WBIT | PTHRW_SHADOW_W;

	/* update lock seq and block in kernel */
	oldval64 = (((uint64_t)rw_wc) << 32);
	oldval64 |= lgenval;

	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
	newval64 |= newval;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, newval, 0);
#endif /* _KSYN_TRACE_ */

	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
		goto loop;

	PLOCKSTAT_RW_BLOCK(orwlock, WRITE_LOCK_PLOCKSTAT);
retry:
	updateval = __psynch_rw_wrlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	if (error == EINTR) {
		goto retry;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x33333333, newval, updateval, 0);
#endif /* _KSYN_TRACE_ */

	if (error == 0) {
		PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
		OSAtomicDecrement32((volatile int32_t *)wcaddr);

		if (is_rw_ebit_clear(updateval)) {
			/* kernel cannot wakeup without granting E bit */
			LIBC_ABORT("wrlock returned without EBIT");
		}

		error = rwlock_exclusive_lockreturn(orwlock, updateval);
		if (error == EAGAIN)
			goto retry;
	} else {
		PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
		OSAtomicDecrement32((volatile int32_t *)wcaddr);
		goto out;
	}

	if (error == 0) {
gotit:
#if __DARWIN_UNIX03
		rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif /* _KSYN_TRACE_ */
		return (0);
	}
out:
	PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif /* _KSYN_TRACE_ */
	return (error);
}
/*****************************************************************************/

int
_new_pthread_rwlock_yieldwrlock_np(pthread_rwlock_t * orwlock)
{
	uint32_t lgenval, newval, ugenval, updateval, rw_wc;
	int error = 0;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint64_t oldval64, newval64;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* check for static initialization */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
				return (error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
			return (EINVAL);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

loop:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;

#if __DARWIN_UNIX03
	if (is_rw_ebit_set(lgenval)) {
		if (rwlock->rw_owner == self) {
			error = EDEADLK;
			goto out;
		}
	}
#endif /* __DARWIN_UNIX03 */

	if (lgenval == PTHRW_RW_INIT) {
		newval = PTHRW_RW_INIT | PTHRW_INC | PTHRW_EBIT;
		if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
			goto gotit;
		}
	}

	newval = (lgenval + PTHRW_INC);
	if ((lgenval & PTHRW_WBIT) == 0)
		newval |= PTHRW_YBIT;

	oldval64 = (((uint64_t)rw_wc) << 32);
	oldval64 |= lgenval;

	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
	newval64 |= newval;

	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
		goto loop;

	PLOCKSTAT_RW_BLOCK(orwlock, WRITE_LOCK_PLOCKSTAT);
retry:
	updateval = __psynch_rw_yieldwrlock(orwlock, newval, ugenval, (rw_wc+1), rwlock->rw_flags);
	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	if (error == EINTR)
		goto retry;

	if (error == 0) {
		PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
		OSAtomicDecrement32((volatile int32_t *)wcaddr);

		if (is_rw_ebit_clear(updateval)) {
			/* kernel cannot wakeup without granting E bit */
			LIBC_ABORT("yieldwrlock returned without EBIT");
		}

		error = rwlock_exclusive_lockreturn(orwlock, updateval);
		if (error == EAGAIN)
			goto retry;
	} else {
		PLOCKSTAT_RW_BLOCKED(orwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
		OSAtomicDecrement32((volatile int32_t *)wcaddr);
		goto out;
	}

	if (error == 0) {
gotit:
#if __DARWIN_UNIX03
		rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
		return (0);
	}

	PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
	return (error);
out:
	PLOCKSTAT_RW_ERROR(orwlock, WRITE_LOCK_PLOCKSTAT, error);
	return (error);
}
/**************************************************************/

int
_new_pthread_rwlock_unlock(pthread_rwlock_t * orwlock)
{
	uint32_t lgenval, ugenval, rw_wc, newval, nlval, ulval;
	int error = 0;
	int wrlock = 0, kern_trans;
	uint32_t updateval, bits, newbits;
	uint32_t isupgrade = 0;
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	int retry_count = 0, retry_count1 = 0;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
	pthread_t self = NULL;
	uint64_t threadid = 0;
	int ubitchanged = 0, initbitset = 0, num;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* check for static initialization */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				PLOCKSTAT_RW_ERROR(orwlock, wrlock, error);
				return (error);
			}
		} else {
			PLOCKSTAT_RW_ERROR(orwlock, wrlock, EINVAL);
			return (EINVAL);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_START, (uint32_t)rwlock, 0, 0, 0, 0);
#endif /* _KSYN_TRACE_ */
loop:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;

	if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
		/* spurious unlock; give pending updates a chance to land */
		if (retry_count < 1024) {
			retry_count++;
			sched_yield();
			goto loop;
		}
		error = EINVAL;
		goto out;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, lgenval, ugenval, 0);
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, rw_wc, 0, 0);
#endif /* _KSYN_TRACE_ */

	if (is_rw_ebit_set(lgenval)) {
		wrlock = 1;
#if __DARWIN_UNIX03
		rwlock->rw_owner = (pthread_t)0;
#endif /* __DARWIN_UNIX03 */
	}

	/* last unlock: try to reset the lock to its initial state */
	if ((lgenval & PTHRW_COUNT_MASK) == (ugenval + PTHRW_INC)) {
		if (OSAtomicCompareAndSwap32(ugenval, 0, (volatile int32_t *)useqaddr) != TRUE) {
			goto loop;
		}
		if (OSAtomicCompareAndSwap32(lgenval, PTHRW_RW_INIT, (volatile int32_t *)lseqaddr) != TRUE) {
			/* lock seq moved on; put the unlock seq back */
			if (OSAtomicCompareAndSwap32(0, ugenval, (volatile int32_t *)useqaddr) != TRUE) {
				ulval = *useqaddr;
				nlval = ugenval + ulval;
				if (OSAtomicCompareAndSwap32(ulval, nlval, (volatile int32_t *)useqaddr) != TRUE)
					goto loop;
			}
			goto loop;
		}
		goto succout;
	}

	/* do we need kernel trans? */
	nlval = lgenval & PTHRW_COUNT_MASK;
	if (ubitchanged == 0)
		ulval = (ugenval + PTHRW_INC) & PTHRW_COUNT_MASK;
	else
		ulval = ugenval & PTHRW_COUNT_MASK;

	num = rw_diffgenseq(nlval, ulval);
	kern_trans = (num == (rw_wc << PTHRW_COUNT_SHIFT));
	/* if there are more waiters than needed for the kernel trans, let them drain */
	if ((ubitchanged == 0) && (kern_trans == 0) && (num < (rw_wc << PTHRW_COUNT_SHIFT))) {
		if (retry_count1 < 1024) {
			retry_count1++;
			sched_yield();
			goto loop;
		}
	}

	if (ubitchanged == 0) {
		if (OSAtomicCompareAndSwap32(ugenval, ugenval + PTHRW_INC, (volatile int32_t *)useqaddr) != TRUE)
			goto loop;
		ubitchanged = 1;
	}

	if (kern_trans == 0) {
		goto succout;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 1, ugenval + PTHRW_INC, 0);
#endif /* _KSYN_TRACE_ */

	bits = lgenval & PTHRW_BIT_MASK;
	newbits = bits;

	/* if this is the first unlock to kernel, notify kernel of the init status */
	if ((bits & PTHRW_RW_INIT) != 0) {
		/* reset the init bit if present */
		newbits &= ~PTHRW_RW_INIT;
		initbitset = PTHRW_RW_INIT;
	}
	if (((bits & PTHRW_EBIT) != 0) && ((bits & PTHRW_WBIT) == 0)) {
		/* reset E bit if no W bit is set */
		newbits &= ~PTHRW_EBIT;
	}
	/* clear shadow bit, as W is going to be sent to kernel */
	if ((bits & PTHRW_WBIT) != 0) {
		newbits &= ~PTHRW_SHADOW_W;
	}
	/* the L bit stays in userland */
	if (bits & PTHRW_LBIT)
		newbits &= ~PTHRW_LBIT;
	if (bits & PTHRW_UBIT) {
		/* reset U and set E bit */
		newbits &= ~PTHRW_UBIT;
		newbits |= PTHRW_EBIT;
		isupgrade = PTHRW_UBIT;
	}

	/* update bits on the L word */
	newval = (lgenval & PTHRW_COUNT_MASK) | newbits;
	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE) {
		/* reread the value */
		lgenval = *lseqaddr;
		ugenval = *useqaddr;
		rw_wc = *wcaddr;
		/* since lgen changed, check for trans again */
		goto loop;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 2, newval, 0);
#endif /* _KSYN_TRACE_ */

	/* send upgrade bit to kernel */
	newval |= (isupgrade | initbitset);
	updateval = __psynch_rw_unlock(orwlock, newval, ugenval + PTHRW_INC, rw_wc, rwlock->rw_flags);
	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	if (error != 0) {
		/* not sure what is the scenario */
		goto out;
	}

	/*
	 * If the unlock is spurious, return.  Also if the exclusive lock
	 * is being granted, let that thread manage the status bits;
	 * otherwise a stale exclusive bit can be left set if that thread
	 * has already unlocked.
	 */
	if ((updateval & (PTHRW_RW_SPURIOUS | PTHRW_EBIT)) != 0) {
		goto succout;
	}

lp11:
	lgenval = *lseqaddr;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 3, lgenval, 0);
#endif /* _KSYN_TRACE_ */

	/* if the kernel-anticipated seq and the one on the lock are the same, set the one from kernel */
	if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
		if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
			goto lp11;
		goto succout;
	}

	/* state bits are same? */
	if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
		/* nothing new to set from kernel */
		goto succout;
	}

	newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 4, newval, 0);
#endif /* _KSYN_TRACE_ */

	/* high bits are the state on the lock; low bits are the ones the kernel needs to set */
	switch (newval) {
		/* W states */
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			break;
		}

		/* L states */
		case ((PTHRW_LBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}

		/* Y states */
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			break;
		}

		/* YU states */
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			break;
		}

		/* E states */
		case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}

		/* WE states */
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}

		/* WL states */
		case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}

		default:
			/* illegal states */
			self = pthread_self();
			threadid = self->thread_id;
#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
			(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
#endif /* _KSYN_TRACE_ */
			LIBC_ABORT("incorrect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
	}

	if (error != 0)
		goto out;
succout:
	PLOCKSTAT_RW_RELEASE(orwlock, wrlock);
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif /* _KSYN_TRACE_ */
	return (0);
out:
	PLOCKSTAT_RW_ERROR(orwlock, wrlock, error);
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_END, (uint32_t)rwlock, 0xAAAAAAAA, error, 0, 0);
#endif /* _KSYN_TRACE_ */
	return (error);
}
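
/*
 * Note on the switch above: each case value pairs the state bits found on
 * the lock word (high half, shifted up by PTHRW_COUNT_SHIFT) with the
 * state bits the kernel reported (low half).  rwlock_unlock_action1()
 * publishes the merged bits with one CAS, rwlock_unlock_action2()
 * additionally round-trips through __psynch_rw_unlock2(), and
 * rwlock_unlock_action_k() opportunistically sets bits on behalf of a
 * writer being granted the lock; see the helpers later in this file.
 */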
/*****************************************************************************/

int
_new_pthread_rwlock_downgrade_np(pthread_rwlock_t * orwlock)
{
	uint32_t lgenval, newval, ugenval, rw_wc;
	int error = 0;
	pthread_t self = pthread_self();
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* check for static initialization */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				return (error);
			}
		} else {
			return (EINVAL);
		}
	}
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

loop:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;

	/* must be holding the lock exclusive to downgrade */
	if ((is_rw_ebit_set(lgenval)) && (rwlock->rw_owner != self)) {
		return (EINVAL);
	}

	if ((lgenval & PTHRW_COUNT_MASK) != ugenval) {
		newval = lgenval & ~PTHRW_EBIT;

		if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
#if __DARWIN_UNIX03
			rwlock->rw_owner = 0;
#endif /* __DARWIN_UNIX03 */
			error = __psynch_rw_downgrade(orwlock, newval, ugenval, rw_wc, rwlock->rw_flags);
			return (0);
		} else
			goto loop;
	}
	return (EINVAL);
}
int
_new_pthread_rwlock_upgrade_np(pthread_rwlock_t * orwlock)
{
	uint32_t lgenval, newval, ugenval, ulval, updateval, rw_wc;
	int error = 0, kern_trans;
	pthread_t self = pthread_self();
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint64_t oldval64, newval64;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		/* check for static initialization */
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				return (error);
			}
		} else {
			return (EINVAL);
		}
	}
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

loop:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;

	/* an upgrade or exclusive hold is already in progress */
	if (is_rw_uebit_set(lgenval)) {
		return (EBUSY);
	}

	/* not holding a read lock at all */
	if ((lgenval & PTHRW_COUNT_MASK) == ugenval)
		return (EINVAL);

	if (lgenval > ugenval)
		ulval = (lgenval & PTHRW_COUNT_MASK) - (ugenval & PTHRW_COUNT_MASK);
	else
		ulval = (ugenval & PTHRW_COUNT_MASK) - (lgenval & PTHRW_COUNT_MASK);

	newval = lgenval | PTHRW_UBIT;

	kern_trans = 1;
	if (rw_wc != 0) {
		/* this thread is the only remaining userland reader */
		if (ulval == ((rw_wc - 1) << PTHRW_COUNT_SHIFT))
			kern_trans = 0;
	} else if (ulval == 1)
		kern_trans = 0;

	if (kern_trans == 0) {
		newval = ((lgenval | PTHRW_EBIT) & ~PTHRW_LBIT);
	} else {
		newval = lgenval | PTHRW_UBIT;
	}
	if (kern_trans == 0) {
		if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
			goto loop;
		goto gotit;
	}

	newval = (lgenval + PTHRW_INC);

	oldval64 = (((uint64_t)rw_wc) << 32);
	oldval64 |= lgenval;

	newval64 = (((uint64_t)(rw_wc + 1)) << 32);
	newval64 |= newval;

	if (OSAtomicCompareAndSwap64(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
		goto loop;

	/* kern_trans == 1 */
retry:
	updateval = __psynch_rw_upgrade(orwlock, newval, ugenval, rw_wc+1, rwlock->rw_flags);
	if (updateval == (uint32_t)-1) {
		error = errno;
	} else
		error = 0;

	if (error == 0) {
		OSAtomicDecrement32((volatile int32_t *)wcaddr);

		if (is_rw_ebit_set(updateval)) {
			/* kernel cannot wakeup without granting E bit */
			error = rwlock_exclusive_lockreturn(orwlock, updateval);
			if (error == EAGAIN)
				goto retry;
		}
	} else {
		OSAtomicDecrement32((volatile int32_t *)wcaddr);
		return (error);
	}

	if (error == 0) {
gotit:
#if __DARWIN_UNIX03
		rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
		return (0);
	}
	return (error);
}
int
pthread_rwlock_tryupgrade_np(pthread_rwlock_t *orwlock)
{
	pthread_t self = pthread_self();
	uint32_t lgenval, newval, ugenval, ulval, rw_wc;
	int error = 0, kern_trans;
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				return (error);
			}
		} else {
			return (EINVAL);
		}
	}
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

loop:
	lgenval = *lseqaddr;
	ugenval = *useqaddr;
	rw_wc = *wcaddr;

	/* an upgrade or exclusive hold is already in progress */
	if (is_rw_uebit_set(lgenval)) {
		return (EBUSY);
	}

	/* not holding a read lock at all */
	if ((lgenval & PTHRW_COUNT_MASK) == ugenval)
		return (EINVAL);

	if (lgenval > ugenval)
		ulval = (lgenval & PTHRW_COUNT_MASK) - (ugenval & PTHRW_COUNT_MASK);
	else
		ulval = (ugenval & PTHRW_COUNT_MASK) - (lgenval & PTHRW_COUNT_MASK);

	newval = lgenval | PTHRW_UBIT;

	kern_trans = 1;
	if (rw_wc != 0) {
		/* there is only one reader thread */
		if (ulval == (rw_wc - 1))
			kern_trans = 0;
	} else if (ulval == 1)
		kern_trans = 0;

	if (kern_trans == 0) {
		newval = (lgenval | PTHRW_EBIT) & ~PTHRW_LBIT;
		if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
			goto loop;
#if __DARWIN_UNIX03
		rwlock->rw_owner = self;
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, WRITE_LOCK_PLOCKSTAT);
		return (0);
	}
	return (EBUSY);
}
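
/*
 * Sketch of the non-portable upgrade/downgrade flow (illustrative only):
 *
 *	pthread_rwlock_rdlock(&rw);
 *	if (pthread_rwlock_tryupgrade_np(&rw) == 0) {
 *		... now held exclusive ...
 *		pthread_rwlock_downgrade_np(&rw);	// back to shared
 *	}
 *	pthread_rwlock_unlock(&rw);
 */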
/* Returns true if the rwlock is held for reading by any thread or held for writing by the current thread */
int
pthread_rwlock_held_np(pthread_rwlock_t * orwlock)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t lgenval, ugenval;
	int error = 0;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				return (0);
			}
		} else {
			return (0);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

	lgenval = *lseqaddr;
	ugenval = *useqaddr;

	if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK))
		return (0);

	return (1);
}
/* Returns true if the rwlock is held for reading by any thread */
int
pthread_rwlock_rdheld_np(pthread_rwlock_t * orwlock)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t lgenval;
	int error = 0;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				return (0);
			}
		} else {
			return (0);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

	lgenval = *lseqaddr;

	if (is_rw_ebit_set(lgenval)) {
		return (0);
	}
	return (1);
}
/* Returns true if the rwlock is held for writing by the current thread */
int
pthread_rwlock_wrheld_np(pthread_rwlock_t * orwlock)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	pthread_t self;
	uint32_t lgenval;
	int error = 0;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			if ((error = pthread_rwlock_init(orwlock, NULL)) != 0) {
				return (0);
			}
		} else {
			return (0);
		}
	}

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

	self = pthread_self();

	lgenval = *lseqaddr;
	if ((is_rw_ebit_set(lgenval)) && (rwlock->rw_owner == self)) {
		return (1);
	}
	return (0);
}
/**************************************************************/

static int
rwlock_unlock_action_onread(pthread_rwlock_t * orwlock, uint32_t updateval)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	int error = 0;
	uint32_t lgenval, newval;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
	pthread_t self;
	uint64_t threadid;

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

lp11:
	lgenval = *lseqaddr;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 3, lgenval, 0);
#endif /* _KSYN_TRACE_ */

	/* if the kernel-anticipated seq and the one on the lock are the same, set the one from kernel */
	if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
		if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
			goto lp11;
		goto out;
	}

	/* state bits are same? */
	if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
		/* nothing new to set from kernel */
		goto out;
	}

	newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 4, newval, 0);
#endif /* _KSYN_TRACE_ */

	/* high bits are the state on the lock; low bits are the ones the kernel needs to set */
	switch (newval) {
		/* W states */
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			break;
		}

		/* L states */
		case ((PTHRW_LBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}

		/* Y states */
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			break;
		}

		/* YU states */
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action_k(orwlock, lgenval, updateval);
			break;
		}

		/* E states */
		case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}

		/* WE states */
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action2(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}

		/* WL states */
		case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_LBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_LBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}

		default:
			/* illegal states */
			self = pthread_self();
			threadid = self->thread_id;
#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
			(void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
#endif /* _KSYN_TRACE_ */
			LIBC_ABORT("incorrect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
	}

out:
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
#endif /* _KSYN_TRACE_ */
	return (error);
}
static uint32_t
modbits(uint32_t lgenval, uint32_t updateval)
{
	uint32_t lval = lgenval & PTHRW_BIT_MASK;
	uint32_t uval = updateval & PTHRW_BIT_MASK;
	uint32_t rval, nlval;

	nlval = (lval | uval);
	if ((uval & PTHRW_EBIT) == 0)
		nlval &= ~PTHRW_EBIT;
	if ((nlval & (PTHRW_WBIT | PTHRW_YBIT)) == (PTHRW_WBIT | PTHRW_YBIT))
		nlval &= ~PTHRW_YBIT;
	/* no new writers and kernel resets the W bit: reset the W bit on the lock */
	if (((nlval & (PTHRW_WBIT | PTHRW_SHADOW_W)) == PTHRW_WBIT) && ((updateval & PTHRW_WBIT) == 0))
		nlval &= ~PTHRW_WBIT;

	rval = (lgenval & PTHRW_COUNT_MASK) | nlval;
	return (rval);
}
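
/*
 * Bit-name legend as used above (inferred from which entry points set each
 * bit; the authoritative definitions live in pthread_internals.h):
 * E = exclusive holder, W = writer pending (SHADOW_W is its userland
 * shadow), Y = yielding writer (yieldwrlock), L = long reader
 * (longrdlock), U = upgrade in progress.  modbits() merges the state bits
 * the kernel reports into the bits already on the lock word, dropping the
 * ones the kernel has consumed.
 */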
static int
rwlock_unlock_action1(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	int error = 0;
	uint32_t newval;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

	newval = modbits(lgenval, updateval);
	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) != TRUE)
		error = EINVAL;

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
#endif /* _KSYN_TRACE_ */
	return (error);
}
static int
rwlock_unlock_action2(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t newval;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

	newval = modbits(lgenval, updateval);
	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
		/* roundtrip kernel */
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNACT2 | DBG_FUNC_NONE, lgenval, newval, 0, 0, 0);
#endif /* _KSYN_TRACE_ */
		(void) __psynch_rw_unlock2(orwlock, lgenval, *useqaddr, *wcaddr, rwlock->rw_flags);
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNACT2 | DBG_FUNC_NONE, 0xffffffff, 0, 0, 0, 0);
#endif /* _KSYN_TRACE_ */
	return (0);
}
/*
 * This is used when an exclusive write lock of any kind is being granted.
 * The unlocking thread just tries to set the bits; if the attempt fails,
 * it moves on.
 */
static int
rwlock_unlock_action_k(pthread_rwlock_t * orwlock, uint32_t lgenval, uint32_t updateval)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t newval;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

	newval = modbits(lgenval, updateval);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, lgenval, updateval, newval, 0, 0);
#endif /* _KSYN_TRACE_ */

	/*
	 * Try to set the bits; if it fails, it is not a problem, as the
	 * thread taking the lock exclusive will take care of the discrepancy.
	 */
	if (OSAtomicCompareAndSwap32(lgenval, newval, (volatile int32_t *)lseqaddr) == TRUE) {
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, 0x55555555, lgenval, newval, 0, 0);
#endif /* _KSYN_TRACE_ */
	} else {
#if _KSYN_TRACE_
		(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTK | DBG_FUNC_NONE, 0xAAAAAAAA, lgenval, newval, 0, 0);
#endif /* _KSYN_TRACE_ */
	}
	return (0);
}
static int
rwlock_exclusive_lockreturn(pthread_rwlock_t * orwlock, uint32_t updateval)
{
	npthread_rwlock_t * rwlock = (npthread_rwlock_t *)orwlock;
	uint32_t lgenval, newval;
	volatile uint32_t * lseqaddr, *useqaddr, *wcaddr;
	pthread_t self;
	uint64_t threadid;
	int error = 0;

	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		RWLOCK_GETSEQ_ADDR(rwlock, lseqaddr, useqaddr, wcaddr);
	} else {
		lseqaddr = rwlock->rw_lseqaddr;
		useqaddr = rwlock->rw_useqaddr;
		wcaddr = rwlock->rw_wcaddr;
	}

lp11:
	lgenval = *lseqaddr;

	/* if the kernel-anticipated seq and the one on the lock are the same, set the one from kernel */
	if ((lgenval & PTHRW_COUNT_MASK) == (updateval & PTHRW_COUNT_MASK)) {
		if (OSAtomicCompareAndSwap32(lgenval, updateval, (volatile int32_t *)lseqaddr) != TRUE)
			goto lp11;
		goto out;
	}

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, lgenval, updateval, 1, 0, 0);
#endif /* _KSYN_TRACE_ */

	/* state bits are same? */
	if ((lgenval & PTHRW_BIT_MASK) == (updateval & PTHRW_BIT_MASK)) {
		/* nothing new to set from kernel */
		goto out;
	}

	newval = ((lgenval & PTHRW_UN_BIT_MASK) << PTHRW_COUNT_SHIFT) | (updateval & PTHRW_BIT_MASK);

#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, newval, 0, 2, 0, 0);
#endif /* _KSYN_TRACE_ */

	/* high bits are the state on the lock; low bits are the ones the kernel needs to set */
	switch (newval) {
		/* W states */
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_WBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = EAGAIN;
			break;
		}

		/* All L states illegal here */

		/* Y states */
		case (PTHRW_YBIT << PTHRW_COUNT_SHIFT) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = EAGAIN;
			break;
		}
		case ((PTHRW_YBIT << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = EAGAIN;
			break;
		}

		/* YU states */
		case ((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = EAGAIN;
			break;
		}
		case (((PTHRW_YBIT | PTHRW_UBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = EAGAIN;
			break;
		}

		/* E states */
		case ((PTHRW_EBIT << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}

		/* WE states */
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_WBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}
		case (((PTHRW_WBIT | PTHRW_EBIT) << PTHRW_COUNT_SHIFT) | (PTHRW_YBIT | PTHRW_EBIT)) : {
			error = rwlock_unlock_action1(orwlock, lgenval, updateval);
			break;
		}

		/* All WL states are illegal */

		default:
			/* illegal states */
			self = pthread_self();
			threadid = self->thread_id;
#if _KSYN_TRACE_
			(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 6, lgenval, 0);
			(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, (uint32_t)rwlock, 0x55555555, 7, updateval, 0);
#endif /* _KSYN_TRACE_ */
			LIBC_ABORT("rwlock_exclusive_lockreturn: incorrect state on return 0x%x: lgenval 0x%x, updateval 0x%x; threadid (0x%x)\n", newval, lgenval, updateval, (uint32_t)threadid);
	}

	if (error == EINVAL)
		goto lp11;
out:
#if _KSYN_TRACE_
	(void)__kdebug_trace(_KSYN_TRACE_RW_UNACTE | DBG_FUNC_NONE, error, 0, 0xffffffff, 0, 0);
#endif /* _KSYN_TRACE_ */
	return (error);
}
/* returned values are not bit shifted */
static int
rw_diffgenseq(uint32_t x, uint32_t y)
{
	uint32_t lx = (x & PTHRW_COUNT_MASK);
	uint32_t ly = (y & PTHRW_COUNT_MASK);

	if (lx > ly) {
		return (lx - ly);
	} else {
		return ((PTHRW_MAX_READERS - y) + lx + PTHRW_INC);
	}
}
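
/*
 * Example of the wraparound case above (illustrative): if the unlock
 * sequence y has wrapped past PTHRW_MAX_READERS while the lock sequence x
 * has not, the plain difference would go negative, so the distance is
 * computed as (PTHRW_MAX_READERS - y) + lx + PTHRW_INC, i.e. the span from
 * y up to the wrap point, plus the span from zero up to x, plus one
 * increment for the wrap itself.
 */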
#endif /* i386 || x86_64 ] */

#endif /* !BUILDING_VARIANT ] */
int
pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
#if defined(__i386__) || defined(__x86_64__) || defined(__DARWIN_UNIX03)
	int ret;
#endif /* __i386__ || __x86_64__ */

#if defined(__i386__) || defined(__x86_64__)
	if ((usenew_impl != 0)) {
		return (_new_pthread_rwlock_destroy(rwlock));
	}
#endif /* __i386__ || __x86_64__ */

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		return (EINVAL);
	}
#if defined(__i386__) || defined(__x86_64__)
	else if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		ret = _new_pthread_rwlock_destroy(rwlock);
		return (ret);
	}
#endif /* __i386__ || __x86_64__ */
	else {
#if __DARWIN_UNIX03
		/* grab the monitor lock */
		if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0)
			return (ret);

		if (rwlock->state != 0) {
			pthread_mutex_unlock(&rwlock->lock);
			return (EBUSY);
		}
		pthread_mutex_unlock(&rwlock->lock);
#endif /* __DARWIN_UNIX03 */

		pthread_mutex_destroy(&rwlock->lock);
		pthread_cond_destroy(&rwlock->read_signal);
		pthread_cond_destroy(&rwlock->write_signal);
		rwlock->sig = _PTHREAD_NO_SIG;
		return (0);
	}
}
int
pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
	int ret;

#if defined(__i386__) || defined(__x86_64__)
	if (usenew_impl != 0) {
		return(_new_pthread_rwlock_init(rwlock, attr));
	}
#endif /* __i386__ || __x86_64__ */

#if __DARWIN_UNIX03
	if (attr && (attr->sig != _PTHREAD_RWLOCK_ATTR_SIG)) {
		return(EINVAL);
	}
#endif /* __DARWIN_UNIX03 */

#if defined(__i386__) || defined(__x86_64__)
	if ((attr != NULL) && (attr->pshared == PTHREAD_PROCESS_SHARED)) {
		ret = _new_pthread_rwlock_init(rwlock, attr);
		return(ret);
	}
#endif /* __i386__ || __x86_64__ */

#if __DARWIN_UNIX03
	/* if already inited check whether it is in use, then return EBUSY */
	if ((rwlock->sig == _PTHREAD_RWLOCK_SIG) && (rwlock->state != 0)) {
		return(EBUSY);
	}
#endif /* __DARWIN_UNIX03 */

	/* initialize the lock */
	if ((ret = pthread_mutex_init(&rwlock->lock, NULL)) != 0)
		return(ret);

	/* initialize the read condition signal */
	ret = pthread_cond_init(&rwlock->read_signal, NULL);
	if (ret != 0) {
		pthread_mutex_destroy(&rwlock->lock);
		return(ret);
	}

	/* initialize the write condition signal */
	ret = pthread_cond_init(&rwlock->write_signal, NULL);
	if (ret != 0) {
		pthread_cond_destroy(&rwlock->read_signal);
		pthread_mutex_destroy(&rwlock->lock);
		return(ret);
	}

	/* success */
	rwlock->state = 0;
	rwlock->owner = (pthread_t)0;
	rwlock->blocked_writers = 0;
	if (attr != NULL)
		rwlock->pshared = attr->pshared;
	else
		rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
	rwlock->sig = _PTHREAD_RWLOCK_SIG;
	return(0);
}
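
/*
 * Usage sketch (not part of the original source): building a
 * process-shared lock through the standard attribute API.  On
 * i386/x86_64 the init path above forwards PTHREAD_PROCESS_SHARED locks
 * to _new_pthread_rwlock_init transparently.  PTHREAD_RWLOCK_EXAMPLES
 * is a hypothetical guard, not a real build flag.
 */
#ifdef PTHREAD_RWLOCK_EXAMPLES
static int
rwlock_init_pshared_example(pthread_rwlock_t *rw)
{
	pthread_rwlockattr_t rwattr;
	int ret;

	if ((ret = pthread_rwlockattr_init(&rwattr)) != 0)
		return(ret);
	ret = pthread_rwlockattr_setpshared(&rwattr, PTHREAD_PROCESS_SHARED);
	if (ret == 0)
		ret = pthread_rwlock_init(rw, &rwattr);
	pthread_rwlockattr_destroy(&rwattr);
	return(ret);
}
#endif /* PTHREAD_RWLOCK_EXAMPLES */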
int
pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	int ret;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */

#if defined(__i386__) || defined(__x86_64__)
	if (usenew_impl != 0) {
		return(_new_pthread_rwlock_rdlock(rwlock));
	}
#endif /* __i386__ || __x86_64__ */

	/* check for static initialization */
	if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
		if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
			PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
			return(ret);
		}
	}

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, EINVAL);
		return(EINVAL);
	}
#if defined(__i386__) || defined(__x86_64__)
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		ret = _new_pthread_rwlock_rdlock(rwlock);
		return(ret);
	}
#endif /* __i386__ || __x86_64__ */

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
		return(ret);
	}

#if __DARWIN_UNIX03
	/* a thread that holds the write lock must not deadlock on itself */
	if ((rwlock->state < 0) && (rwlock->owner == self)) {
		pthread_mutex_unlock(&rwlock->lock);
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, EDEADLK);
		return(EDEADLK);
	}
#endif /* __DARWIN_UNIX03 */

#if __DARWIN_UNIX03
	while (rwlock->blocked_writers || ((rwlock->state < 0) && (rwlock->owner != self)))
#else /* __DARWIN_UNIX03 */
	while (rwlock->blocked_writers || rwlock->state < 0)
#endif /* __DARWIN_UNIX03 */
	{
		/* give writers priority over readers */
		PLOCKSTAT_RW_BLOCK(rwlock, READ_LOCK_PLOCKSTAT);
		ret = pthread_cond_wait(&rwlock->read_signal, &rwlock->lock);
		if (ret != 0) {
			/* can't do a whole lot if this fails */
			pthread_mutex_unlock(&rwlock->lock);
			PLOCKSTAT_RW_BLOCKED(rwlock, READ_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
			PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
			return(ret);
		}
		PLOCKSTAT_RW_BLOCKED(rwlock, READ_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);
	}

	/* check lock count */
	if (rwlock->state == MAX_READ_LOCKS) {
		ret = EAGAIN;
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
	} else {
		++rwlock->state;	/* indicate we are locked for reading */
		PLOCKSTAT_RW_ACQUIRE(rwlock, READ_LOCK_PLOCKSTAT);
	}

	/*
	 * Something is really wrong if this call fails.  Returning
	 * error won't do because we've already obtained the read
	 * lock.  Decrementing 'state' is no good because we probably
	 * don't have the monitor lock.
	 */
	pthread_mutex_unlock(&rwlock->lock);

	return(ret);
}
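
/*
 * Usage sketch (not part of the original source): two properties of the
 * rdlock path above.  Under __DARWIN_UNIX03 a thread that already owns
 * the write lock gets EDEADLK back instead of blocking on itself, and a
 * queued writer (blocked_writers != 0) holds off new readers even while
 * the lock is only read-held.  PTHREAD_RWLOCK_EXAMPLES is a hypothetical
 * guard, not a real build flag.
 */
#ifdef PTHREAD_RWLOCK_EXAMPLES
#include <assert.h>
#include <errno.h>
static void
rwlock_rdlock_example(pthread_rwlock_t *rw)
{
	assert(pthread_rwlock_wrlock(rw) == 0);
	/* self-deadlock is detected through rwlock->owner */
	assert(pthread_rwlock_rdlock(rw) == EDEADLK);
	assert(pthread_rwlock_unlock(rw) == 0);
}
#endif /* PTHREAD_RWLOCK_EXAMPLES */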
int
pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	int ret;

#if defined(__i386__) || defined(__x86_64__)
	if (usenew_impl != 0) {
		return(_new_pthread_rwlock_tryrdlock(rwlock));
	}
#endif /* __i386__ || __x86_64__ */

	/* check for static initialization */
	if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
		if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
			PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
			return(ret);
		}
	}

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, EINVAL);
		return(EINVAL);
	}
#if defined(__i386__) || defined(__x86_64__)
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		ret = _new_pthread_rwlock_tryrdlock(rwlock);
		return(ret);
	}
#endif /* __i386__ || __x86_64__ */

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
		return(ret);
	}

	/* give writers priority over readers */
	if (rwlock->blocked_writers || rwlock->state < 0) {
		ret = EBUSY;
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
	} else if (rwlock->state == MAX_READ_LOCKS) {
		ret = EAGAIN;	/* too many read locks acquired */
		PLOCKSTAT_RW_ERROR(rwlock, READ_LOCK_PLOCKSTAT, ret);
	} else {
		++rwlock->state;	/* indicate we are locked for reading */
		PLOCKSTAT_RW_ACQUIRE(rwlock, READ_LOCK_PLOCKSTAT);
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	pthread_mutex_unlock(&rwlock->lock);

	return(ret);
}
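
/*
 * Usage sketch (not part of the original source): tryrdlock never
 * blocks, so a caller can treat EBUSY (writer active or queued) and
 * EAGAIN (MAX_READ_LOCKS readers already) as a cue to do other work and
 * retry later.  PTHREAD_RWLOCK_EXAMPLES is a hypothetical guard, not a
 * real build flag.
 */
#ifdef PTHREAD_RWLOCK_EXAMPLES
static int
rwlock_read_if_idle(pthread_rwlock_t *rw, const int *shared, int *out)
{
	int ret = pthread_rwlock_tryrdlock(rw);

	if (ret != 0)
		return(ret);	/* EBUSY/EAGAIN: caller retries later */
	*out = *shared;
	return(pthread_rwlock_unlock(rw));
}
#endif /* PTHREAD_RWLOCK_EXAMPLES */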
int
pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	int ret;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */

#if defined(__i386__) || defined(__x86_64__)
	if (usenew_impl != 0) {
		return(_new_pthread_rwlock_trywrlock(rwlock));
	}
#endif /* __i386__ || __x86_64__ */

	/* check for static initialization */
	if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
		if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
			PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
			return(ret);
		}
	}

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
		return(EINVAL);
	}
#if defined(__i386__) || defined(__x86_64__)
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		ret = _new_pthread_rwlock_trywrlock(rwlock);
		return(ret);
	}
#endif /* __i386__ || __x86_64__ */

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
		PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
		return(ret);
	}

	/* fail if anyone (reader or writer) holds the lock */
	if (rwlock->state != 0) {
		ret = EBUSY;
		PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
	} else {
		/* indicate we are locked for writing */
		rwlock->state = -1;
#if __DARWIN_UNIX03
		rwlock->owner = self;
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(rwlock, WRITE_LOCK_PLOCKSTAT);
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	pthread_mutex_unlock(&rwlock->lock);

	return(ret);
}
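
/*
 * Usage sketch (not part of the original source): trywrlock only
 * succeeds when state == 0, so even a single reader makes it fail with
 * EBUSY immediately.  PTHREAD_RWLOCK_EXAMPLES is a hypothetical guard,
 * not a real build flag.
 */
#ifdef PTHREAD_RWLOCK_EXAMPLES
#include <assert.h>
#include <errno.h>
static void
rwlock_trywrlock_example(pthread_rwlock_t *rw)
{
	assert(pthread_rwlock_rdlock(rw) == 0);
	assert(pthread_rwlock_trywrlock(rw) == EBUSY);	/* reader holds it */
	assert(pthread_rwlock_unlock(rw) == 0);
	assert(pthread_rwlock_trywrlock(rw) == 0);	/* free: state was 0 */
	assert(pthread_rwlock_unlock(rw) == 0);
}
#endif /* PTHREAD_RWLOCK_EXAMPLES */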
int
pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	int ret;
	int writer = (rwlock->state < 0) ? 1 : 0;	/* write-held iff state < 0 */

#if defined(__i386__) || defined(__x86_64__)
	if (usenew_impl != 0) {
		return(_new_pthread_rwlock_unlock(rwlock));
	}
#endif /* __i386__ || __x86_64__ */

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		PLOCKSTAT_RW_ERROR(rwlock, writer, EINVAL);
		return(EINVAL);
	}
#if defined(__i386__) || defined(__x86_64__)
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		ret = _new_pthread_rwlock_unlock(rwlock);
		return(ret);
	}
#endif /* __i386__ || __x86_64__ */

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
		PLOCKSTAT_RW_ERROR(rwlock, writer, ret);
		return(ret);
	}

	if (rwlock->state > 0) {
		/* releasing a read lock */
		if (--rwlock->state == 0 && rwlock->blocked_writers)
			ret = pthread_cond_signal(&rwlock->write_signal);
	} else if (rwlock->state < 0) {
		/* releasing the write lock */
		rwlock->state = 0;
#if __DARWIN_UNIX03
		rwlock->owner = (pthread_t)0;
#endif /* __DARWIN_UNIX03 */
		if (rwlock->blocked_writers)
			ret = pthread_cond_signal(&rwlock->write_signal);
		else
			ret = pthread_cond_broadcast(&rwlock->read_signal);
	} else {
		ret = EINVAL;	/* lock is not held */
	}

	if (ret == 0) {
		PLOCKSTAT_RW_RELEASE(rwlock, writer);
	} else {
		PLOCKSTAT_RW_ERROR(rwlock, writer, ret);
	}

	/* see the comment on this in pthread_rwlock_rdlock */
	pthread_mutex_unlock(&rwlock->lock);

	return(ret);
}
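
/*
 * Wake-up policy of the unlock path above, summarized (commentary added
 * here, not in the original source).  With state s and blocked_writers w
 * at unlock time:
 *
 *	s > 0, --s > 0		reader leaves, others remain	no wakeup
 *	s > 0, --s == 0, w > 0	last reader leaves		signal one writer
 *	s < 0,           w > 0	writer leaves			signal one writer
 *	s < 0,           w == 0	writer leaves			broadcast readers
 *	s == 0			nothing held			EINVAL
 *
 * Writers are always preferred, matching the writer-priority wait loops
 * in the lock paths.
 */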
int
pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	int ret;
#if __DARWIN_UNIX03
	pthread_t self = pthread_self();
#endif /* __DARWIN_UNIX03 */

#if defined(__i386__) || defined(__x86_64__)
	if (usenew_impl != 0) {
		return(_new_pthread_rwlock_wrlock(rwlock));
	}
#endif /* __i386__ || __x86_64__ */

	/* check for static initialization */
	if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
		if ((ret = pthread_rwlock_init(rwlock, NULL)) != 0) {
			PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
			return(ret);
		}
	}

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, EINVAL);
		return(EINVAL);
	}
#if defined(__i386__) || defined(__x86_64__)
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		ret = _new_pthread_rwlock_wrlock(rwlock);
		return(ret);
	}
#endif /* __i386__ || __x86_64__ */

	/* grab the monitor lock */
	if ((ret = pthread_mutex_lock(&rwlock->lock)) != 0) {
		PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
		return(ret);
	}

#if __DARWIN_UNIX03
	/* a thread that already owns the write lock must not deadlock on itself */
	if ((rwlock->state < 0) && (rwlock->owner == self)) {
		pthread_mutex_unlock(&rwlock->lock);
		PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, EDEADLK);
		return(EDEADLK);
	}
#endif /* __DARWIN_UNIX03 */

	while (rwlock->state != 0) {
		++rwlock->blocked_writers;

		PLOCKSTAT_RW_BLOCK(rwlock, WRITE_LOCK_PLOCKSTAT);
		ret = pthread_cond_wait(&rwlock->write_signal, &rwlock->lock);
		if (ret != 0) {
			--rwlock->blocked_writers;
			pthread_mutex_unlock(&rwlock->lock);
			PLOCKSTAT_RW_BLOCKED(rwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_FAIL_PLOCKSTAT);
			PLOCKSTAT_RW_ERROR(rwlock, WRITE_LOCK_PLOCKSTAT, ret);
			return(ret);
		}
		PLOCKSTAT_RW_BLOCKED(rwlock, WRITE_LOCK_PLOCKSTAT, BLOCK_SUCCESS_PLOCKSTAT);

		--rwlock->blocked_writers;
	}

	/* indicate we are locked for writing */
	rwlock->state = -1;
#if __DARWIN_UNIX03
	rwlock->owner = self;
#endif /* __DARWIN_UNIX03 */
	PLOCKSTAT_RW_ACQUIRE(rwlock, WRITE_LOCK_PLOCKSTAT);

	/* see the comment on this in pthread_rwlock_rdlock */
	pthread_mutex_unlock(&rwlock->lock);

	return(0);
}
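
/*
 * Usage sketch (not part of the original source): the reader/writer
 * pattern these entry points implement, starting from the static
 * initializer (the _PTHREAD_RWLOCK_SIG_init checks above handle
 * PTHREAD_RWLOCK_INITIALIZER on first use).  PTHREAD_RWLOCK_EXAMPLES is
 * a hypothetical guard, not a real build flag.
 */
#ifdef PTHREAD_RWLOCK_EXAMPLES
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;
static int table_value;

static int
table_read(void)
{
	int v;

	pthread_rwlock_rdlock(&table_lock);	/* shared: readers overlap */
	v = table_value;
	pthread_rwlock_unlock(&table_lock);
	return(v);
}

static void
table_write(int v)
{
	pthread_rwlock_wrlock(&table_lock);	/* exclusive: drains readers */
	table_value = v;
	pthread_rwlock_unlock(&table_lock);
}
#endif /* PTHREAD_RWLOCK_EXAMPLES */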