2 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
24 * Copyright (c) 1998 Alex Nash
25 * All rights reserved.
27 * Redistribution and use in source and binary forms, with or without
28 * modification, are permitted provided that the following conditions
30 * 1. Redistributions of source code must retain the above copyright
31 * notice, this list of conditions and the following disclaimer.
32 * 2. Redistributions in binary form must reproduce the above copyright
33 * notice, this list of conditions and the following disclaimer in the
34 * documentation and/or other materials provided with the distribution.
36 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
37 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
38 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
39 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
40 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
41 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
42 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
43 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
44 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
45 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
48 * $FreeBSD: src/lib/libc_r/uthread/uthread_rwlock.c,v 1.6 2001/04/10 04:19:20 deischen Exp $
52 * POSIX Pthread Library
53 * -- Read Write Lock support
58 #include "pthread_internals.h"
59 #include <stdio.h> /* For printf(). */
61 extern int __unix_conforming
;
64 #include "plockstat.h"
65 #else /* !PLOCKSTAT */
66 #define PLOCKSTAT_RW_ERROR(x, y, z)
67 #define PLOCKSTAT_RW_BLOCK(x, y)
68 #define PLOCKSTAT_RW_BLOCKED(x, y, z)
69 #define PLOCKSTAT_RW_ACQUIRE(x, y)
70 #define PLOCKSTAT_RW_RELEASE(x, y)
71 #endif /* PLOCKSTAT */
73 #define READ_LOCK_PLOCKSTAT 0
74 #define WRITE_LOCK_PLOCKSTAT 1
76 #define BLOCK_FAIL_PLOCKSTAT 0
77 #define BLOCK_SUCCESS_PLOCKSTAT 1
79 /* maximum number of times a read lock may be obtained */
80 #define MAX_READ_LOCKS (INT_MAX - 1)
83 #ifndef BUILDING_VARIANT /* [ */
84 __private_extern__
int usenew_impl
= 1;
85 #else /* BUILDING_VARIANT */
86 extern int usenew_impl
;
87 #endif /* BUILDING_VARIANT */
89 extern int PR_5243343_flag
;
/*
 * RWLOCK_GETSEQ_ADDR -- select the lock-count (L), sequence (S) and
 * unlock-count (U) word addresses inside rwlock->rw_seq[], shifting all
 * three up by one slot when the rwlock object is not 8-byte aligned
 * (rwlock->misalign != 0) so the L/S pair stays 64-bit aligned for the
 * atomic 64-bit compare-and-swap used elsewhere in this file.
 *
 * NOTE(review): this span is a mangled extraction. The macro appears twice
 * because the original wrapped the two copies in #if defined(__LP64__) /
 * #else (only the trailing "#endif __LP64__" survived), and the "} else {"
 * and closing lines of each body are missing from view. Do not compile
 * as-is; restore the missing lines from the upstream source first.
 */
92 #define RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr) \
94 if (rwlock->misalign != 0) { \
95 lcntaddr = &rwlock->rw_seq[1]; \
96 seqaddr = &rwlock->rw_seq[2]; \
97 ucntaddr = &rwlock->rw_seq[3]; \
99 lcntaddr = &rwlock->rw_seq[0]; \
100 seqaddr = &rwlock->rw_seq[1]; \
101 ucntaddr = &rwlock->rw_seq[2]; \
105 #define RWLOCK_GETSEQ_ADDR(rwlock, lcntaddr, ucntaddr, seqaddr) \
107 if (rwlock->misalign != 0) { \
108 lcntaddr = &rwlock->rw_seq[1]; \
109 seqaddr = &rwlock->rw_seq[2]; \
110 ucntaddr = &rwlock->rw_seq[3]; \
112 lcntaddr = &rwlock->rw_seq[0]; \
113 seqaddr = &rwlock->rw_seq[1]; \
114 ucntaddr = &rwlock->rw_seq[2]; \
117 #endif /* __LP64__ */
119 __private_extern__
int __pthread_rwlock_init(pthread_rwlock_t
*rwlock
, const pthread_rwlockattr_t
*attr
);
122 #define _KSYN_TRACE_ 0
125 #include <sys/sysctl.h>
126 #ifndef BUILDING_VARIANT /* [ */
127 static void set_enable(int);
128 #endif /* !BUILDING_VARIANT ] */
130 /* The Function qualifiers */
131 #define DBG_FUNC_START 1
132 #define DBG_FUNC_END 2
133 #define DBG_FUNC_NONE 0
135 int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
137 #define _KSYN_TRACE_RW_RDLOCK 0x9000080
138 #define _KSYN_TRACE_RW_WRLOCK 0x9000084
139 #define _KSYN_TRACE_RW_UNLOCK 0x9000088
140 #define _KSYN_TRACE_RW_UNACT1 0x900808c
141 #define _KSYN_TRACE_RW_UNACT2 0x9008090
142 #define _KSYN_TRACE_RW_UNACTK 0x9008094
143 #define _KSYN_TRACE_RW_UNACTE 0x9008098
144 #define _KSYN_TRACE_RW_UNACTR 0x900809c
145 #define _KSYN_TRACE_RW_TOOMANY 0x90080a0
146 #define _KSYN_TRACE_RW_TRYWRLOCK 0x90080a4
147 #define _KSYN_TRACE_RW_TRYRDLOCK 0x90080a8
148 #endif /* _KSYN_TRACE_ */
150 __private_extern__
void rwlock_action_onreturn(pthread_rwlock_t
* rwlock
, uint32_t updateval
);
151 __private_extern__
int rw_diffgenseq(uint32_t x
, uint32_t y
);
153 #ifndef BUILDING_VARIANT /* [ */
154 static uint32_t modbits(uint32_t lgenval
, uint32_t updateval
, uint32_t savebits
);
/*
 * pthread_rwlockattr_init -- initialize a rwlock attribute object.
 * Stamps the attribute signature and sets the default process-shared
 * disposition.
 * NOTE(review): mangled extraction; the opening brace, the return
 * statement and the closing brace (original lines 158/161-162) are
 * missing from this view.
 */
157 pthread_rwlockattr_init(pthread_rwlockattr_t
*attr
)
/* Mark *attr as a validly initialized attribute structure. */
159 attr
->sig
= _PTHREAD_RWLOCK_ATTR_SIG
;
/* Default pshared setting until pthread_rwlockattr_setpshared changes it. */
160 attr
->pshared
= _PTHREAD_DEFAULT_PSHARED
;
/*
 * pthread_rwlockattr_destroy -- invalidate a rwlock attribute object by
 * clearing its signature so later uses are rejected.
 * NOTE(review): mangled extraction; the braces and return statement
 * (original lines 166/168-169) are missing from this view.
 */
165 pthread_rwlockattr_destroy(pthread_rwlockattr_t
*attr
)
167 attr
->sig
= _PTHREAD_NO_SIG
; /* Uninitialized */
/*
 * pthread_rwlockattr_getpshared -- fetch the process-shared disposition
 * from an attribute object into *pshared.
 * Returns EINVAL when *attr does not carry the initialized signature.
 * NOTE(review): mangled extraction; the second parameter line
 * ("int *pshared", original line 174), the braces and the success
 * "return 0" path are missing from this view.
 */
173 pthread_rwlockattr_getpshared(const pthread_rwlockattr_t
*attr
,
/* Only a properly initialized attribute may be read. */
176 if (attr
->sig
== _PTHREAD_RWLOCK_ATTR_SIG
)
178 *pshared
= (int)attr
->pshared
;
182 return (EINVAL
); /* Not an initialized 'attribute' structure */
/*
 * pthread_rwlockattr_setpshared -- set the process-shared disposition on
 * an attribute object. Under __DARWIN_UNIX03 both PTHREAD_PROCESS_PRIVATE
 * and PTHREAD_PROCESS_SHARED are accepted; otherwise only
 * PTHREAD_PROCESS_PRIVATE is.
 * Returns EINVAL for an unrecognized pshared value or an uninitialized
 * attribute structure.
 * NOTE(review): mangled extraction; the "#if __DARWIN_UNIX03" line
 * (original line 192), the braces and the success "return 0" path are
 * missing from this view.
 */
188 pthread_rwlockattr_setpshared(pthread_rwlockattr_t
* attr
, int pshared
)
/* Reject attribute objects that were never initialized. */
190 if (attr
->sig
== _PTHREAD_RWLOCK_ATTR_SIG
)
193 if (( pshared
== PTHREAD_PROCESS_PRIVATE
) || (pshared
== PTHREAD_PROCESS_SHARED
))
194 #else /* __DARWIN_UNIX03 */
195 if ( pshared
== PTHREAD_PROCESS_PRIVATE
)
196 #endif /* __DARWIN_UNIX03 */
/* Accepted value: record it in the attribute. */
198 attr
->pshared
= pshared
;
202 return (EINVAL
); /* Invalid parameter */
206 return (EINVAL
); /* Not an initialized 'attribute' structure */
211 __private_extern__
int
212 __pthread_rwlock_init(pthread_rwlock_t
* orwlock
, const pthread_rwlockattr_t
*attr
)
214 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
216 if ((attr
!= NULL
) && (attr
->pshared
== PTHREAD_PROCESS_SHARED
)) {
217 rwlock
->pshared
= PTHREAD_PROCESS_SHARED
;
218 rwlock
->rw_flags
= PTHRW_KERN_PROCESS_SHARED
;
220 rwlock
->pshared
= _PTHREAD_DEFAULT_PSHARED
;
221 rwlock
->rw_flags
= PTHRW_KERN_PROCESS_PRIVATE
;
224 if (((uintptr_t)rwlock
& 0x07) != 0) {
225 rwlock
->misalign
= 1;
226 #if defined(__LP64__)
227 rwlock
->rw_lcntaddr
= &rwlock
->rw_seq
[1];
228 rwlock
->rw_seqaddr
= &rwlock
->rw_seq
[2];
229 rwlock
->rw_ucntaddr
= &rwlock
->rw_seq
[3];
230 rwlock
->rw_seq
[1]= PTHRW_RWLOCK_INIT
;
231 rwlock
->rw_seq
[2]= PTHRW_RWS_INIT
;
232 rwlock
->rw_seq
[3]= 0;
234 rwlock
->rw_lcntaddr
= &rwlock
->rw_seq
[1];
235 rwlock
->rw_seqaddr
= &rwlock
->rw_seq
[2];
236 rwlock
->rw_ucntaddr
= &rwlock
->rw_seq
[3];
237 rwlock
->rw_seq
[1]= PTHRW_RWLOCK_INIT
;
238 rwlock
->rw_seq
[2]= PTHRW_RWS_INIT
;
239 rwlock
->rw_seq
[3]= 0;
240 #endif /* __LP64__ */
243 rwlock
->misalign
= 0;
244 #if defined(__LP64__)
245 rwlock
->rw_lcntaddr
= &rwlock
->rw_seq
[0];
246 rwlock
->rw_seqaddr
= &rwlock
->rw_seq
[1];
247 rwlock
->rw_ucntaddr
= &rwlock
->rw_seq
[2];
248 rwlock
->rw_seq
[0]= PTHRW_RWLOCK_INIT
;
249 rwlock
->rw_seq
[1]= PTHRW_RWS_INIT
;
250 rwlock
->rw_seq
[2]= 0;
252 rwlock
->rw_lcntaddr
= &rwlock
->rw_seq
[0];
253 rwlock
->rw_seqaddr
= &rwlock
->rw_seq
[1];
254 rwlock
->rw_ucntaddr
= &rwlock
->rw_seq
[2];
255 rwlock
->rw_seq
[0]= PTHRW_RWLOCK_INIT
;
256 rwlock
->rw_seq
[1]= PTHRW_RWS_INIT
;
257 rwlock
->rw_seq
[2]= 0;
258 #endif /* __LP64__ */
263 rwlock
->rw_owner
= NULL
;
264 #if defined(__LP64__)
265 memset(rwlock
->rfu
, 0, PTHRW_RFU_64BIT
);
267 memset(rwlock
->rfu
, 0, PTHRW_RFU_32BIT
);
270 rwlock
->sig
= _PTHREAD_RWLOCK_SIG
;
283 mib
[1] = KERN_KDEBUG
;
284 mib
[2] = KERN_KDENABLE
;
288 /* best effort to stop the trace */
289 (void)sysctl(mib
, 4, NULL
, &needed
, NULL
, 0);
/*
 * modbits -- merge the state bits the kernel reported (updateval) into the
 * current lock generation word (lgenval), applying any "saved" wakeup bits
 * (savebits) the kernel asked user space to re-arm.
 *
 * Combines the two bit sets, always dropping the M (multiple/overlap) bit,
 * then reconciles the K (kernel) bit and conditionally re-raises the
 * W (write-waiter), Y (yield-waiter) and U (upgrade) bits per savebits,
 * provided the E (exclusive) bit is clear. The result keeps lgenval's
 * count field and carries the reconciled bits.
 *
 * NOTE(review): mangled extraction; the function's opening/closing braces,
 * its "static uint32_t" header line, several "else"/closing-brace lines
 * (original lines 295, 299, 313, 315, 319, 323, 325-326, 328-329) and the
 * final "return(rval);" are missing from this view.
 */
294 modbits(uint32_t lgenval
, uint32_t updateval
, uint32_t savebits
)
/* Isolate the flag bits of each word; counts are handled separately. */
296 uint32_t lval
= lgenval
& PTHRW_BIT_MASK
;
297 uint32_t uval
= updateval
& PTHRW_BIT_MASK
;
298 uint32_t rval
, nlval
;
/* Union of both bit sets, with the M (overlap) bit always cleared. */
300 nlval
= (lval
| uval
) & ~(PTH_RWL_MBIT
);
302 /* reconcile bits on the lock with what kernel needs to set */
303 if ((uval
& PTH_RWL_LBIT
) != 0)
304 nlval
&= ~PTH_RWL_KBIT
;
305 else if (((uval
& PTH_RWL_KBIT
) == 0) && ((lval
& PTH_RWL_WBIT
) == 0))
306 nlval
&= ~PTH_RWL_KBIT
;
/* Re-arm the write-waiter bit if saved and no exclusive holder. */
309 if (((savebits
& PTH_RWS_WSVBIT
) != 0) && ((nlval
& PTH_RWL_WBIT
) == 0) &&
310 ((nlval
& PTH_RWL_EBIT
) == 0)) {
311 if ((nlval
& PTH_RWL_LBIT
) == 0)
312 nlval
|= (PTH_RWL_WBIT
| PTH_RWL_KBIT
);
314 nlval
|= PTH_RWL_WBIT
;
/* Re-arm the yield-waiter bit if saved and no exclusive holder. */
316 if (((savebits
& PTH_RWS_YSVBIT
) != 0) && ((nlval
& PTH_RWL_YBIT
) == 0) &&
317 ((nlval
& PTH_RWL_EBIT
) == 0)) {
318 nlval
|= PTH_RWL_YBIT
;
/* Re-arm the upgrade bit if saved and no exclusive holder. */
320 if (((savebits
& PTH_RWS_USVBIT
) != 0) && ((nlval
& PTH_RWL_EBIT
) == 0)) {
321 if ((nlval
& PTH_RWL_LBIT
) == 0)
322 nlval
|= (PTH_RWL_UBIT
| PTH_RWL_KBIT
);
324 nlval
|= PTH_RWL_UBIT
;
/* Keep lgenval's count field, attach the reconciled flag bits. */
327 rval
= (lgenval
& PTHRW_COUNT_MASK
) | nlval
;
332 __private_extern__
void
333 rwlock_action_onreturn(pthread_rwlock_t
* orwlock
, uint32_t updateval
)
336 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
337 uint32_t lcntval
, rw_seq
, newval
= 0, newsval
, lval
, uval
;
338 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
339 uint64_t oldval64
, newval64
;
342 uint32_t savebits
= 0;
345 /* TBD: restore U bit */
346 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
347 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
349 lcntaddr
= rwlock
->rw_lcntaddr
;
350 seqaddr
= rwlock
->rw_seqaddr
;
354 if (__pthread_lock_debug
!= 0)
355 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1
| DBG_FUNC_START
, updateval
, 0, 0, 0, 0);
358 isoverlap
= updateval
& PTH_RWL_MBIT
;
366 if (isoverlap
!= 0) {
367 /* overlap return, just increment and inspect bits */
370 /* set s word, increment by specified value */
371 newsval
= rw_seq
+ (updateval
& PTHRW_COUNT_MASK
);
372 if ((newsval
& PTHRW_RWS_SAVEMASK
) != 0) {
373 savebits
= newsval
& PTHRW_RWS_SAVEMASK
;
374 newsval
&= ~PTHRW_RWS_SAVEMASK
;
378 if (is_rws_setunlockinit(rw_seq
) != 0) {
380 /* set s word to passed in value */
381 newsval
= (rw_seq
& PTHRW_COUNT_MASK
) + (updateval
& PTHRW_COUNT_MASK
);
382 if ((rw_seq
& PTHRW_RWS_SAVEMASK
) != 0) {
383 savebits
= rw_seq
& PTHRW_RWS_SAVEMASK
;
384 newsval
&= ~PTHRW_RWS_SAVEMASK
;
392 newval
= modbits(lcntval
, updateval
, savebits
);
395 if (__pthread_lock_debug
!= 0)
396 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1
| DBG_FUNC_NONE
, rw_seq
, newsval
, 0xeeeeeeee, updateval
, 0);
397 if (__pthread_lock_debug
!= 0)
398 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1
| DBG_FUNC_NONE
, lcntval
, newval
, 0xeeeeeeee, updateval
, 0);
400 oldval64
= (((uint64_t)rw_seq
) << 32);
402 newval64
= (((uint64_t)newsval
) << 32);
405 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
)
407 /* Check for consistency */
408 lval
= lcntval
& PTHRW_BIT_MASK
;
409 uval
= updateval
& PTHRW_BIT_MASK
;
413 if (__pthread_lock_debug
!= 0)
414 (void)__kdebug_trace(_KSYN_TRACE_RW_UNACT1
| DBG_FUNC_END
, rw_seq
, newsval
, 0xffffffff, 0, 0);
/*
 * rw_diffgenseq -- distance between two generation/sequence words x and y,
 * comparing only their count fields (PTHRW_COUNT_MASK) and accounting for
 * wraparound of the counter.
 * NOTE(review): mangled extraction; the function's braces and the
 * non-wrapped branch (original lines 425-428: "if (lx > ly) return
 * (lx - ly); else") are missing from this view — only the wraparound
 * return survives below.
 */
419 /* returns are not bit shifted */
420 __private_extern__
int
421 rw_diffgenseq(uint32_t x
, uint32_t y
)
423 uint32_t lx
= (x
& PTHRW_COUNT_MASK
);
424 uint32_t ly
= (y
&PTHRW_COUNT_MASK
);
/* Wraparound case: distance from y up to the max plus x's count. */
429 return((PTHRW_MAX_READERS
- y
) + lx
+ PTHRW_INC
);
435 /********************************************************** */
436 static int pthread_rwlock_upgrade_internal(pthread_rwlock_t
* orwlock
, int trylock
);
439 pthread_rwlock_longrdlock_np(pthread_rwlock_t
* orwlock
)
442 uint32_t lcntval
, ucntval
, rw_seq
, newval
, newsval
, updateval
;
443 int error
= 0, retry_count
= 0;
444 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
445 uint64_t oldval64
, newval64
;
446 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
449 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
451 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
452 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
453 UNLOCK(rwlock
->lock
);
454 PLOCKSTAT_RW_ERROR(orwlock
, READ_LOCK_PLOCKSTAT
, error
);
457 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
458 UNLOCK(rwlock
->lock
);
459 PLOCKSTAT_RW_ERROR(orwlock
, READ_LOCK_PLOCKSTAT
, EINVAL
);
462 UNLOCK(rwlock
->lock
);
465 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
466 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
468 lcntaddr
= rwlock
->rw_lcntaddr
;
469 ucntaddr
= rwlock
->rw_ucntaddr
;
470 seqaddr
= rwlock
->rw_seqaddr
;
478 if (can_rwl_longreadinuser(lcntval
))
482 if (is_rwl_ebit_set(lcntval
)) {
483 self
= pthread_self();
484 if(rwlock
->rw_owner
== self
) {
489 #endif /* __DARWIN_UNIX03 */
491 /* need to block in kernel */
492 newval
= (lcntval
+ PTHRW_INC
);
495 if (is_rws_setseq(rw_seq
)) {
496 newsval
&= PTHRW_SW_Reset_BIT_MASK
;
497 newsval
|= (newval
& PTHRW_COUNT_MASK
);
500 /* update lock seq and block in kernel */
502 oldval64
= (((uint64_t)rw_seq
) << 32);
505 newval64
= (((uint64_t)(newsval
)) << 32);
508 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
)
511 updateval
= __psynch_rw_longrdlock(orwlock
, newval
, ucntval
, newsval
, rwlock
->rw_flags
);
512 if (updateval
== (uint32_t)-1) {
521 rwlock_action_onreturn(orwlock
, updateval
);
522 if ( is_rwl_lbit_clear(updateval
)) {
525 #endif /* _KSYN_TRACE_ */
526 (void)pthread_threadid_np(pthread_self(), &myid
);
527 LIBC_ABORT("yieldwrlock from kernel without EBit %x: tid %x\n", updateval
, (uint32_t)myid
);
528 /* kernel cannot wakeup without granting E bit */
534 #endif /* _KSYN_TRACE_ */
535 (void)pthread_threadid_np(pthread_self(), &myid
);
536 LIBC_ABORT("yieldwrlock from kernel with unknown error %x: tid %x\n", updateval
, (uint32_t)myid
);
541 if (rw_diffgenseq(lcntval
, ucntval
) >= PTHRW_MAX_READERS
) {
542 /* since ucntval may be newer, just redo */
544 if (retry_count
> 1024) {
547 if (__pthread_lock_debug
!= 0)
548 (void)__kdebug_trace(_KSYN_TRACE_RW_TOOMANY
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0XEEEEEEEE, lcntval
, ucntval
, 0);
558 /* Need to update L and S word */
559 newval
= (lcntval
+ PTHRW_INC
) | PTH_RWL_LBIT
;
560 newsval
= (rw_seq
+ PTHRW_INC
);
562 oldval64
= (((uint64_t)rw_seq
) << 32);
564 newval64
= (((uint64_t)newsval
) << 32);
567 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
)
571 PLOCKSTAT_RW_ACQUIRE(orwlock
, READ_LOCK_PLOCKSTAT
);
574 PLOCKSTAT_RW_ERROR(orwlock
, READ_LOCK_PLOCKSTAT
, error
);
579 pthread_rwlock_yieldwrlock_np(pthread_rwlock_t
* orwlock
)
581 uint32_t lcntval
, ucntval
, rw_seq
, newval
, newsval
, updateval
;
584 pthread_t self
= pthread_self();
585 #endif /* __DARWIN_UNIX03 */
586 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
587 uint64_t oldval64
, newval64
;
588 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
591 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
593 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
594 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
595 UNLOCK(rwlock
->lock
);
596 PLOCKSTAT_RW_ERROR(orwlock
, WRITE_LOCK_PLOCKSTAT
, error
);
599 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
600 UNLOCK(rwlock
->lock
);
601 PLOCKSTAT_RW_ERROR(orwlock
, WRITE_LOCK_PLOCKSTAT
, EINVAL
);
604 UNLOCK(rwlock
->lock
);
607 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
608 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
610 lcntaddr
= rwlock
->rw_lcntaddr
;
611 ucntaddr
= rwlock
->rw_ucntaddr
;
612 seqaddr
= rwlock
->rw_seqaddr
;
621 if (is_rwl_ebit_set(lcntval
)) {
622 if (rwlock
->rw_owner
== self
) {
627 #endif /* __DARWIN_UNIX03 */
629 if (lcntval
== PTHRW_RWL_INIT
) {
630 /* if we can acquire set L and S word */
631 lcntval
= PTHRW_RWL_INIT
;
632 newval
= PTHRW_RWL_INIT
| PTHRW_INC
| PTH_RWL_KBIT
| PTH_RWL_EBIT
;
633 newsval
= rw_seq
+ PTHRW_INC
;
635 oldval64
= (((uint64_t)rw_seq
) << 32);
638 newval64
= (((uint64_t)newsval
) << 32);
641 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) == TRUE
) {
647 newval
= (lcntval
+ PTHRW_INC
)| PTH_RWL_YBIT
;
650 if (is_rws_setseq(rw_seq
)) {
651 newsval
&= PTHRW_SW_Reset_BIT_MASK
;
652 newsval
|= (newval
& PTHRW_COUNT_MASK
);
655 oldval64
= (((uint64_t)rw_seq
) << 32);
658 newval64
= (((uint64_t)(newsval
)) << 32);
661 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
)
664 PLOCKSTAT_RW_BLOCK(orwlock
, WRITE_LOCK_PLOCKSTAT
);
666 updateval
= __psynch_rw_yieldwrlock(orwlock
, newval
, ucntval
, newsval
, rwlock
->rw_flags
);
667 if (updateval
== (uint32_t)-1) {
676 PLOCKSTAT_RW_BLOCKED(orwlock
, WRITE_LOCK_PLOCKSTAT
, BLOCK_SUCCESS_PLOCKSTAT
);
680 #endif /* _KSYN_TRACE_ */
681 (void)pthread_threadid_np(pthread_self(), &myid
);
682 LIBC_ABORT("yieldwrlock from kernel with unknown error %x: tid %x\n", updateval
, (uint32_t)myid
);
689 rwlock_action_onreturn(orwlock
, updateval
);
690 if ( is_rwl_ebit_clear(updateval
)) {
693 #endif /* _KSYN_TRACE_ */
694 (void)pthread_threadid_np(pthread_self(), &myid
);
695 LIBC_ABORT("yieldwrlock from kernel without EBit %x: tid %x\n", updateval
, (uint32_t)myid
);
698 rwlock
->rw_owner
= self
;
699 #endif /* __DARWIN_UNIX03 */
700 PLOCKSTAT_RW_ACQUIRE(orwlock
, WRITE_LOCK_PLOCKSTAT
);
703 PLOCKSTAT_RW_ERROR(orwlock
, WRITE_LOCK_PLOCKSTAT
, error
);
708 pthread_rwlock_downgrade_np(pthread_rwlock_t
* orwlock
)
710 uint32_t lcntval
, ucntval
, rw_seq
, newval
, newsval
, updateval
;
711 int error
= 0, haswbit
= 0, hasubit
= 0, hasybit
= 0;
713 pthread_t self
= pthread_self();
714 #endif /* __DARWIN_UNIX03 */
715 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
716 uint64_t oldval64
, newval64
;
717 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
720 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
722 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
723 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
724 UNLOCK(rwlock
->lock
);
725 PLOCKSTAT_RW_ERROR(orwlock
, READ_LOCK_PLOCKSTAT
, error
);
728 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
729 UNLOCK(rwlock
->lock
);
730 PLOCKSTAT_RW_ERROR(orwlock
, READ_LOCK_PLOCKSTAT
, EINVAL
);
733 UNLOCK(rwlock
->lock
);
735 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
736 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
738 lcntaddr
= rwlock
->rw_lcntaddr
;
739 ucntaddr
= rwlock
->rw_ucntaddr
;
740 seqaddr
= rwlock
->rw_seqaddr
;
749 /* if not holding exclusive lock, return */
750 if ((is_rwl_ebit_set(lcntval
)== 0) || (rwlock
->rw_owner
!= self
)) {
754 /* no other waiters and be granted in user space? ? */
755 if ((lcntval
& PTHRW_COUNT_MASK
) == (ucntval
+ PTHRW_INC
)) {
757 /* should have no write waiters pending */
758 if (is_rwl_wbit_set(lcntval
) != 0) {
761 #endif /* _KSYN_TRACE_ */
762 (void)pthread_threadid_np(pthread_self(), &myid
);
763 LIBC_ABORT("downgrade in user mode but W bit set %x: tid %x\n", lcntval
, (uint32_t)myid
);
766 /* preserve count and remove ke bits */
767 newval
= lcntval
& ~(PTH_RWL_EBIT
| PTH_RWL_KBIT
);
768 /* if we can acquire set L and S word */
771 oldval64
= (((uint64_t)rw_seq
) << 32);
774 newval64
= (((uint64_t)newsval
) << 32);
777 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) == TRUE
) {
779 rwlock
->rw_owner
= (pthread_t
)0;
780 #endif /* __DARWIN_UNIX03 */
786 haswbit
= lcntval
& PTH_RWL_WBIT
;
787 hasubit
= lcntval
& PTH_RWL_UBIT
;
788 hasybit
= lcntval
& PTH_RWL_YBIT
;
790 /* reset all bits and set k */
791 newval
= (lcntval
& PTHRW_COUNT_MASK
) | PTH_RWL_KBIT
;
792 /* set I bit on S word */
793 newsval
= rw_seq
| PTH_RWS_IBIT
;
795 newsval
|= PTH_RWS_WSVBIT
;
797 newsval
|= PTH_RWS_USVBIT
;
799 newsval
|= PTH_RWS_YSVBIT
;
801 oldval64
= (((uint64_t)rw_seq
) << 32);
804 newval64
= (((uint64_t)newsval
) << 32);
807 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
)
811 rwlock
->rw_owner
= 0;
812 #endif /* __DARWIN_UNIX03 */
815 updateval
= __psynch_rw_downgrade(orwlock
, newval
, ucntval
, newsval
, rwlock
->rw_flags
);
816 if (updateval
== (uint32_t)-1) {
821 /* TBD: what to do with the error, EINTR ?? */
826 rwlock_action_onreturn(orwlock
, updateval
);
831 #endif /* _KSYN_TRACE_ */
832 (void)pthread_threadid_np(pthread_self(), &myid
);
833 LIBC_ABORT("downgrade from kernel with unknown error %x with tid %x\n", updateval
, (uint32_t)myid
);
/*
 * pthread_rwlock_upgrade_np -- blocking read-to-write upgrade; thin
 * wrapper that calls pthread_rwlock_upgrade_internal with trylock = 0.
 * NOTE(review): mangled extraction; the function's braces (original
 * lines 842/844) are missing from this view.
 */
841 pthread_rwlock_upgrade_np(pthread_rwlock_t
* orwlock
)
843 return(pthread_rwlock_upgrade_internal(orwlock
, 0));
/*
 * pthread_rwlock_tryupgrade_np -- non-blocking read-to-write upgrade;
 * thin wrapper that calls pthread_rwlock_upgrade_internal with
 * trylock = 1.
 * NOTE(review): mangled extraction; the function's braces (original
 * lines 848/850) are missing from this view.
 */
847 pthread_rwlock_tryupgrade_np(pthread_rwlock_t
*orwlock
)
849 return(pthread_rwlock_upgrade_internal(orwlock
, 1));
853 pthread_rwlock_upgrade_internal(pthread_rwlock_t
* orwlock
, int trylock
)
855 uint32_t lcntval
, ucntval
, rw_seq
, newval
, newsval
, updateval
;
856 int error
= 0, flags
;
858 pthread_t self
= pthread_self();
859 #endif /* __DARWIN_UNIX03 */
860 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
861 uint64_t oldval64
, newval64
;
862 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
865 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
866 /* check for static initialization */
868 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
869 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
870 UNLOCK(rwlock
->lock
);
873 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
874 UNLOCK(rwlock
->lock
);
877 UNLOCK(rwlock
->lock
);
879 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
880 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
882 lcntaddr
= rwlock
->rw_lcntaddr
;
883 ucntaddr
= rwlock
->rw_ucntaddr
;
884 seqaddr
= rwlock
->rw_seqaddr
;
892 if (is_rwl_eubit_set(lcntval
) !=0) {
896 /* set U and K bit and go to kernel */
897 newval
= (lcntval
| (PTH_RWL_UBIT
| PTH_RWL_KBIT
));
900 if (is_rws_setseq(rw_seq
)) {
901 newsval
&= PTHRW_SW_Reset_BIT_MASK
;
902 newsval
|= (newval
& PTHRW_COUNT_MASK
);
906 /* update lock seq and block in kernel */
908 oldval64
= (((uint64_t)rw_seq
) << 32);
911 newval64
= (((uint64_t)(newsval
)) << 32);
914 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
)
916 flags
= rwlock
->rw_flags
;
918 flags
|= _PTHREAD_RWLOCK_UPGRADE_TRY
;
921 updateval
= __psynch_rw_upgrade(orwlock
, newval
, ucntval
, newsval
, rwlock
->rw_flags
);
922 if (updateval
== (uint32_t)-1) {
932 rwlock_action_onreturn(orwlock
, updateval
);
933 if ( is_rwl_ebit_clear(updateval
)) {
936 #endif /* _KSYN_TRACE_ */
937 (void)pthread_threadid_np(pthread_self(), &myid
);
938 LIBC_ABORT("upgrade from kernel without EBit %x: tid %x\n", updateval
, (uint32_t)myid
);
941 rwlock
->rw_owner
= self
;
942 #endif /* __DARWIN_UNIX03 */
953 /* Returns true if the rwlock is held for reading by any thread or held for writing by the current thread */
955 pthread_rwlock_held_np(pthread_rwlock_t
* orwlock
)
957 uint32_t lcntval
, ucntval
, rw_seq
;
959 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
960 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
962 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
964 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
965 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
966 UNLOCK(rwlock
->lock
);
969 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
970 UNLOCK(rwlock
->lock
);
973 UNLOCK(rwlock
->lock
);
976 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
977 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
979 lcntaddr
= rwlock
->rw_lcntaddr
;
980 ucntaddr
= rwlock
->rw_ucntaddr
;
981 seqaddr
= rwlock
->rw_seqaddr
;
988 if ((lcntval
& PTHRW_COUNT_MASK
) == (ucntval
& PTHRW_COUNT_MASK
))
994 /* Returns true if the rwlock is held for reading by any thread */
996 pthread_rwlock_rdheld_np(pthread_rwlock_t
* orwlock
)
998 uint32_t lcntval
, ucntval
, rw_seq
;
1000 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
1001 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
1003 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
1005 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
1006 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
1007 UNLOCK(rwlock
->lock
);
1010 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
1011 UNLOCK(rwlock
->lock
);
1014 UNLOCK(rwlock
->lock
);
1018 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
1019 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
1021 lcntaddr
= rwlock
->rw_lcntaddr
;
1022 ucntaddr
= rwlock
->rw_ucntaddr
;
1023 seqaddr
= rwlock
->rw_seqaddr
;
1026 lcntval
= *lcntaddr
;
1027 ucntval
= *ucntaddr
;
1030 if ((lcntval
& PTHRW_COUNT_MASK
) == (ucntval
& PTHRW_COUNT_MASK
))
1033 if (is_rwl_ebit_set(lcntval
) !=0) {
1039 /* Returns true if the rwlock is held for writing by the current thread */
1041 pthread_rwlock_wrheld_np(pthread_rwlock_t
* orwlock
)
1043 uint32_t lcntval
, ucntval
, rw_seq
;
1044 pthread_t self
= pthread_self();
1045 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
1046 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
1049 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
1051 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
1052 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
1053 UNLOCK(rwlock
->lock
);
1056 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
1057 UNLOCK(rwlock
->lock
);
1060 UNLOCK(rwlock
->lock
);
1063 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
1064 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
1066 lcntaddr
= rwlock
->rw_lcntaddr
;
1067 ucntaddr
= rwlock
->rw_ucntaddr
;
1068 seqaddr
= rwlock
->rw_seqaddr
;
1071 lcntval
= *lcntaddr
;
1072 ucntval
= *ucntaddr
;
1075 if ((is_rwl_ebit_set(lcntval
)) && (rwlock
->rw_owner
== self
)) {
1080 /******************************************************/
1084 #endif /* !BUILDING_VARIANT ] */
1087 pthread_rwlock_destroy(pthread_rwlock_t
*orwlock
)
1089 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
1091 uint32_t rw_lcnt
, rw_ucnt
;
1092 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
1093 #endif /* __DARWIN_UNIX03 */
1095 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
&& rwlock
->sig
!= _PTHREAD_RWLOCK_SIG_init
)
1097 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG
) {
1099 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
1100 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
1102 lcntaddr
= rwlock
->rw_lcntaddr
;
1103 ucntaddr
= rwlock
->rw_ucntaddr
;
1104 seqaddr
= rwlock
->rw_seqaddr
;
1107 rw_lcnt
= *lcntaddr
;
1108 rw_ucnt
= *ucntaddr
;
1110 if((rw_lcnt
& PTHRW_COUNT_MASK
) != rw_ucnt
)
1113 #endif /* __DARWIN_UNIX03 */
1114 //bzero(rwlock, sizeof(npthread_rwlock_t));
1115 rwlock
->sig
= _PTHREAD_NO_SIG
;
1117 } else if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
1118 rwlock
->sig
= _PTHREAD_NO_SIG
;
1126 pthread_rwlock_init(pthread_rwlock_t
* orwlock
, const pthread_rwlockattr_t
*attr
)
1128 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
1130 uint32_t rw_lcnt
, rw_ucnt
;
1131 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
1132 #endif /* __DARWIN_UNIX03 */
1135 if (attr
&& (attr
->sig
!= _PTHREAD_RWLOCK_ATTR_SIG
)) {
1139 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG
) {
1140 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
1141 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
1143 lcntaddr
= rwlock
->rw_lcntaddr
;
1144 ucntaddr
= rwlock
->rw_ucntaddr
;
1145 seqaddr
= rwlock
->rw_seqaddr
;
1148 rw_lcnt
= *lcntaddr
;
1149 rw_ucnt
= *ucntaddr
;
1151 if ((rw_lcnt
& PTHRW_COUNT_MASK
) != rw_ucnt
)
1156 LOCK_INIT(rwlock
->lock
);
1157 return(__pthread_rwlock_init(orwlock
, attr
));
1162 pthread_rwlock_rdlock(pthread_rwlock_t
* orwlock
)
1166 #endif /* __DARWIN_UNIX03 */
1167 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
1168 uint32_t lcntval
, ucntval
, rw_seq
, newval
, newsval
, updateval
;
1169 int error
= 0, retry_count
= 0;
1170 uint64_t oldval64
, newval64
;
1171 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
1174 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
1176 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
1177 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
1178 UNLOCK(rwlock
->lock
);
1179 PLOCKSTAT_RW_ERROR(orwlock
, READ_LOCK_PLOCKSTAT
, error
);
1182 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
1183 UNLOCK(rwlock
->lock
);
1184 PLOCKSTAT_RW_ERROR(orwlock
, READ_LOCK_PLOCKSTAT
, EINVAL
);
1187 UNLOCK(rwlock
->lock
);
1190 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
1191 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
1193 lcntaddr
= rwlock
->rw_lcntaddr
;
1194 ucntaddr
= rwlock
->rw_ucntaddr
;
1195 seqaddr
= rwlock
->rw_seqaddr
;
1199 if (__pthread_lock_debug
!= 0)
1200 (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK
| DBG_FUNC_START
, (uint32_t)rwlock
, 0, 0, 0, 0);
1203 lcntval
= *lcntaddr
;
1204 ucntval
= *ucntaddr
;
1207 if (__pthread_lock_debug
!= 0)
1208 (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, lcntval
, (ucntval
| 0xee), rw_seq
, 0);
1211 /* if l bit is on or u and k bit is clear, acquire lock in userland */
1212 if (can_rwl_readinuser(lcntval
))
1216 if (is_rwl_ebit_set(lcntval
)) {
1217 self
= pthread_self();
1218 if(rwlock
->rw_owner
== self
) {
1223 #endif /* __DARWIN_UNIX03 */
1226 /* Need to block in kernel , remove Rbit */
1227 newval
= (lcntval
+ PTHRW_INC
) & PTH_RWLOCK_RESET_RBIT
;
1230 if (is_rws_setseq(rw_seq
)) {
1231 newsval
&= PTHRW_SW_Reset_BIT_MASK
;
1232 newsval
|= (newval
& PTHRW_COUNT_MASK
);
1235 oldval64
= (((uint64_t)rw_seq
) << 32);
1236 oldval64
|= lcntval
;
1238 newval64
= (((uint64_t)newsval
) << 32);
1241 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
)
1244 /* give writers priority over readers */
1245 PLOCKSTAT_RW_BLOCK(orwlock
, READ_LOCK_PLOCKSTAT
);
1248 if (__pthread_lock_debug
!= 0)
1249 (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, lcntval
, newval
, newsval
, 0);
1253 updateval
= __psynch_rw_rdlock(orwlock
, newval
, ucntval
, newsval
, rwlock
->rw_flags
);
1255 if (updateval
== (uint32_t)-1) {
1264 rwlock_action_onreturn(orwlock
, updateval
);
1265 PLOCKSTAT_RW_BLOCKED(orwlock
, READ_LOCK_PLOCKSTAT
, BLOCK_SUCCESS_PLOCKSTAT
);
1266 PLOCKSTAT_RW_ACQUIRE(orwlock
, READ_LOCK_PLOCKSTAT
);
1269 PLOCKSTAT_RW_BLOCKED(orwlock
, READ_LOCK_PLOCKSTAT
, BLOCK_FAIL_PLOCKSTAT
);
1272 #endif /* _KSYN_TRACE_ */
1273 (void)pthread_threadid_np(pthread_self(), &myid
);
1274 LIBC_ABORT("rdlock from kernel with unknown error %x with tid %x\n", updateval
, (uint32_t)myid
);
1280 if (rw_diffgenseq(lcntval
, ucntval
) >= PTHRW_MAX_READERS
) {
1281 /* since ucntval may be newer, just redo */
1283 if (retry_count
> 1024) {
1286 if (__pthread_lock_debug
!= 0)
1287 (void)__kdebug_trace(_KSYN_TRACE_RW_TOOMANY
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0XEEEEEEEE, lcntval
, ucntval
, 0);
1297 /* Need to update L (remove R bit) and S word */
1298 newval
= (lcntval
+ PTHRW_INC
) & PTH_RWLOCK_RESET_RBIT
;
1299 newsval
= (rw_seq
+ PTHRW_INC
);
1301 oldval64
= (((uint64_t)rw_seq
) << 32);
1302 oldval64
|= lcntval
;
1303 newval64
= (((uint64_t)newsval
) << 32);
1306 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
)
1310 if (__pthread_lock_debug
!= 0)
1311 (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0x55555555, lcntval
, newval
, 0);
1314 PLOCKSTAT_RW_ACQUIRE(orwlock
, READ_LOCK_PLOCKSTAT
);
1316 if (__pthread_lock_debug
!= 0)
1317 (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK
| DBG_FUNC_END
, (uint32_t)rwlock
, 0xAAAAAAAA, 0, 0, 0);
1321 PLOCKSTAT_RW_ERROR(orwlock
, READ_LOCK_PLOCKSTAT
, error
);
1323 if (__pthread_lock_debug
!= 0)
1324 (void)__kdebug_trace(_KSYN_TRACE_RW_RDLOCK
| DBG_FUNC_END
, (uint32_t)rwlock
, 0xAAAAAAAA, error
, 0, 0);
/*
 * pthread_rwlock_tryrdlock — non-blocking attempt to acquire the lock
 * for reading.
 *
 * NOTE(review): this region of the file is a lossy extraction. The
 * embedded original line numbers below jump (e.g. 1343 -> 1346,
 * 1372 -> 1379), so the braces, returns and loop/goto statements that
 * lived between them are NOT visible here. Code tokens are reproduced
 * byte-for-byte; only comments have been added. Claims below are
 * limited to what the surviving lines show.
 */
1330 pthread_rwlock_tryrdlock(pthread_rwlock_t
* orwlock
)
1332 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
1333 uint32_t lcntval
, ucntval
, rw_seq
, newval
, newsval
;
1334 int error
= 0, retry_count
= 0;
1335 uint64_t oldval64
, newval64
;
1336 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
/*
 * Signature check: a lock still carrying the static-initializer
 * signature is lazily initialized here; any other non-matching
 * signature is reported as EINVAL via the PLOCKSTAT error probe.
 * (The returns between these branches are among the missing lines.)
 */
1338 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
1340 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
1341 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
1342 UNLOCK(rwlock
->lock
);
1343 PLOCKSTAT_RW_ERROR(orwlock
, READ_LOCK_PLOCKSTAT
, error
);
1346 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
1347 UNLOCK(rwlock
->lock
);
1348 PLOCKSTAT_RW_ERROR(orwlock
, READ_LOCK_PLOCKSTAT
, EINVAL
);
1351 UNLOCK(rwlock
->lock
);
/*
 * Locate the lock-count (L), unlock-count (U) and sequence (S) words:
 * via RWLOCK_GETSEQ_ADDR for process-shared locks, otherwise from the
 * cached per-lock addresses.
 */
1354 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
1355 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
1357 lcntaddr
= rwlock
->rw_lcntaddr
;
1358 ucntaddr
= rwlock
->rw_ucntaddr
;
1359 seqaddr
= rwlock
->rw_seqaddr
;
/* Snapshot the L and U words (the S read is among the missing lines). */
1363 lcntval
= *lcntaddr
;
1364 ucntval
= *ucntaddr
;
1367 if (__pthread_lock_debug
!= 0)
1368 (void)__kdebug_trace(_KSYN_TRACE_RW_TRYRDLOCK
| DBG_FUNC_START
, (uint32_t)rwlock
, lcntval
, ucntval
, rw_seq
, 0);
1371 /* if l bit is on or u and k bit is clear, acquire lock in userland */
1372 if (can_rwl_readinuser(lcntval
))
/*
 * Overflow guard: if the distance between L and U generation counts
 * reaches PTHRW_MAX_READERS the snapshot is retried; retry_count > 1024
 * leads to the "too many" trace below. The actual retry/bail-out
 * statements are among the missing lines.
 */
1379 if (rw_diffgenseq(lcntval
, ucntval
) >= PTHRW_MAX_READERS
) {
1380 /* since ucntval may be newer, just redo */
1382 if (retry_count
> 1024) {
1385 if (__pthread_lock_debug
!= 0)
1386 (void)__kdebug_trace(_KSYN_TRACE_RW_TOOMANY
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0XEEEEEEEE, lcntval
, ucntval
, 0);
1396 /* Need to update L(remove Rbit ) and S word */
1397 newval
= (lcntval
+ PTHRW_INC
) & PTH_RWLOCK_RESET_RBIT
;
1398 newsval
= (rw_seq
+ PTHRW_INC
);
/* Pack (S << 32) | L into 64-bit old/new images for a single CAS. */
1400 oldval64
= (((uint64_t)rw_seq
) << 32);
1401 oldval64
|= lcntval
;
1402 newval64
= (((uint64_t)newsval
) << 32);
/*
 * Single-shot CAS on the combined S/L word; on failure, presumably the
 * snapshot loop restarts — the branch target is among the missing lines.
 */
1405 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
)
1409 if (__pthread_lock_debug
!= 0)
1410 (void)__kdebug_trace(_KSYN_TRACE_RW_TRYRDLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0x55555555, lcntval
, newval
, 0);
/* Success path: fire the acquire probe (return statement not visible). */
1413 PLOCKSTAT_RW_ACQUIRE(orwlock
, READ_LOCK_PLOCKSTAT
);
/* Failure path: report the error (EBUSY would be the POSIX value — TODO confirm). */
1417 PLOCKSTAT_RW_ERROR(orwlock
, READ_LOCK_PLOCKSTAT
, error
);
1419 if (__pthread_lock_debug
!= 0)
1420 (void)__kdebug_trace(_KSYN_TRACE_RW_TRYRDLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0x55555555, error
, 0, 0);
/*
 * pthread_rwlock_trywrlock — non-blocking attempt to acquire the lock
 * for writing. Succeeds only when the lock is in its "restart" state
 * (R bit set in the L word); otherwise returns busy.
 *
 * NOTE(review): lossy extraction — the embedded original line numbers
 * jump (e.g. 1472 -> 1478, 1487 -> 1492), so intervening braces,
 * returns and #if __DARWIN_UNIX03 guards are missing from this view.
 * Code tokens are reproduced byte-for-byte; only comments were added.
 */
1426 pthread_rwlock_trywrlock(pthread_rwlock_t
* orwlock
)
/* self is recorded as rw_owner on success (UNIX03-conditional code). */
1429 pthread_t self
= pthread_self();
1430 #endif /* __DARWIN_UNIX03 */
1431 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
1432 uint32_t lcntval
, rw_seq
, newval
, newsval
;
1433 int error
= 0, gotlock
= 0;
1434 uint64_t oldval64
, newval64
;
1435 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
/*
 * Signature check with lazy init for statically-initialized locks;
 * unknown signatures report EINVAL. (Returns between branches are
 * among the missing lines.)
 */
1437 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
1439 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
1440 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
1441 UNLOCK(rwlock
->lock
);
1442 PLOCKSTAT_RW_ERROR(orwlock
, WRITE_LOCK_PLOCKSTAT
, error
);
1445 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
1446 UNLOCK(rwlock
->lock
);
1447 PLOCKSTAT_RW_ERROR(orwlock
, WRITE_LOCK_PLOCKSTAT
, EINVAL
);
1450 UNLOCK(rwlock
->lock
);
/* Locate the L/U/S words (indirect lookup for process-shared locks). */
1453 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
1454 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
1456 lcntaddr
= rwlock
->rw_lcntaddr
;
1457 ucntaddr
= rwlock
->rw_ucntaddr
;
1458 seqaddr
= rwlock
->rw_seqaddr
;
1462 if (__pthread_lock_debug
!= 0)
1463 (void)__kdebug_trace(_KSYN_TRACE_RW_TRYWRLOCK
| DBG_FUNC_START
, (uint32_t)rwlock
, 0, 0, 0, 0);
/* Snapshot the L word (S-word read is among the missing lines). */
1466 lcntval
= *lcntaddr
;
1469 /* can we acquire in userland? */
1470 if ((lcntval
& PTH_RWL_RBIT
) != 0) {
/*
 * R bit set: lock is free for a writer. New L = bumped count with
 * interlock (I), kernel-pending (K) and exclusive (E) bits; new S is
 * the bumped sequence.
 */
1471 newval
= ((lcntval
+ PTHRW_INC
) & PTHRW_COUNT_MASK
) | PTH_RWL_IBIT
| PTH_RWL_KBIT
| PTH_RWL_EBIT
;
1472 newsval
= rw_seq
+ PTHRW_INC
;
/* Pack (S << 32) | L images for the combined CAS. */
1478 oldval64
= (((uint64_t)rw_seq
) << 32);
1479 oldval64
|= lcntval
;
1482 newval64
= (((uint64_t)newsval
) << 32);
/*
 * NOTE(review): a second assignment overwrites newval64 with oldval64;
 * presumably this sits in the not-acquirable branch (an #else / else
 * among the missing lines) so the CAS is a pure validation — confirm
 * against the full source.
 */
1485 newval64
= oldval64
;
/* CAS the combined S/L word; failure branch body is not visible. */
1487 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
) {
/* Success: record ownership (UNIX03) and fire the acquire probe. */
1492 rwlock
->rw_owner
= self
;
1493 #endif /* __DARWIN_UNIX03 */
1494 PLOCKSTAT_RW_ACQUIRE(orwlock
, WRITE_LOCK_PLOCKSTAT
);
1496 if (__pthread_lock_debug
!= 0)
1497 (void)__kdebug_trace(_KSYN_TRACE_RW_TRYWRLOCK
| DBG_FUNC_END
, (uint32_t)rwlock
, 0, 0, 0, 0);
/* Busy path: trace and report EBUSY (return statement not visible). */
1502 if (__pthread_lock_debug
!= 0)
1503 (void)__kdebug_trace(_KSYN_TRACE_RW_TRYWRLOCK
| DBG_FUNC_END
, (uint32_t)rwlock
, 0xAAAAAAAA, EBUSY
, 0, 0);
1506 PLOCKSTAT_RW_ERROR(orwlock
, WRITE_LOCK_PLOCKSTAT
, EBUSY
);
/*
 * pthread_rwlock_wrlock — blocking write-lock acquisition. Tries to
 * take the lock in userland with a single CAS on the combined S/L
 * word; if another holder is present it registers a pending writer
 * (W/K bits) and transitions to the kernel via __psynch_rw_wrlock.
 *
 * NOTE(review): lossy extraction — embedded original line numbers jump
 * (e.g. 1564 -> 1569, 1604 -> 1607, 1616 -> 1621), so the deadlock
 * handling, retry loops and returns between them are missing from this
 * view. Code tokens reproduced byte-for-byte; comments only added.
 */
1512 pthread_rwlock_wrlock(pthread_rwlock_t
* orwlock
)
/* self is used for owner tracking / deadlock detection (UNIX03). */
1515 pthread_t self
= pthread_self();
1516 #endif /* __DARWIN_UNIX03 */
1517 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
1518 uint32_t lcntval
, ucntval
, rw_seq
, newval
, newsval
, updateval
;
1519 int error
= 0, gotlock
= 0;
1520 uint64_t oldval64
, newval64
;
1521 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
/* Signature check with lazy init; unknown signature -> EINVAL probe. */
1524 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
1526 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
1527 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
1528 UNLOCK(rwlock
->lock
);
1529 PLOCKSTAT_RW_ERROR(orwlock
, WRITE_LOCK_PLOCKSTAT
, error
);
1532 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
1533 UNLOCK(rwlock
->lock
);
1534 PLOCKSTAT_RW_ERROR(orwlock
, WRITE_LOCK_PLOCKSTAT
, EINVAL
);
1537 UNLOCK(rwlock
->lock
);
/* Locate the L/U/S words (indirect for process-shared locks). */
1540 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
1541 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
1543 lcntaddr
= rwlock
->rw_lcntaddr
;
1544 ucntaddr
= rwlock
->rw_ucntaddr
;
1545 seqaddr
= rwlock
->rw_seqaddr
;
1549 if (__pthread_lock_debug
!= 0)
1550 (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK
| DBG_FUNC_START
, (uint32_t)rwlock
, 0, 0, 0, 0);
/* Snapshot L and U (the S read is among the missing lines). */
1553 lcntval
= *lcntaddr
;
1554 ucntval
= *ucntaddr
;
1558 if (__pthread_lock_debug
!= 0)
1559 (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, lcntval
, ucntval
, rw_seq
, 0);
/*
 * E bit set means another writer holds the lock; owning it ourselves
 * is the self-deadlock case (the EDEADLK handling is among the
 * missing lines — TODO confirm).
 */
1563 if (is_rwl_ebit_set(lcntval
)) {
1564 if(rwlock
->rw_owner
== self
) {
1569 #endif /* __DARWIN_UNIX03 */
1572 if ((lcntval
& PTH_RWL_RBIT
) != 0) {
1573 /* lock is restart state, writer can acquire the lock */
1574 newval
= ((lcntval
+ PTHRW_INC
) & PTHRW_COUNT_MASK
) | PTH_RWL_IBIT
| PTH_RWL_KBIT
| PTH_RWL_EBIT
;
1576 newsval
= rw_seq
+ PTHRW_INC
;
/*
 * Lock held by someone: register as pending writer. With the L bit
 * set only W is added; otherwise K|W. (The else keyword between 1581
 * and 1583 is among the missing lines.)
 */
1580 if (is_rwl_lbit_set(lcntval
))
1581 newval
= (lcntval
+ PTHRW_INC
)| PTH_RWL_WBIT
;
1583 newval
= (lcntval
+ PTHRW_INC
) | PTH_RWL_KBIT
| PTH_RWL_WBIT
;
/* If S carries the "set sequence" state, resync S's count field to L. */
1586 if (is_rws_setseq(rw_seq
)) {
1587 newsval
&= PTHRW_SW_Reset_BIT_MASK
;
1588 newsval
|= (newval
& PTHRW_COUNT_MASK
);
1593 /* update lock seq */
1594 oldval64
= (((uint64_t)rw_seq
) << 32);
1595 oldval64
|= lcntval
;
1597 newval64
= (((uint64_t)newsval
) << 32);
1601 if (__pthread_lock_debug
!= 0)
1602 (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0x55555555, lcntval
, newval
, 0);
/* CAS the combined S/L word; on failure presumably re-loop (not visible). */
1604 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
)
1607 /* lock acquired in userland itself? */
1611 /* unable to acquire in userland, transition to kernel */
1613 PLOCKSTAT_RW_BLOCK(orwlock
, WRITE_LOCK_PLOCKSTAT
);
/* Block in the kernel; updateval carries the post-wakeup lock state. */
1615 updateval
= __psynch_rw_wrlock(orwlock
, newval
, ucntval
, newsval
, rwlock
->rw_flags
);
/*
 * (uint32_t)-1 signals a trap error; EINTR is retried (retry code not
 * visible), anything else aborts the process below.
 */
1616 if (updateval
== (uint32_t)-1) {
1621 if (error
== EINTR
) {
1628 #endif /* _KSYN_TRACE_ */
1629 (void)pthread_threadid_np(pthread_self(), &myid
);
1630 LIBC_ABORT("wrlock from kernel with unknown error %x: tid %x\n", updateval
, (uint32_t)myid
);
1634 if (__pthread_lock_debug
!= 0)
1635 (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0x33333333, newval
, updateval
, 0);
1637 PLOCKSTAT_RW_BLOCKED(orwlock
, WRITE_LOCK_PLOCKSTAT
, BLOCK_SUCCESS_PLOCKSTAT
);
/* Apply any state transitions the kernel handed back in updateval. */
1639 rwlock_action_onreturn(orwlock
, updateval
);
/* Success: record ownership (UNIX03) and fire the acquire probe. */
1642 rwlock
->rw_owner
= self
;
1643 #endif /* __DARWIN_UNIX03 */
1644 PLOCKSTAT_RW_ACQUIRE(orwlock
, WRITE_LOCK_PLOCKSTAT
);
1646 if (__pthread_lock_debug
!= 0)
1647 (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK
| DBG_FUNC_END
, (uint32_t)rwlock
, 0xAAAAAAAA, error
, 0, 0);
/* Error path: report and trace the failure (return not visible). */
1653 #endif /* __DARWIN_UNIX03 */
1654 PLOCKSTAT_RW_ERROR(orwlock
, WRITE_LOCK_PLOCKSTAT
, error
);
1656 if (__pthread_lock_debug
!= 0)
1657 (void)__kdebug_trace(_KSYN_TRACE_RW_WRLOCK
| DBG_FUNC_END
, (uint32_t)rwlock
, 0xAAAAAAAA, error
, 0, 0);
1664 pthread_rwlock_unlock(pthread_rwlock_t
* orwlock
)
1666 npthread_rwlock_t
* rwlock
= (npthread_rwlock_t
*)orwlock
;
1667 uint32_t lcntval
, ucntval
, rw_seq
, newval
, newsval
, updateval
, ulval
;
1668 int error
= 0, wrlock
= 0, haswbit
= 0, hasubit
= 0, hasybit
= 0;
1669 uint64_t oldval64
, newval64
;
1670 volatile uint32_t * lcntaddr
, *ucntaddr
, *seqaddr
;
1673 if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
) {
1675 if (rwlock
->sig
== _PTHREAD_RWLOCK_SIG_init
) {
1676 if ((error
= __pthread_rwlock_init(orwlock
, NULL
)) != 0) {
1677 UNLOCK(rwlock
->lock
);
1678 PLOCKSTAT_RW_ERROR(orwlock
, wrlock
, error
);
1681 } else if (rwlock
->sig
!= _PTHREAD_RWLOCK_SIG
){
1682 UNLOCK(rwlock
->lock
);
1683 PLOCKSTAT_RW_ERROR(orwlock
, wrlock
, EINVAL
);
1686 UNLOCK(rwlock
->lock
);
1689 if (rwlock
->pshared
== PTHREAD_PROCESS_SHARED
) {
1690 RWLOCK_GETSEQ_ADDR(rwlock
, lcntaddr
, ucntaddr
, seqaddr
);
1692 lcntaddr
= rwlock
->rw_lcntaddr
;
1693 ucntaddr
= rwlock
->rw_ucntaddr
;
1694 seqaddr
= rwlock
->rw_seqaddr
;
1698 if (__pthread_lock_debug
!= 0)
1699 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_START
, (uint32_t)rwlock
, 0, 0, 0, 0);
1702 lcntval
= *lcntaddr
;
1703 ucntval
= *ucntaddr
;
1709 if (__pthread_lock_debug
!= 0)
1710 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0x51515151, lcntval
, ucntval
, 0);
1711 if (__pthread_lock_debug
!= 0)
1712 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0x51515151, rw_seq
, 0, 0);
1714 /* check for spurious unlocks */
1715 if ((lcntval
& PTH_RWL_RBIT
) != 0) {
1719 oldval64
= (((uint64_t)rw_seq
) << 32);
1720 oldval64
|= lcntval
;
1722 newval64
= (((uint64_t)newsval
) << 32);
1725 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) == TRUE
) {
1726 /* spurious unlock, return */
1729 if (__pthread_lock_debug
!= 0)
1730 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0x1a1b1c1d, lcntval
, ucntval
, 0);
1737 if (is_rwl_ebit_set(lcntval
)) {
1740 rwlock
->rw_owner
= (pthread_t
)0;
1741 #endif /* __DARWIN_UNIX03 */
1746 ulval
= (ucntval
+ PTHRW_INC
);
1748 if (OSAtomicCompareAndSwap32Barrier(ucntval
, ulval
, (volatile int32_t *)ucntaddr
) != TRUE
)
1752 /* just validate the l and S values */
1753 oldval64
= (((uint64_t)rw_seq
) << 32);
1754 oldval64
|= lcntval
;
1756 newval64
= oldval64
;
1758 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
) {
1759 lcntval
= *lcntaddr
;
1765 if (__pthread_lock_debug
!= 0)
1766 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0xd1d2d3d4, lcntval
, rw_seq
, 0);
1767 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0xd1d2d3d4, ulval
, 0, 0);
1770 /* last unlock, note U is already updated ? */
1771 if((lcntval
& PTHRW_COUNT_MASK
) == (ulval
& PTHRW_COUNT_MASK
)) {
1774 if (__pthread_lock_debug
!= 0)
1775 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0xbbbbbbbb, lcntval
, ucntval
, 0);
1777 /* Set L with R and init bits and set S to L */
1778 newval
= (lcntval
& PTHRW_COUNT_MASK
)| PTHRW_RWLOCK_INIT
;
1779 newsval
= (lcntval
& PTHRW_COUNT_MASK
)| PTHRW_RWS_INIT
;
1781 oldval64
= (((uint64_t)rw_seq
) << 32);
1782 oldval64
|= lcntval
;
1784 newval64
= (((uint64_t)newsval
) << 32);
1787 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
) {
1789 if (__pthread_lock_debug
!= 0)
1790 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0xcccccccc, 0, 0, 0);
1792 lcntval
= *lcntaddr
;
1797 if (__pthread_lock_debug
!= 0)
1798 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0xdddddddd, lcntval
, ucntval
, 0);
1803 /* if it is not exclusive or no Writer/yield pending, skip */
1804 if ((lcntval
& (PTH_RWL_EBIT
| PTH_RWL_WBIT
| PTH_RWL_YBIT
| PTH_RWL_KBIT
)) == 0) {
1808 /* kernel transition needed? */
1810 if ((ulval
+ PTHRW_INC
) != (rw_seq
& PTHRW_COUNT_MASK
)) {
1811 if ((lcntval
& PTH_RWL_UBIT
) != 0) {
1812 /* if U bit is set U + 2 == S ? */
1813 if ((ulval
+ PTHRW_INC
+ PTHRW_INC
) != (rw_seq
& PTHRW_COUNT_MASK
))
1819 haswbit
= lcntval
& PTH_RWL_WBIT
;
1820 hasubit
= lcntval
& PTH_RWL_UBIT
;
1821 hasybit
= lcntval
& PTH_RWL_YBIT
;
1823 /* reset all bits and set k */
1824 newval
= (lcntval
& PTHRW_COUNT_MASK
) | PTH_RWL_KBIT
;
1825 /* set I bit on S word */
1826 newsval
= rw_seq
| PTH_RWS_IBIT
;
1828 newsval
|= PTH_RWS_WSVBIT
;
1830 newsval
|= PTH_RWS_USVBIT
;
1832 newsval
|= PTH_RWS_YSVBIT
;
1834 oldval64
= (((uint64_t)rw_seq
) << 32);
1835 oldval64
|= lcntval
;
1837 newval64
= (((uint64_t)newsval
) << 32);
1840 if (OSAtomicCompareAndSwap64Barrier(oldval64
, newval64
, (volatile int64_t *)lcntaddr
) != TRUE
)
1844 if (__pthread_lock_debug
!= 0)
1845 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0x55555511, 1, ulval
, 0);
1847 updateval
= __psynch_rw_unlock(orwlock
, lcntval
, ulval
, newsval
, rwlock
->rw_flags
);
1848 if (updateval
== (uint32_t)-1) {
1855 /* not sure what is the scenario */
1856 if(error
!= EINTR
) {
1859 #endif /* _KSYN_TRACE_ */
1860 (void)pthread_threadid_np(pthread_self(), &myid
);
1861 LIBC_ABORT("rwunlock from kernel with unknown error %x: tid %x\n", error
, (uint32_t)myid
);
1868 if (__pthread_lock_debug
!= 0)
1869 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_NONE
, (uint32_t)rwlock
, 0x55555522, 3, lcntval
, 0);
1873 PLOCKSTAT_RW_RELEASE(orwlock
, wrlock
);
1875 if (__pthread_lock_debug
!= 0)
1876 (void)__kdebug_trace(_KSYN_TRACE_RW_UNLOCK
| DBG_FUNC_END
, (uint32_t)rwlock
, 0xAAAAAAAA, error
, 0, 0);