/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libc_r/uthread/uthread_rwlock.c,v 1.6 2001/04/10 04:19:20 deischen Exp $
 */

/*
 * POSIX Pthread Library
 * -- Read Write Lock support
 * 4/24/02: A. Ramesh
 *	   Ported from FreeBSD
 */

#include "internal.h"
#include <stdio.h>	/* For printf(). */

extern int __unix_conforming;

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_RW_ERROR(x, y, z)
#define PLOCKSTAT_RW_BLOCK(x, y)
#define PLOCKSTAT_RW_BLOCKED(x, y, z)
#define PLOCKSTAT_RW_ACQUIRE(x, y)
#define PLOCKSTAT_RW_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define READ_LOCK_PLOCKSTAT  0
#define WRITE_LOCK_PLOCKSTAT 1

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

/* maximum number of times a read lock may be obtained */
#define MAX_READ_LOCKS		(INT_MAX - 1)

#include <platform/string.h>
#include <platform/compat.h>

__private_extern__ int __pthread_rwlock_init(_pthread_rwlock *rwlock, const pthread_rwlockattr_t *attr);
__private_extern__ void _pthread_rwlock_updateval(_pthread_rwlock *rwlock, uint32_t updateval);

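/*
 * Resolve the addresses of the three 32-bit words that make up the rwlock
 * state: L (lock/acquire generation count plus flag bits), S (the sequence
 * word the kernel synchronizes on) and U (unlock/consumed count).  For
 * process-shared locks the words live in rw_seq[], shifted by one slot when
 * the array is not 8-byte aligned -- presumably so that the adjacent {L, S}
 * pair stays inside an aligned 64-bit word for the CAS64 operations used
 * below.  For process-private locks the addresses were computed once in
 * __pthread_rwlock_init and cached in the lock structure.
 */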
static void
RWLOCK_GETSEQ_ADDR(_pthread_rwlock *rwlock,
		volatile uint32_t **lcntaddr,
		volatile uint32_t **ucntaddr,
		volatile uint32_t **seqaddr)
{
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		if (rwlock->misalign) {
			*lcntaddr = &rwlock->rw_seq[1];
			*seqaddr = &rwlock->rw_seq[2];
			*ucntaddr = &rwlock->rw_seq[3];
		} else {
			*lcntaddr = &rwlock->rw_seq[0];
			*seqaddr = &rwlock->rw_seq[1];
			*ucntaddr = &rwlock->rw_seq[2];
		}
	} else {
		*lcntaddr = rwlock->rw_lcntaddr;
		*seqaddr = rwlock->rw_seqaddr;
		*ucntaddr = rwlock->rw_ucntaddr;
	}
}

#ifndef BUILDING_VARIANT /* [ */
static uint32_t modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits);

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
	attr->sig = _PTHREAD_RWLOCK_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;
	attr->pshared = 0;
	return 0;
}

int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}

__private_extern__ int
__pthread_rwlock_init(_pthread_rwlock *rwlock, const pthread_rwlockattr_t *attr)
{
	// Force RWLOCK_GETSEQ_ADDR to calculate addresses by setting pshared.
	rwlock->pshared = PTHREAD_PROCESS_SHARED;
	rwlock->misalign = (((uintptr_t)&rwlock->rw_seq[0]) & 0x7) != 0;
	RWLOCK_GETSEQ_ADDR(rwlock, &rwlock->rw_lcntaddr, &rwlock->rw_ucntaddr, &rwlock->rw_seqaddr);
	*rwlock->rw_lcntaddr = PTHRW_RWLOCK_INIT;
	*rwlock->rw_seqaddr = PTHRW_RWS_INIT;
	*rwlock->rw_ucntaddr = 0;

	if (attr != NULL && attr->pshared == PTHREAD_PROCESS_SHARED) {
		rwlock->pshared = PTHREAD_PROCESS_SHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_SHARED;
	} else {
		rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_PRIVATE;
	}

	rwlock->rw_owner = NULL;
	bzero(rwlock->_reserved, sizeof(rwlock->_reserved));

	// Ensure all contents are properly set before setting signature.
	OSMemoryBarrier();
	rwlock->sig = _PTHREAD_RWLOCK_SIG;

	return 0;
}

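/*
 * Merge the flag bits returned by the kernel (updateval) into the current
 * user-space lock word (lgenval), preserving the count portion.  The M bit
 * is always dropped, the K (kernel-wait) bit is cleared when neither side
 * still needs it, and a write-pending indication saved in the S word
 * (PTH_RWS_WSVBIT) re-asserts the W and K bits so a waiting writer is not
 * lost.
 */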
static uint32_t
modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits)
{
	uint32_t lval = lgenval & PTHRW_BIT_MASK;
	uint32_t uval = updateval & PTHRW_BIT_MASK;
	uint32_t rval, nlval;

	nlval = (lval | uval) & ~(PTH_RWL_MBIT);

	/* reconcile bits on the lock with what kernel needs to set */
	if ((uval & PTH_RWL_KBIT) == 0 && (lval & PTH_RWL_WBIT) == 0) {
		nlval &= ~PTH_RWL_KBIT;
	}

	if (savebits != 0) {
		if ((savebits & PTH_RWS_WSVBIT) != 0 && (nlval & PTH_RWL_WBIT) == 0 && (nlval & PTH_RWL_EBIT) == 0) {
			nlval |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
		}
	}
	rval = (lgenval & PTHRW_COUNT_MASK) | nlval;
	return(rval);
}

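/*
 * Called after a successful psynch syscall: fold the kernel's view of the
 * lock (updateval) back into the user-space L and S words with a single
 * 64-bit CAS over the adjacent {L, S} pair.  When the kernel signalled an
 * overlapping grant (M bit) or the S word is marked for unlock-init, both
 * words are rewritten via modbits(); otherwise the CAS re-stores the current
 * values unchanged.
 */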
__private_extern__ void
_pthread_rwlock_updateval(_pthread_rwlock *rwlock, uint32_t updateval)
{
	bool isoverlap = (updateval & PTH_RWL_MBIT) != 0;

	uint64_t oldval64, newval64;
	volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;

	/* TBD: restore U bit */
	RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);

	do {
		uint32_t lcntval = *lcntaddr;
		uint32_t rw_seq = *seqaddr;

		uint32_t newval, newsval;
		if (isoverlap || is_rws_setunlockinit(rw_seq) != 0) {
			// Set S word to the specified value
			uint32_t savebits = (rw_seq & PTHRW_RWS_SAVEMASK);
			newval = modbits(lcntval, updateval, savebits);
			newsval = rw_seq + (updateval & PTHRW_COUNT_MASK);
			if (!isoverlap) {
				newsval &= PTHRW_COUNT_MASK;
			}
			newsval &= ~PTHRW_RWS_SAVEMASK;
		} else {
			newval = lcntval;
			newsval = rw_seq;
		}

		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;
		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;
	} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE);
}

#endif /* !BUILDING_VARIANT ] */

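/*
 * A rwlock is still in use if the lock-acquired generation count (L) does
 * not match the unlock count (U); destroy/init use this to report EBUSY.
 */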
static int
_pthread_rwlock_check_busy(_pthread_rwlock *rwlock)
{
	int res = 0;

	volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;

	RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);

	uint32_t rw_lcnt = *lcntaddr;
	uint32_t rw_ucnt = *ucntaddr;

	if ((rw_lcnt & PTHRW_COUNT_MASK) != rw_ucnt) {
		res = EBUSY;
	}

	return res;
}

int
pthread_rwlock_destroy(pthread_rwlock_t *orwlock)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
#if __DARWIN_UNIX03
		res = _pthread_rwlock_check_busy(rwlock);
#endif /* __DARWIN_UNIX03 */
	} else if (rwlock->sig != _PTHREAD_RWLOCK_SIG_init) {
		res = EINVAL;
	}
	if (res == 0) {
		rwlock->sig = _PTHREAD_NO_SIG;
	}
	return res;
}


int
pthread_rwlock_init(pthread_rwlock_t *orwlock, const pthread_rwlockattr_t *attr)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

#if __DARWIN_UNIX03
	if (attr && attr->sig != _PTHREAD_RWLOCK_ATTR_SIG) {
		res = EINVAL;
	}

	if (res == 0 && rwlock->sig == _PTHREAD_RWLOCK_SIG) {
		res = _pthread_rwlock_check_busy(rwlock);
	}
#endif
	if (res == 0) {
		_PTHREAD_LOCK_INIT(rwlock->lock);
		res = __pthread_rwlock_init(rwlock, attr);
	}
	return res;
}

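/*
 * Slow path for locks created with the static initializer: the first use
 * runs __pthread_rwlock_init under the lock's internal interlock so that
 * concurrent first users do not both initialize it.
 */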
PTHREAD_NOINLINE
static int
_pthread_rwlock_check_init_slow(pthread_rwlock_t *orwlock)
{
	int res = EINVAL;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
		_PTHREAD_LOCK(rwlock->lock);
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			res = __pthread_rwlock_init(rwlock, NULL);
		} else if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
			res = 0;
		}
		_PTHREAD_UNLOCK(rwlock->lock);
	} else if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, res);
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static int
_pthread_rwlock_check_init(pthread_rwlock_t *orwlock)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		return _pthread_rwlock_check_init_slow(orwlock);
	}
	return res;
}

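/*
 * Common read/write lock path.  A CAS loop first tries to take the lock in
 * user space: readers may proceed while can_rwl_readinuser() holds, and a
 * writer only while the R bit (which appears to mark the lock's
 * initial/unlocked state) is still set; the L count is always advanced to
 * record the request.  If the lock cannot be taken and this is not a
 * trylock, the pre-computed L/S values are passed to
 * __psynch_rw_rdlock()/__psynch_rw_wrlock() and the thread blocks in the
 * kernel; the kernel's return value is then merged back via
 * _pthread_rwlock_updateval().
 */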
static int
_pthread_rwlock_lock(pthread_rwlock_t *orwlock, bool readlock, bool trylock)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	res = _pthread_rwlock_check_init(orwlock);
	if (res != 0) {
		return res;
	}

	uint64_t oldval64, newval64;
	volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);

	uint32_t newval, newsval;
	uint32_t lcntval, ucntval, rw_seq;

	bool gotlock;
	bool retry;
	int retry_count = 0;

	do {
		res = 0;
		retry = false;

		lcntval = *lcntaddr;
		ucntval = *ucntaddr;
		rw_seq = *seqaddr;

#if __DARWIN_UNIX03
		if (is_rwl_ebit_set(lcntval)) {
			if (rwlock->rw_owner == pthread_self()) {
				res = EDEADLK;
				break;
			}
		}
#endif /* __DARWIN_UNIX03 */

		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;

		/* if l bit is on or u and k bit is clear, acquire lock in userland */
		if (readlock) {
			gotlock = can_rwl_readinuser(lcntval);
		} else {
			gotlock = (lcntval & PTH_RWL_RBIT) != 0;
		}

		uint32_t bits = 0;
		uint32_t mask = ~0ul;

		newval = lcntval + PTHRW_INC;

		if (gotlock) {
			if (readlock) {
				if (diff_genseq(lcntval, ucntval) >= PTHRW_MAX_READERS) {
					/* since ucntval may be newer, just redo */
					retry_count++;
					if (retry_count > 1024) {
						res = EAGAIN;
						break;
					} else {
						sched_yield();
						retry = true;
						continue;
					}
				}

				// Need to update L (remove R bit) and S word
				mask = PTH_RWLOCK_RESET_RBIT;
			} else {
				mask = PTHRW_COUNT_MASK;
				bits = PTH_RWL_IBIT | PTH_RWL_KBIT | PTH_RWL_EBIT;
			}
			newsval = rw_seq + PTHRW_INC;
		} else if (trylock) {
			res = EBUSY;
			break;
		} else {
			if (readlock) {
				// Need to block in kernel. Remove R bit.
				mask = PTH_RWLOCK_RESET_RBIT;
			} else {
				bits = PTH_RWL_KBIT | PTH_RWL_WBIT;
			}
			newsval = rw_seq;
			if (is_rws_setseq(rw_seq)) {
				newsval &= PTHRW_SW_Reset_BIT_MASK;
				newsval |= (newval & PTHRW_COUNT_MASK);
			}
		}
		newval = (newval & mask) | bits;
		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;

	} while (retry || OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE);

#ifdef PLOCKSTAT
	int plockstat = readlock ? READ_LOCK_PLOCKSTAT : WRITE_LOCK_PLOCKSTAT;
#endif

	// Unable to acquire in userland, transition to kernel.
	if (res == 0 && !gotlock) {
		uint32_t updateval;

		PLOCKSTAT_RW_BLOCK(orwlock, plockstat);

		do {
			if (readlock) {
				updateval = __psynch_rw_rdlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
			} else {
				updateval = __psynch_rw_wrlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
			}
			if (updateval == (uint32_t)-1) {
				res = errno;
			} else {
				res = 0;
			}
		} while (res == EINTR);

		if (res == 0) {
			_pthread_rwlock_updateval(rwlock, updateval);
			PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_SUCCESS_PLOCKSTAT);
		} else {
			PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_FAIL_PLOCKSTAT);
			uint64_t myid;
			(void)pthread_threadid_np(pthread_self(), &myid);
			PTHREAD_ABORT("kernel lock returned unknown error %x with tid %x\n", updateval, (uint32_t)myid);
		}
	}

	if (res == 0) {
#if __DARWIN_UNIX03
		if (!readlock) {
			rwlock->rw_owner = pthread_self();
		}
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, plockstat);
	} else {
		PLOCKSTAT_RW_ERROR(orwlock, plockstat, res);
	}

	return res;
}

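/*
 * The four public entry points below are thin wrappers around
 * _pthread_rwlock_lock().  A minimal caller-side sketch (not part of this
 * library, shown only for illustration):
 *
 *	pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	pthread_rwlock_rdlock(&rw);	// shared: many readers may hold this
 *	// ... read shared data ...
 *	pthread_rwlock_unlock(&rw);
 *
 *	pthread_rwlock_wrlock(&rw);	// exclusive: a single writer
 *	// ... modify shared data ...
 *	pthread_rwlock_unlock(&rw);
 */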
int
pthread_rwlock_rdlock(pthread_rwlock_t *orwlock)
{
	// read lock, no try
	return _pthread_rwlock_lock(orwlock, true, false);
}

int
pthread_rwlock_tryrdlock(pthread_rwlock_t *orwlock)
{
	// read lock, try lock
	return _pthread_rwlock_lock(orwlock, true, true);
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *orwlock)
{
	// write lock, no try
	return _pthread_rwlock_lock(orwlock, false, false);
}

int
pthread_rwlock_trywrlock(pthread_rwlock_t *orwlock)
{
	// write lock, try lock
	return _pthread_rwlock_lock(orwlock, false, true);
}

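/*
 * Unlock path: the unlock count U is bumped first with its own 32-bit CAS,
 * then a 64-bit CAS over {L, S} decides what to do.  If this was the last
 * outstanding hold (L count == U count) the words are reset to their init
 * values entirely in user space.  Otherwise, when a writer or kernel waiter
 * is pending (E/W/K bits) and U + 1 has caught up with the S sequence, the
 * lock is handed to the kernel via __psynch_rw_unlock() so a blocked thread
 * can be woken.
 */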
int
pthread_rwlock_unlock(pthread_rwlock_t *orwlock)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
#ifdef PLOCKSTAT
	int wrlock = 0;
#endif

	res = _pthread_rwlock_check_init(orwlock);
	if (res != 0) {
		return res;
	}

	uint64_t oldval64 = 0, newval64 = 0;
	volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);

	bool droplock;
	bool reload;
	bool incr_ucnt = true;
	bool check_spurious = true;
	uint32_t lcntval, ucntval, rw_seq, ulval = 0, newval, newsval;

	do {
		reload = false;
		droplock = true;

		lcntval = *lcntaddr;
		ucntval = *ucntaddr;
		rw_seq = *seqaddr;

		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;

		// check for spurious unlocks
		if (check_spurious) {
			if ((lcntval & PTH_RWL_RBIT) != 0) {
				droplock = false;

				newval64 = oldval64;
				continue;
			}
			check_spurious = false;
		}

		if (is_rwl_ebit_set(lcntval)) {
#ifdef PLOCKSTAT
			wrlock = 1;
#endif
#if __DARWIN_UNIX03
			rwlock->rw_owner = NULL;
#endif /* __DARWIN_UNIX03 */
		}

		// update U
		if (incr_ucnt) {
			ulval = (ucntval + PTHRW_INC);
			incr_ucnt = (OSAtomicCompareAndSwap32Barrier(ucntval, ulval, (volatile int32_t *)ucntaddr) != TRUE);
			newval64 = oldval64;
			reload = true;
			continue;
		}

		// last unlock, note U is already updated ?
		if ((lcntval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
			/* Set L with R and init bits and set S to L */
			newval = (lcntval & PTHRW_COUNT_MASK) | PTHRW_RWLOCK_INIT;
			newsval = (lcntval & PTHRW_COUNT_MASK) | PTHRW_RWS_INIT;

			droplock = false;
		} else {
			/* if it is not exclusive or no Writer/yield pending, skip */
			if ((lcntval & (PTH_RWL_EBIT | PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0) {
				droplock = false;
				break;
			}

			/* kernel transition needed? */
			/* U+1 == S? */
			if ((ulval + PTHRW_INC) != (rw_seq & PTHRW_COUNT_MASK)) {
				droplock = false;
				break;
			}

			/* reset all bits and set k */
			newval = (lcntval & PTHRW_COUNT_MASK) | PTH_RWL_KBIT;
			/* set I bit on S word */
			newsval = rw_seq | PTH_RWS_IBIT;
			if ((lcntval & PTH_RWL_WBIT) != 0) {
				newsval |= PTH_RWS_WSVBIT;
			}
		}

		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;

	} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE || reload);

	if (droplock) {
		uint32_t updateval;
		do {
			updateval = __psynch_rw_unlock(orwlock, lcntval, ulval, newsval, rwlock->rw_flags);
			if (updateval == (uint32_t)-1) {
				res = errno;
			} else {
				res = 0;
			}
		} while (res == EINTR);

		if (res != 0) {
			uint64_t myid = 0;
			(void)pthread_threadid_np(pthread_self(), &myid);
			PTHREAD_ABORT("rwunlock from kernel with unknown error %x: tid %x\n", res, (uint32_t)myid);
		}
	}

	PLOCKSTAT_RW_RELEASE(orwlock, wrlock);

	return res;
}