/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libc_r/uthread/uthread_rwlock.c,v 1.6 2001/04/10 04:19:20 deischen Exp $
 */

/*
 * POSIX Pthread Library
 * -- Read Write Lock support
 * 4/24/02: A. Ramesh
 *	Ported from FreeBSD
 */

#include "internal.h"
#include <stdio.h>	/* For printf(). */

extern int __unix_conforming;

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_RW_ERROR(x, y, z)
#define PLOCKSTAT_RW_BLOCK(x, y)
#define PLOCKSTAT_RW_BLOCKED(x, y, z)
#define PLOCKSTAT_RW_ACQUIRE(x, y)
#define PLOCKSTAT_RW_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define READ_LOCK_PLOCKSTAT  0
#define WRITE_LOCK_PLOCKSTAT 1

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

/* maximum number of times a read lock may be obtained */
#define MAX_READ_LOCKS		(INT_MAX - 1)

#include <platform/string.h>
#include <platform/compat.h>

__private_extern__ int __pthread_rwlock_init(_pthread_rwlock *rwlock, const pthread_rwlockattr_t *attr);
__private_extern__ void _pthread_rwlock_updateval(_pthread_rwlock *rwlock, uint32_t updateval);

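/*
 * Each rwlock is driven by three 32-bit words: an L word (lcnt: lock/reader
 * count plus state bits), a U word (ucnt: unlock count) and an S word (seq:
 * sequence number used to pair up with the kernel).  For process-shared locks
 * the words live inline in rw_seq[]; when the structure is not 8-byte aligned
 * the slots shift by one, presumably so that the (L, S) pair stays 8-byte
 * aligned for the 64-bit compare-and-swap used below.
 */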
static void
RWLOCK_GETSEQ_ADDR(_pthread_rwlock *rwlock,
		   volatile uint32_t **lcntaddr,
		   volatile uint32_t **ucntaddr,
		   volatile uint32_t **seqaddr)
{
	if (rwlock->pshared == PTHREAD_PROCESS_SHARED) {
		if (rwlock->misalign) {
			*lcntaddr = &rwlock->rw_seq[1];
			*seqaddr = &rwlock->rw_seq[2];
			*ucntaddr = &rwlock->rw_seq[3];
		} else {
			*lcntaddr = &rwlock->rw_seq[0];
			*seqaddr = &rwlock->rw_seq[1];
			*ucntaddr = &rwlock->rw_seq[2];
		}
	} else {
		*lcntaddr = rwlock->rw_lcntaddr;
		*seqaddr = rwlock->rw_seqaddr;
		*ucntaddr = rwlock->rw_ucntaddr;
	}
}

#ifndef BUILDING_VARIANT /* [ */
static uint32_t modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits);

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
	attr->sig = _PTHREAD_RWLOCK_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;
	attr->pshared = 0;
	return 0;
}

int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t * attr, int pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG) {
#if __DARWIN_UNIX03
		if (( pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if ( pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}

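/*
 * Common initialization, used both by pthread_rwlock_init() and by lazy
 * initialization of statically initialized locks.  pshared is forced to
 * PTHREAD_PROCESS_SHARED first so RWLOCK_GETSEQ_ADDR() resolves the inline
 * word addresses, which are then cached; the signature is published last,
 * after a memory barrier.
 */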
__private_extern__ int
__pthread_rwlock_init(_pthread_rwlock *rwlock, const pthread_rwlockattr_t *attr)
{
	// Force RWLOCK_GETSEQ_ADDR to calculate addresses by setting pshared.
	rwlock->pshared = PTHREAD_PROCESS_SHARED;
	rwlock->misalign = (((uintptr_t)&rwlock->rw_seq[0]) & 0x7) != 0;
	RWLOCK_GETSEQ_ADDR(rwlock, &rwlock->rw_lcntaddr, &rwlock->rw_ucntaddr, &rwlock->rw_seqaddr);
	*rwlock->rw_lcntaddr = PTHRW_RWLOCK_INIT;
	*rwlock->rw_seqaddr = PTHRW_RWS_INIT;
	*rwlock->rw_ucntaddr = 0;

	if (attr != NULL && attr->pshared == PTHREAD_PROCESS_SHARED) {
		rwlock->pshared = PTHREAD_PROCESS_SHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_SHARED;
	} else {
		rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_PRIVATE;
	}

	rwlock->rw_owner = NULL;
	bzero(rwlock->_reserved, sizeof(rwlock->_reserved));

	// Ensure all contents are properly set before setting signature.
	OSMemoryBarrier();
	rwlock->sig = _PTHREAD_RWLOCK_SIG;

	return 0;
}

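/*
 * Merge the state bits returned by the kernel (updateval) into the current
 * L word, preserving the count portion and honoring any bits saved in the
 * S word (savebits), e.g. re-asserting the writer and kernel-wait bits when
 * a writer is still pending.
 */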
static uint32_t
modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits)
{
	uint32_t lval = lgenval & PTHRW_BIT_MASK;
	uint32_t uval = updateval & PTHRW_BIT_MASK;
	uint32_t rval, nlval;

	nlval = (lval | uval) & ~(PTH_RWL_MBIT);

	/* reconcile bits on the lock with what kernel needs to set */
	if ((uval & PTH_RWL_KBIT) == 0 && (lval & PTH_RWL_WBIT) == 0) {
		nlval &= ~PTH_RWL_KBIT;
	}

	if (savebits != 0) {
		if ((savebits & PTH_RWS_WSVBIT) != 0 && (nlval & PTH_RWL_WBIT) == 0 && (nlval & PTH_RWL_EBIT) == 0) {
			nlval |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
		}
	}
	rval = (lgenval & PTHRW_COUNT_MASK) | nlval;
	return(rval);
}

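/*
 * Fold the value handed back by a __psynch_rw_* syscall into the userland
 * L and S words: the new L bits come from modbits() and the S word advances
 * by the count portion of updateval.  Both words are updated together with a
 * single 64-bit CAS so the pair is never observed torn.
 */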
__private_extern__ void
_pthread_rwlock_updateval(_pthread_rwlock *rwlock, uint32_t updateval)
{
	bool isoverlap = (updateval & PTH_RWL_MBIT) != 0;

	uint64_t oldval64, newval64;
	volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;

	/* TBD: restore U bit */
	RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);

	do {
		uint32_t lcntval = *lcntaddr;
		uint32_t rw_seq = *seqaddr;

		uint32_t newval, newsval;
		if (isoverlap || is_rws_setunlockinit(rw_seq) != 0) {
			// Set S word to the specified value
			uint32_t savebits = (rw_seq & PTHRW_RWS_SAVEMASK);
			newval = modbits(lcntval, updateval, savebits);
			newsval = rw_seq + (updateval & PTHRW_COUNT_MASK);
			if (!isoverlap) {
				newsval &= PTHRW_COUNT_MASK;
			}
			newsval &= ~PTHRW_RWS_SAVEMASK;
		} else {
			newval = lcntval;
			newsval = rw_seq;
		}

		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;
		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;
	} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE);
}

#endif /* !BUILDING_VARIANT ] */

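/*
 * A lock counts as busy while the count portion of the L word is ahead of
 * the U word, i.e. some acquisitions have not yet been matched by unlocks.
 */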
static int
_pthread_rwlock_check_busy(_pthread_rwlock *rwlock)
{
	int res = 0;

	volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;

	RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);

	uint32_t rw_lcnt = *lcntaddr;
	uint32_t rw_ucnt = *ucntaddr;

	if ((rw_lcnt & PTHRW_COUNT_MASK) != rw_ucnt) {
		res = EBUSY;
	}

	return res;
}

int
pthread_rwlock_destroy(pthread_rwlock_t *orwlock)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
#if __DARWIN_UNIX03
		res = _pthread_rwlock_check_busy(rwlock);
#endif /* __DARWIN_UNIX03 */
	} else if (rwlock->sig != _PTHREAD_RWLOCK_SIG_init) {
		res = EINVAL;
	}
	if (res == 0) {
		rwlock->sig = _PTHREAD_NO_SIG;
	}
	return res;
}


int
pthread_rwlock_init(pthread_rwlock_t *orwlock, const pthread_rwlockattr_t *attr)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

#if __DARWIN_UNIX03
	if (attr && attr->sig != _PTHREAD_RWLOCK_ATTR_SIG) {
		res = EINVAL;
	}

	if (res == 0 && rwlock->sig == _PTHREAD_RWLOCK_SIG) {
		res = _pthread_rwlock_check_busy(rwlock);
	}
#endif
	if (res == 0) {
		LOCK_INIT(rwlock->lock);
		res = __pthread_rwlock_init(rwlock, attr);
	}
	return res;
}

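/*
 * Locks created with PTHREAD_RWLOCK_INITIALIZER carry the _init signature
 * and are initialized lazily on first use, under the interlock, before the
 * real signature is accepted.
 */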
static int
_pthread_rwlock_check_init(pthread_rwlock_t *orwlock)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
	if (rwlock->sig != _PTHREAD_RWLOCK_SIG) {
		res = EINVAL;
		if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
			LOCK(rwlock->lock);
			if (rwlock->sig == _PTHREAD_RWLOCK_SIG_init) {
				res = __pthread_rwlock_init(rwlock, NULL);
			} else if (rwlock->sig == _PTHREAD_RWLOCK_SIG) {
				res = 0;
			}
			UNLOCK(rwlock->lock);
		}
		if (res != 0) {
			PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, res);
		}
	}
	return res;
}

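/*
 * Shared implementation behind rdlock/wrlock and their trylock variants.
 * The fast path acquires the lock entirely in userland by advancing the L
 * word (and, on success, the S word) with a 64-bit CAS.  If the lock cannot
 * be granted in userland and this is not a trylock, the thread parks in the
 * kernel via __psynch_rw_rdlock()/__psynch_rw_wrlock() and folds the
 * returned state back in with _pthread_rwlock_updateval().
 */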
static int
_pthread_rwlock_lock(pthread_rwlock_t *orwlock, bool readlock, bool trylock)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	res = _pthread_rwlock_check_init(orwlock);
	if (res != 0) {
		return res;
	}

	uint64_t oldval64, newval64;
	volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);

	uint32_t newval, newsval;
	uint32_t lcntval, ucntval, rw_seq;

	bool gotlock;
	bool retry;
	int retry_count = 0;

	do {
		res = 0;
		retry = false;

		lcntval = *lcntaddr;
		ucntval = *ucntaddr;
		rw_seq = *seqaddr;

#if __DARWIN_UNIX03
		if (is_rwl_ebit_set(lcntval)) {
			if (rwlock->rw_owner == pthread_self()) {
				res = EDEADLK;
				break;
			}
		}
#endif /* __DARWIN_UNIX03 */

		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;

		/* if l bit is on or u and k bit is clear, acquire lock in userland */
		if (readlock) {
			gotlock = can_rwl_readinuser(lcntval);
		} else {
			gotlock = (lcntval & PTH_RWL_RBIT) != 0;
		}

		uint32_t bits = 0;
		uint32_t mask = ~0ul;

		newval = lcntval + PTHRW_INC;

		if (gotlock) {
			if (readlock) {
				if (diff_genseq(lcntval, ucntval) >= PTHRW_MAX_READERS) {
					/* since ucntval may be newer, just redo */
					retry_count++;
					if (retry_count > 1024) {
						res = EAGAIN;
						break;
					} else {
						sched_yield();
						retry = true;
						continue;
					}
				}

				// Need to update L (remove R bit) and S word
				mask = PTH_RWLOCK_RESET_RBIT;
			} else {
				mask = PTHRW_COUNT_MASK;
				bits = PTH_RWL_IBIT | PTH_RWL_KBIT | PTH_RWL_EBIT;
			}
			newsval = rw_seq + PTHRW_INC;
		} else if (trylock) {
			res = EBUSY;
			break;
		} else {
			if (readlock) {
				// Need to block in kernel. Remove R bit.
				mask = PTH_RWLOCK_RESET_RBIT;
			} else {
				bits = PTH_RWL_KBIT | PTH_RWL_WBIT;
			}
			newsval = rw_seq;
			if (is_rws_setseq(rw_seq)) {
				newsval &= PTHRW_SW_Reset_BIT_MASK;
				newsval |= (newval & PTHRW_COUNT_MASK);
			}
		}
		newval = (newval & mask) | bits;
		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;

	} while (retry || OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE);

#ifdef PLOCKSTAT
	int plockstat = readlock ? READ_LOCK_PLOCKSTAT : WRITE_LOCK_PLOCKSTAT;
#endif

	// Unable to acquire in userland, transition to kernel.
	if (res == 0 && !gotlock) {
		uint32_t updateval;

		PLOCKSTAT_RW_BLOCK(orwlock, plockstat);

		do {
			if (readlock) {
				updateval = __psynch_rw_rdlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
			} else {
				updateval = __psynch_rw_wrlock(orwlock, newval, ucntval, newsval, rwlock->rw_flags);
			}
			if (updateval == (uint32_t)-1) {
				res = errno;
			} else {
				res = 0;
			}
		} while (res == EINTR);

		if (res == 0) {
			_pthread_rwlock_updateval(rwlock, updateval);
			PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_SUCCESS_PLOCKSTAT);
		} else {
			PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_FAIL_PLOCKSTAT);
			uint64_t myid;
			(void)pthread_threadid_np(pthread_self(), &myid);
			PTHREAD_ABORT("kernel lock returned unknown error %x with tid %x\n", updateval, (uint32_t)myid);
		}
	}

	if (res == 0) {
#if __DARWIN_UNIX03
		if (!readlock) {
			rwlock->rw_owner = pthread_self();
		}
#endif /* __DARWIN_UNIX03 */
		PLOCKSTAT_RW_ACQUIRE(orwlock, plockstat);
	} else {
		PLOCKSTAT_RW_ERROR(orwlock, plockstat, res);
	}

	return res;
}

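/*
 * The four public entry points below all funnel into _pthread_rwlock_lock().
 * Illustrative caller-side usage (not part of this file):
 *
 *	pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	pthread_rwlock_rdlock(&lock);	// shared (reader) access
 *	...
 *	pthread_rwlock_unlock(&lock);
 *
 *	pthread_rwlock_wrlock(&lock);	// exclusive (writer) access
 *	...
 *	pthread_rwlock_unlock(&lock);
 */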
int
pthread_rwlock_rdlock(pthread_rwlock_t *orwlock)
{
	// read lock, no try
	return _pthread_rwlock_lock(orwlock, true, false);
}

int
pthread_rwlock_tryrdlock(pthread_rwlock_t *orwlock)
{
	// read lock, try lock
	return _pthread_rwlock_lock(orwlock, true, true);
}

int
pthread_rwlock_wrlock(pthread_rwlock_t *orwlock)
{
	// write lock, no try
	return _pthread_rwlock_lock(orwlock, false, false);
}

int
pthread_rwlock_trywrlock(pthread_rwlock_t *orwlock)
{
	// write lock, try lock
	return _pthread_rwlock_lock(orwlock, false, true);
}

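/*
 * Unlock first bumps the U word, then decides in a CAS loop whether this was
 * the last outstanding hold: if so, the L and S words are reset to their idle
 * values.  Otherwise, when a writer or yield is pending and U + 1 has caught
 * up with S, the lock is handed off through the kernel via
 * __psynch_rw_unlock().
 */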
int
pthread_rwlock_unlock(pthread_rwlock_t *orwlock)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
#ifdef PLOCKSTAT
	int wrlock = 0;
#endif

	res = _pthread_rwlock_check_init(orwlock);
	if (res != 0) {
		return res;
	}

	uint64_t oldval64 = 0, newval64 = 0;
	volatile uint32_t *lcntaddr, *ucntaddr, *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &lcntaddr, &ucntaddr, &seqaddr);

	bool droplock;
	bool reload;
	bool incr_ucnt = true;
	bool check_spurious = true;
	uint32_t lcntval, ucntval, rw_seq, ulval = 0, newval, newsval;

	do {
		reload = false;
		droplock = true;

		lcntval = *lcntaddr;
		ucntval = *ucntaddr;
		rw_seq = *seqaddr;

		oldval64 = (((uint64_t)rw_seq) << 32);
		oldval64 |= lcntval;

		// check for spurious unlocks
		if (check_spurious) {
			if ((lcntval & PTH_RWL_RBIT) != 0) {
				droplock = false;

				newval64 = oldval64;
				continue;
			}
			check_spurious = false;
		}

		if (is_rwl_ebit_set(lcntval)) {
#ifdef PLOCKSTAT
			wrlock = 1;
#endif
#if __DARWIN_UNIX03
			rwlock->rw_owner = NULL;
#endif /* __DARWIN_UNIX03 */
		}

		// update U
		if (incr_ucnt) {
			ulval = (ucntval + PTHRW_INC);
			incr_ucnt = (OSAtomicCompareAndSwap32Barrier(ucntval, ulval, (volatile int32_t *)ucntaddr) != TRUE);
			newval64 = oldval64;
			reload = true;
			continue;
		}

		// last unlock, note U is already updated ?
		if ((lcntval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
			/* Set L with R and init bits and set S to L */
			newval = (lcntval & PTHRW_COUNT_MASK) | PTHRW_RWLOCK_INIT;
			newsval = (lcntval & PTHRW_COUNT_MASK) | PTHRW_RWS_INIT;

			droplock = false;
		} else {
			/* if it is not exclusive or no Writer/yield pending, skip */
			if ((lcntval & (PTH_RWL_EBIT | PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0) {
				droplock = false;
				break;
			}

			/* kernel transition needed? */
			/* U+1 == S? */
			if ((ulval + PTHRW_INC) != (rw_seq & PTHRW_COUNT_MASK)) {
				droplock = false;
				break;
			}

			/* reset all bits and set k */
			newval = (lcntval & PTHRW_COUNT_MASK) | PTH_RWL_KBIT;
			/* set I bit on S word */
			newsval = rw_seq | PTH_RWS_IBIT;
			if ((lcntval & PTH_RWL_WBIT) != 0) {
				newsval |= PTH_RWS_WSVBIT;
			}
		}

		newval64 = (((uint64_t)newsval) << 32);
		newval64 |= newval;

	} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lcntaddr) != TRUE || reload);

	if (droplock) {
		uint32_t updateval;
		do {
			updateval = __psynch_rw_unlock(orwlock, lcntval, ulval, newsval, rwlock->rw_flags);
			if (updateval == (uint32_t)-1) {
				res = errno;
			} else {
				res = 0;
			}
		} while (res == EINTR);

		if (res != 0) {
			uint64_t myid = 0;
			(void)pthread_threadid_np(pthread_self(), &myid);
			PTHREAD_ABORT("rwunlock from kernel with unknown error %x: tid %x\n", res, (uint32_t)myid);
		}
	}

	PLOCKSTAT_RW_RELEASE(orwlock, wrlock);

	return res;
}