/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libc_r/uthread/uthread_rwlock.c,v 1.6 2001/04/10 04:19:20 deischen Exp $
 */

/*
 * POSIX Pthread Library
 * -- Read Write Lock support
 * 4/24/02: A. Ramesh
 * Ported from FreeBSD
 */

#include "resolver.h"
#include "internal.h"
#if DEBUG
#include <platform/compat.h> // for bzero
#endif

extern int __unix_conforming;

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_RW_ERROR(x, y, z)
#define PLOCKSTAT_RW_BLOCK(x, y)
#define PLOCKSTAT_RW_BLOCKED(x, y, z)
#define PLOCKSTAT_RW_ACQUIRE(x, y)
#define PLOCKSTAT_RW_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define READ_LOCK_PLOCKSTAT 0
#define WRITE_LOCK_PLOCKSTAT 1

#define BLOCK_FAIL_PLOCKSTAT 0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#define PTHREAD_RWLOCK_INIT_UNUSED 1

// maximum number of times a read lock may be obtained
#define MAX_READ_LOCKS (INT_MAX - 1)

union rwlock_seq; // forward declaration
enum rwlock_seqfields; // forward declaration

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_rwlock_lock_slow(pthread_rwlock_t *orwlock, bool readlock,
		bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_rwlock_unlock_slow(pthread_rwlock_t *orwlock,
		enum rwlock_seqfields updated_seqfields);


#if defined(__LP64__)
#define RWLOCK_USE_INT128 1
#endif

typedef union rwlock_seq {
	uint32_t seq[4];
	struct { uint32_t lcntval; uint32_t rw_seq; uint32_t ucntval; };
	struct { uint32_t lgen; uint32_t rw_wc; uint32_t ugen; };
#if RWLOCK_USE_INT128
	unsigned __int128 seq_LSU;
	unsigned __int128 _Atomic atomic_seq_LSU;
#endif
	struct {
		uint64_t seq_LS;
		uint32_t seq_U;
		uint32_t _pad;
	};
	struct {
		uint64_t _Atomic atomic_seq_LS;
		uint32_t _Atomic atomic_seq_U;
		uint32_t _Atomic _atomic_pad;
	};
} rwlock_seq;

_Static_assert(sizeof(rwlock_seq) == 4 * sizeof(uint32_t),
		"Incorrect rwlock_seq size");

typedef enum rwlock_seqfields {
	RWLOCK_SEQ_NONE = 0,
	RWLOCK_SEQ_LS = 1,
	RWLOCK_SEQ_U = 2,
	RWLOCK_SEQ_LSU = RWLOCK_SEQ_LS | RWLOCK_SEQ_U,
} rwlock_seqfields;

#if PTHREAD_DEBUG_LOG
#define RWLOCK_DEBUG_SEQ(op, rwlock, oldseq, newseq, updateval, f) \
	if (_pthread_debuglog >= 0) { \
		_simple_dprintf(_pthread_debuglog, "rw_" #op " %p tck %7llu thr %llx " \
				"L %x -> %x S %x -> %x U %x -> %x updt %x\n", rwlock, \
				mach_absolute_time() - _pthread_debugstart, _pthread_selfid_direct(), \
				(f) & RWLOCK_SEQ_LS ? (oldseq).lcntval : 0, \
				(f) & RWLOCK_SEQ_LS ? (newseq).lcntval : 0, \
				(f) & RWLOCK_SEQ_LS ? (oldseq).rw_seq : 0, \
				(f) & RWLOCK_SEQ_LS ? (newseq).rw_seq : 0, \
				(f) & RWLOCK_SEQ_U ? (oldseq).ucntval : 0, \
				(f) & RWLOCK_SEQ_U ? (newseq).ucntval : 0, updateval); }
#else
#define RWLOCK_DEBUG_SEQ(op, rwlock, oldseq, newseq, updateval, f)
#endif

#if !__LITTLE_ENDIAN__
#error RWLOCK_GETSEQ_ADDR assumes little endian layout of sequence words
#endif

PTHREAD_ALWAYS_INLINE
static inline void
RWLOCK_GETSEQ_ADDR(_pthread_rwlock *rwlock, rwlock_seq **seqaddr)
{
	// 128-bit aligned address inside rw_seq & rw_mis arrays
	*seqaddr = (void*)(((uintptr_t)rwlock->rw_seq + 0xful) & ~0xful);
}

PTHREAD_ALWAYS_INLINE
static inline void
RWLOCK_GETTID_ADDR(_pthread_rwlock *rwlock, uint64_t **tidaddr)
{
	// 64-bit aligned address inside rw_tid array (&rw_tid[0] for aligned lock)
	*tidaddr = (void*)(((uintptr_t)rwlock->rw_tid + 0x7ul) & ~0x7ul);
}
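
/*
 * Because pthread_rwlock_t only guarantees 4-byte alignment for binary
 * compatibility, the 64-bit tid and 128-bit sequence words cannot live at
 * fixed offsets; the rw_tid and rw_seq arrays are oversized and the two
 * accessors above round the address up to the next naturally aligned slot.
 */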

PTHREAD_ALWAYS_INLINE
static inline void
rwlock_seq_load(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
		const rwlock_seqfields seqfields)
{
	switch (seqfields) {
	case RWLOCK_SEQ_LSU:
#if RWLOCK_USE_INT128
		oldseqval->seq_LSU = seqaddr->seq_LSU;
#else
		oldseqval->seq_LS = seqaddr->seq_LS;
		oldseqval->seq_U = seqaddr->seq_U;
#endif
		break;
	case RWLOCK_SEQ_LS:
		oldseqval->seq_LS = seqaddr->seq_LS;
		break;
#if DEBUG // unused
	case RWLOCK_SEQ_U:
		oldseqval->seq_U = seqaddr->seq_U;
		break;
#endif // unused
	default:
		__builtin_trap();
	}
}

PTHREAD_ALWAYS_INLINE
static inline void
rwlock_seq_atomic_load_relaxed(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
		const rwlock_seqfields seqfields)
{
	switch (seqfields) {
	case RWLOCK_SEQ_LSU:
#if RWLOCK_USE_INT128
		oldseqval->seq_LSU = os_atomic_load(&seqaddr->atomic_seq_LSU, relaxed);
#else
		oldseqval->seq_LS = os_atomic_load(&seqaddr->atomic_seq_LS, relaxed);
		oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
#endif
		break;
	case RWLOCK_SEQ_LS:
		oldseqval->seq_LS = os_atomic_load(&seqaddr->atomic_seq_LS, relaxed);
		break;
#if DEBUG // unused
	case RWLOCK_SEQ_U:
		oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
		break;
#endif // unused
	default:
		__builtin_trap();
	}
}

#define rwlock_seq_atomic_load(seqaddr, oldseqval, seqfields, m) \
		rwlock_seq_atomic_load_##m(seqaddr, oldseqval, seqfields)

PTHREAD_ALWAYS_INLINE
static inline rwlock_seqfields
rwlock_seq_atomic_cmpxchgv_relaxed(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
		rwlock_seq *newseqval, const rwlock_seqfields seqfields)
{
	bool r;
	rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;
	switch (seqfields) {
#if DEBUG // unused
	case RWLOCK_SEQ_LSU:
#if RWLOCK_USE_INT128
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LSU, oldseqval->seq_LSU,
				newseqval->seq_LSU, &oldseqval->seq_LSU, relaxed);
		if (r) updated_seqfields = RWLOCK_SEQ_LSU;
#else
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
				newseqval->seq_LS, &oldseqval->seq_LS, relaxed);
		if (r) {
			r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
					newseqval->seq_U, &oldseqval->seq_U, relaxed);
			if (!r) oldseqval->seq_LS = newseqval->seq_LS;
			updated_seqfields = r ? RWLOCK_SEQ_LSU : RWLOCK_SEQ_LS;
		} else {
			oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
		}
#endif
		break;
	case RWLOCK_SEQ_U:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
				newseqval->seq_U, &oldseqval->seq_U, relaxed);
		if (r) updated_seqfields = RWLOCK_SEQ_U;
		break;
#endif // unused
	case RWLOCK_SEQ_LS:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
				newseqval->seq_LS, &oldseqval->seq_LS, relaxed);
		if (r) updated_seqfields = RWLOCK_SEQ_LS;
		break;
	default:
		__builtin_trap();
	}
	return updated_seqfields;
}

PTHREAD_ALWAYS_INLINE
static inline rwlock_seqfields
rwlock_seq_atomic_cmpxchgv_acquire(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
		rwlock_seq *newseqval, const rwlock_seqfields seqfields)
{
	bool r;
	rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;
	switch (seqfields) {
#if DEBUG // unused
	case RWLOCK_SEQ_LSU:
#if RWLOCK_USE_INT128
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LSU, oldseqval->seq_LSU,
				newseqval->seq_LSU, &oldseqval->seq_LSU, acquire);
		if (r) updated_seqfields = RWLOCK_SEQ_LSU;
#else
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
				newseqval->seq_LS, &oldseqval->seq_LS, acquire);
		if (r) {
			r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
					newseqval->seq_U, &oldseqval->seq_U, relaxed);
			if (!r) oldseqval->seq_LS = newseqval->seq_LS;
			updated_seqfields = r ? RWLOCK_SEQ_LSU : RWLOCK_SEQ_LS;
		} else {
			oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
		}
#endif
		break;
	case RWLOCK_SEQ_U:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
				newseqval->seq_U, &oldseqval->seq_U, acquire);
		if (r) updated_seqfields = RWLOCK_SEQ_U;
		break;
#endif // unused
	case RWLOCK_SEQ_LS:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
				newseqval->seq_LS, &oldseqval->seq_LS, acquire);
		if (r) updated_seqfields = RWLOCK_SEQ_LS;
		break;
	default:
		__builtin_trap();
	}
	return updated_seqfields;
}

PTHREAD_ALWAYS_INLINE
static inline rwlock_seqfields
rwlock_seq_atomic_cmpxchgv_release(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
		rwlock_seq *newseqval, const rwlock_seqfields seqfields)
{
	bool r;
	rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;
	switch (seqfields) {
	case RWLOCK_SEQ_LSU:
#if RWLOCK_USE_INT128
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LSU, oldseqval->seq_LSU,
				newseqval->seq_LSU, &oldseqval->seq_LSU, release);
		if (r) updated_seqfields = RWLOCK_SEQ_LSU;
#else
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
				newseqval->seq_U, &oldseqval->seq_U, release);
		if (r) {
			r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
					newseqval->seq_LS, &oldseqval->seq_LS, relaxed);
			if (!r) oldseqval->seq_U = newseqval->seq_U;
			updated_seqfields = r ? RWLOCK_SEQ_LSU : RWLOCK_SEQ_U;
		} else {
			oldseqval->seq_LS = os_atomic_load(&seqaddr->atomic_seq_LS, relaxed);
		}
#endif
		break;
	case RWLOCK_SEQ_LS:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
				newseqval->seq_LS, &oldseqval->seq_LS, release);
		if (r) updated_seqfields = RWLOCK_SEQ_LS;
		break;
#if DEBUG // unused
	case RWLOCK_SEQ_U:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
				newseqval->seq_U, &oldseqval->seq_U, release);
		if (r) updated_seqfields = RWLOCK_SEQ_U;
		break;
#endif // unused
	default:
		__builtin_trap();
	}
	return updated_seqfields;
}

#define rwlock_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, seqfields, m)\
		rwlock_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval, seqfields)
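
/*
 * The _relaxed/_acquire/_release suffixes select the memory ordering of the
 * primary compare-and-swap; callers pick one through the
 * rwlock_seq_atomic_cmpxchgv(..., m) macro. On targets without 128-bit
 * atomics the LSU case degrades to two CASes, and the return value reports
 * which of the LS and U fields actually landed so the caller can retry just
 * the remainder.
 */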
360

#ifndef BUILDING_VARIANT /* [ */

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
	attr->sig = _PTHREAD_RWLOCK_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;
	attr->pshared = 0;
	return 0;
}

int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) ||
				(pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}

#endif /* !BUILDING_VARIANT ] */

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_rwlock_init(_pthread_rwlock *rwlock, const pthread_rwlockattr_t *attr)
{
	uint64_t *tidaddr;
	RWLOCK_GETTID_ADDR(rwlock, &tidaddr);

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

#if PTHREAD_RWLOCK_INIT_UNUSED
	if ((uint32_t*)tidaddr != rwlock->rw_tid) {
		rwlock->misalign = 1;
		__builtin_memset(rwlock->rw_tid, 0xff, sizeof(rwlock->rw_tid));
	}
	if ((uint32_t*)seqaddr != rwlock->rw_seq) {
		__builtin_memset(rwlock->rw_seq, 0xff, sizeof(rwlock->rw_seq));
	}
	__builtin_memset(rwlock->rw_mis, 0xff, sizeof(rwlock->rw_mis));
#endif // PTHREAD_RWLOCK_INIT_UNUSED
	*tidaddr = 0;
	*seqaddr = (rwlock_seq){
		.lcntval = PTHRW_RWLOCK_INIT,
		.rw_seq = PTHRW_RWS_INIT,
		.ucntval = 0,
	};

	if (attr != NULL && attr->pshared == PTHREAD_PROCESS_SHARED) {
		rwlock->pshared = PTHREAD_PROCESS_SHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_SHARED;
	} else {
		rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_PRIVATE;
	}

	long sig = _PTHREAD_RWLOCK_SIG;

#if DEBUG
	bzero(rwlock->_reserved, sizeof(rwlock->_reserved));
#endif
#if PTHREAD_RWLOCK_INIT_UNUSED
	// For detecting copied rwlocks and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
	uintptr_t guard = ~(uintptr_t)rwlock; // use ~ to hide from leaks
	__builtin_memcpy(rwlock->_reserved, &guard, sizeof(guard));
#define countof(x) (sizeof(x) / sizeof(x[0]))
	rwlock->_reserved[countof(rwlock->_reserved) - 1] = sig32;
#if defined(__LP64__)
	rwlock->_pad = sig32;
#endif
#endif // PTHREAD_RWLOCK_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&rwlock->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(rwlock, sig, sig, release);
#endif

	return 0;
}

static uint32_t
_pthread_rwlock_modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits)
{
	uint32_t lval = lgenval & PTHRW_BIT_MASK;
	uint32_t uval = updateval & PTHRW_BIT_MASK;
	uint32_t rval, nlval;

	nlval = (lval | uval) & ~(PTH_RWL_MBIT);

	// reconcile bits on the lock with what kernel needs to set
	if ((uval & PTH_RWL_KBIT) == 0 && (lval & PTH_RWL_WBIT) == 0) {
		nlval &= ~PTH_RWL_KBIT;
	}

	if (savebits != 0) {
		if ((savebits & PTH_RWS_WSVBIT) != 0 && (nlval & PTH_RWL_WBIT) == 0 &&
				(nlval & PTH_RWL_EBIT) == 0) {
			nlval |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
		}
	}
	rval = (lgenval & PTHRW_COUNT_MASK) | nlval;
	return(rval);
}

PTHREAD_ALWAYS_INLINE
static inline void
_pthread_rwlock_updateval(_pthread_rwlock *rwlock, uint32_t updateval)
{
	bool isoverlap = (updateval & PTH_RWL_MBIT) != 0;

	// TBD: restore U bit
	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq oldseq, newseq;
	rwlock_seq_load(seqaddr, &oldseq, RWLOCK_SEQ_LS);
	do {
		newseq = oldseq;
		if (isoverlap || is_rws_setunlockinit(oldseq.rw_seq) != 0) {
			// Set S word to the specified value
			uint32_t savebits = (oldseq.rw_seq & PTHRW_RWS_SAVEMASK);
			newseq.lcntval = _pthread_rwlock_modbits(oldseq.lcntval, updateval,
					savebits);
			newseq.rw_seq += (updateval & PTHRW_COUNT_MASK);
			if (!isoverlap) {
				newseq.rw_seq &= PTHRW_COUNT_MASK;
			}
			newseq.rw_seq &= ~PTHRW_RWS_SAVEMASK;
		}
	} while (!rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			RWLOCK_SEQ_LS, relaxed));
	RWLOCK_DEBUG_SEQ(update, rwlock, oldseq, newseq, updateval, RWLOCK_SEQ_LS);
}
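
/*
 * The updateval word returned by the __psynch_rw_* traps carries the state
 * bits and sequence count the kernel wants reflected in userland; the loop
 * above folds them into the L and S words with a CAS retry, since other
 * threads may race with updates of their own.
 */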

#if __DARWIN_UNIX03
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_rwlock_check_busy(_pthread_rwlock *rwlock)
{
	int res = 0;

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq seq;
	rwlock_seq_atomic_load(seqaddr, &seq, RWLOCK_SEQ_LSU, relaxed);
	if ((seq.lcntval & PTHRW_COUNT_MASK) != seq.ucntval) {
		res = EBUSY;
	}

	return res;
}
#endif /* __DARWIN_UNIX03 */

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_destroy(pthread_rwlock_t *orwlock)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	_PTHREAD_LOCK(rwlock->lock);
	if (_pthread_rwlock_check_signature(rwlock)) {
#if __DARWIN_UNIX03
		res = _pthread_rwlock_check_busy(rwlock);
#endif /* __DARWIN_UNIX03 */
	} else if (!_pthread_rwlock_check_signature_init(rwlock)) {
		res = EINVAL;
	}
	if (res == 0) {
		rwlock->sig = _PTHREAD_NO_SIG;
	}
	_PTHREAD_UNLOCK(rwlock->lock);
	return res;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_init(pthread_rwlock_t *orwlock, const pthread_rwlockattr_t *attr)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

#if __DARWIN_UNIX03
	if (attr && attr->sig != _PTHREAD_RWLOCK_ATTR_SIG) {
		res = EINVAL;
	}

	if (res == 0 && _pthread_rwlock_check_signature(rwlock)) {
		res = _pthread_rwlock_check_busy(rwlock);
	}
#endif
	if (res == 0) {
		_PTHREAD_LOCK_INIT(rwlock->lock);
		res = _pthread_rwlock_init(rwlock, attr);
	}
	return res;
}
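
/*
 * Illustrative client usage (standard POSIX API, shown only for
 * orientation):
 *
 *	pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *	pthread_rwlock_rdlock(&lock);	// many concurrent readers
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_wrlock(&lock);	// one exclusive writer
 *	pthread_rwlock_unlock(&lock);
 *	pthread_rwlock_destroy(&lock);
 *
 * A statically initialized lock carries the _init signature and is lazily
 * set up on first use via _pthread_rwlock_check_init_slow() below.
 */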

PTHREAD_NOINLINE
static int
_pthread_rwlock_check_init_slow(pthread_rwlock_t *orwlock)
{
	int res = EINVAL;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	if (_pthread_rwlock_check_signature_init(rwlock)) {
		_PTHREAD_LOCK(rwlock->lock);
		if (_pthread_rwlock_check_signature_init(rwlock)) {
			res = _pthread_rwlock_init(rwlock, NULL);
		} else if (_pthread_rwlock_check_signature(rwlock)) {
			res = 0;
		}
		_PTHREAD_UNLOCK(rwlock->lock);
	} else if (_pthread_rwlock_check_signature(rwlock)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, res);
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_rwlock_check_init(pthread_rwlock_t *orwlock)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	if (!_pthread_rwlock_check_signature(rwlock)) {
		return _pthread_rwlock_check_init_slow(orwlock);
	}
	return res;
}

PTHREAD_NOINLINE
static int
_pthread_rwlock_lock_wait(pthread_rwlock_t *orwlock, bool readlock,
		rwlock_seq newseq)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

#ifdef PLOCKSTAT
	int plockstat = readlock ? READ_LOCK_PLOCKSTAT : WRITE_LOCK_PLOCKSTAT;
#endif

	if (readlock) {
		RWLOCK_DEBUG_SEQ(rdlock, rwlock, oldseq, newseq, gotlock,
				RWLOCK_SEQ_LSU);
	} else {
		RWLOCK_DEBUG_SEQ(wrlock, rwlock, oldseq, newseq, gotlock,
				RWLOCK_SEQ_LSU);
	}

	uint32_t updateval;

	PLOCKSTAT_RW_BLOCK(orwlock, plockstat);

	do {
		if (readlock) {
			updateval = __psynch_rw_rdlock(orwlock, newseq.lcntval,
					newseq.ucntval, newseq.rw_seq, rwlock->rw_flags);
		} else {
			updateval = __psynch_rw_wrlock(orwlock, newseq.lcntval,
					newseq.ucntval, newseq.rw_seq, rwlock->rw_flags);
		}
		if (updateval == (uint32_t)-1) {
			res = errno;
		} else {
			res = 0;
		}
	} while (res == EINTR);

	if (res == 0) {
		_pthread_rwlock_updateval(rwlock, updateval);
		PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_SUCCESS_PLOCKSTAT);
	} else {
		PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_FAIL_PLOCKSTAT);
		PTHREAD_ABORT("kernel rwlock returned unknown error %x: "
				"tid %llx\n", res, _pthread_selfid_direct());
	}

	return res;
}
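
/*
 * Note: the traps return (uint32_t)-1 on failure with the error in errno;
 * EINTR simply retries the wait. Any other error indicates corrupted lock
 * state and is fatal, hence the PTHREAD_ABORT above.
 */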

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_rwlock_lock_slow(pthread_rwlock_t *orwlock, bool readlock,
		bool trylock)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

#ifdef PLOCKSTAT
	int plockstat = readlock ? READ_LOCK_PLOCKSTAT : WRITE_LOCK_PLOCKSTAT;
#endif

	res = _pthread_rwlock_check_init(orwlock);
	if (res != 0) return res;

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq oldseq, newseq;
	rwlock_seq_atomic_load(seqaddr, &oldseq, RWLOCK_SEQ_LSU, relaxed);

#if __DARWIN_UNIX03
	uint64_t *tidaddr;
	RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();
	if (is_rwl_ebit_set(oldseq.lcntval)) {
		if (os_atomic_load(tidaddr, relaxed) == selfid) return EDEADLK;
	}
#endif /* __DARWIN_UNIX03 */

	int retry_count;
	bool gotlock;
	do {
		retry_count = 0;
retry:
		newseq = oldseq;

		// if the W and K bits are clear (readers) or the U bit is set
		// (writers), acquire the lock in userland
		if (readlock) {
			gotlock = (oldseq.lcntval & (PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0;
		} else {
			gotlock = (oldseq.lcntval & PTH_RWL_UBIT) != 0;
		}

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS to ensure that nobody has unlocked concurrently.
		} else if (gotlock) {
			if (readlock) {
				if (diff_genseq(oldseq.lcntval, oldseq.ucntval) >=
						PTHRW_MAX_READERS) {
					// since ucntval may be newer, just redo
					retry_count++;
					if (retry_count > 1024) {
						gotlock = false;
						res = EAGAIN;
						goto out;
					} else {
						sched_yield();
						rwlock_seq_atomic_load(seqaddr, &oldseq,
								RWLOCK_SEQ_LSU, relaxed);
						goto retry;
					}
				}
				// Need to update L (remove U bit) and S word
				newseq.lcntval &= ~PTH_RWL_UBIT;
			} else {
				newseq.lcntval &= PTHRW_COUNT_MASK;
				newseq.lcntval |= PTH_RWL_IBIT | PTH_RWL_KBIT | PTH_RWL_EBIT;
			}
			newseq.lcntval += PTHRW_INC;
			newseq.rw_seq += PTHRW_INC;
		} else {
			if (readlock) {
				// Need to block in kernel. Remove U bit.
				newseq.lcntval &= ~PTH_RWL_UBIT;
			} else {
				newseq.lcntval |= PTH_RWL_KBIT | PTH_RWL_WBIT;
			}
			newseq.lcntval += PTHRW_INC;
			if (is_rws_setseq(oldseq.rw_seq)) {
				// Clear the S bit and set S to L
				newseq.rw_seq &= (PTHRW_BIT_MASK & ~PTH_RWS_SBIT);
				newseq.rw_seq |= (oldseq.lcntval & PTHRW_COUNT_MASK);
			}
		}
	} while (!rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			RWLOCK_SEQ_LS, acquire));

	if (gotlock) {
#if __DARWIN_UNIX03
		if (!readlock) os_atomic_store(tidaddr, selfid, relaxed);
#endif /* __DARWIN_UNIX03 */
		res = 0;
	} else if (trylock) {
		res = EBUSY;
	} else {
		res = _pthread_rwlock_lock_wait(orwlock, readlock, newseq);
	}

out:
#ifdef PLOCKSTAT
	if (res == 0) {
		PLOCKSTAT_RW_ACQUIRE(orwlock, plockstat);
	} else {
		PLOCKSTAT_RW_ERROR(orwlock, plockstat, res);
	}
#endif

	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_rwlock_lock(pthread_rwlock_t *orwlock, bool readlock, bool trylock)
{
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
#if PLOCKSTAT
	if (PLOCKSTAT_RW_ACQUIRE_ENABLED() || PLOCKSTAT_RW_ERROR_ENABLED()) {
		return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
	}
#endif

	if (os_unlikely(!_pthread_rwlock_check_signature(rwlock))) {
		return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
	}

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq oldseq, newseq;
	// no need to perform a single-copy-atomic 128-bit load in the fastpath,
	// if stores to L and U are seen out of order, we will fallback to the
	// slowpath below (which has rwlock_seq_atomic_load)
	rwlock_seq_load(seqaddr, &oldseq, RWLOCK_SEQ_LSU);

#if __DARWIN_UNIX03
	if (os_unlikely(is_rwl_ebit_set(oldseq.lcntval))) {
		return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
	}
#endif /* __DARWIN_UNIX03 */

	bool gotlock;
	do {
		newseq = oldseq;

		// if the W and K bits are clear (readers) or the U bit is set
		// (writers), acquire the lock in userland
		if (readlock) {
			gotlock = (oldseq.lcntval & (PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0;
		} else {
			gotlock = (oldseq.lcntval & PTH_RWL_UBIT) != 0;
		}

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS to ensure that nobody has unlocked concurrently.
		} else if (os_likely(gotlock)) {
			if (readlock) {
				if (os_unlikely(diff_genseq(oldseq.lcntval, oldseq.ucntval) >=
						PTHRW_MAX_READERS)) {
					return _pthread_rwlock_lock_slow(orwlock, readlock,
							trylock);
				}
				// Need to update L (remove U bit) and S word
				newseq.lcntval &= ~PTH_RWL_UBIT;
			} else {
				newseq.lcntval &= PTHRW_COUNT_MASK;
				newseq.lcntval |= PTH_RWL_IBIT | PTH_RWL_KBIT | PTH_RWL_EBIT;
			}
			newseq.lcntval += PTHRW_INC;
			newseq.rw_seq += PTHRW_INC;
		} else {
			return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
		}
	} while (os_unlikely(!rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			RWLOCK_SEQ_LS, acquire)));

	if (os_likely(gotlock)) {
#if __DARWIN_UNIX03
		if (!readlock) {
			uint64_t *tidaddr;
			RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
			uint64_t selfid = _pthread_selfid_direct();
			os_atomic_store(tidaddr, selfid, relaxed);
		}
#endif /* __DARWIN_UNIX03 */
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}
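
/*
 * The fastpath above handles only the uncontended cases entirely with a
 * userland CAS; anything unusual (bad signature, exclusive owner, reader
 * overflow, or the need to block) bails out to _pthread_rwlock_lock_slow,
 * which is deliberately kept out of line (PTHREAD_NOINLINE/PTHREAD_WEAK).
 */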

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_rdlock(pthread_rwlock_t *orwlock)
{
	// read lock, no try
	return _pthread_rwlock_lock(orwlock, true, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_tryrdlock(pthread_rwlock_t *orwlock)
{
	// read lock, try lock
	return _pthread_rwlock_lock(orwlock, true, true);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_wrlock(pthread_rwlock_t *orwlock)
{
	// write lock, no try
	return _pthread_rwlock_lock(orwlock, false, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_trywrlock(pthread_rwlock_t *orwlock)
{
	// write lock, try lock
	return _pthread_rwlock_lock(orwlock, false, true);
}

PTHREAD_NOINLINE
static int
_pthread_rwlock_unlock_drop(pthread_rwlock_t *orwlock, rwlock_seq oldseq,
		rwlock_seq newseq)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	RWLOCK_DEBUG_SEQ(unlock, rwlock, oldseq, newseq, 0, RWLOCK_SEQ_LSU);
	uint32_t updateval;
	do {
		updateval = __psynch_rw_unlock(orwlock, oldseq.lcntval,
				newseq.ucntval, newseq.rw_seq, rwlock->rw_flags);
		if (updateval == (uint32_t)-1) {
			res = errno;
		} else {
			res = 0;
			RWLOCK_DEBUG_SEQ(wakeup, rwlock, oldseq, newseq, updateval,
					RWLOCK_SEQ_LSU);
		}
	} while (res == EINTR);

	if (res != 0) {
		PTHREAD_ABORT("kernel rwunlock returned unknown error %x: "
				"tid %llx\n", res, _pthread_selfid_direct());
	}

	return res;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_rwlock_unlock_slow(pthread_rwlock_t *orwlock,
		rwlock_seqfields updated_seqfields)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
	rwlock_seqfields seqfields = RWLOCK_SEQ_LSU;
#ifdef PLOCKSTAT
	int wrlock = 0;
#endif

	res = _pthread_rwlock_check_init(orwlock);
	if (res != 0) return res;

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq oldseq, newseq;
	rwlock_seq_load(seqaddr, &oldseq, seqfields);

	if ((oldseq.lcntval & PTH_RWL_UBIT) != 0) {
		// spurious unlock (unlock of unlocked lock)
		return 0;
	}

	if (is_rwl_ebit_set(oldseq.lcntval)) {
#ifdef PLOCKSTAT
		wrlock = 1;
#endif
#if __DARWIN_UNIX03
		uint64_t *tidaddr;
		RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
		os_atomic_store(tidaddr, 0, relaxed);
#endif /* __DARWIN_UNIX03 */
	}

	bool droplock;
	do {
		// stop loading & updating fields that have successfully been stored
		seqfields &= ~updated_seqfields;

		newseq = oldseq;
		if (seqfields & RWLOCK_SEQ_U) {
			newseq.ucntval += PTHRW_INC;
		}

		droplock = false;
		uint32_t oldlcnt = (oldseq.lcntval & PTHRW_COUNT_MASK);
		if (newseq.ucntval == oldlcnt) {
			// last unlock, set L with U and init bits and set S to L with S bit
			newseq.lcntval = oldlcnt | PTHRW_RWLOCK_INIT;
			newseq.rw_seq = oldlcnt | PTHRW_RWS_INIT;
		} else {
			// no L/S update if lock is not exclusive or no writer pending
			if ((oldseq.lcntval &
					(PTH_RWL_EBIT | PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0) {
				continue;
			}

			// kernel transition only needed if U == S
			if (newseq.ucntval != (oldseq.rw_seq & PTHRW_COUNT_MASK)) {
				continue;
			}

			droplock = true;
			// reset all bits and set K
			newseq.lcntval = oldlcnt | PTH_RWL_KBIT;
			// set I bit on S word
			newseq.rw_seq |= PTH_RWS_IBIT;
			if ((oldseq.lcntval & PTH_RWL_WBIT) != 0) {
				newseq.rw_seq |= PTH_RWS_WSVBIT;
			}
		}
	} while (seqfields != (updated_seqfields = rwlock_seq_atomic_cmpxchgv(
			seqaddr, &oldseq, &newseq, seqfields, release)));

	if (droplock) {
		res = _pthread_rwlock_unlock_drop(orwlock, oldseq, newseq);
	}

	PLOCKSTAT_RW_RELEASE(orwlock, wrlock);

	return res;
}
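
/*
 * The updated_seqfields protocol: a CAS may succeed on only some of the
 * LS and U words (see rwlock_seq_atomic_cmpxchgv_release). The fields that
 * did land are reported back, and the retry loop -- or, in the fastpath
 * below, the punt to this slow path -- updates only the remainder, so no
 * word is ever incremented twice for a single unlock.
 */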

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_unlock(pthread_rwlock_t *orwlock)
{
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
	rwlock_seqfields seqfields = RWLOCK_SEQ_LSU;
	rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;

#if PLOCKSTAT
	if (PLOCKSTAT_RW_RELEASE_ENABLED() || PLOCKSTAT_RW_ERROR_ENABLED()) {
		return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
	}
#endif

	if (os_unlikely(!_pthread_rwlock_check_signature(rwlock))) {
		return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
	}

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq oldseq, newseq;
	rwlock_seq_load(seqaddr, &oldseq, seqfields);

	if (os_unlikely(oldseq.lcntval & PTH_RWL_UBIT)) {
		// spurious unlock (unlock of unlocked lock)
		return 0;
	}

	if (is_rwl_ebit_set(oldseq.lcntval)) {
#if __DARWIN_UNIX03
		uint64_t *tidaddr;
		RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
		os_atomic_store(tidaddr, 0, relaxed);
#endif /* __DARWIN_UNIX03 */
	}

	do {
		if (updated_seqfields) {
			return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
		}

		newseq = oldseq;
		if (seqfields & RWLOCK_SEQ_U) {
			newseq.ucntval += PTHRW_INC;
		}

		uint32_t oldlcnt = (oldseq.lcntval & PTHRW_COUNT_MASK);
		if (os_likely(newseq.ucntval == oldlcnt)) {
			// last unlock, set L with U and init bits and set S to L with S bit
			newseq.lcntval = oldlcnt | PTHRW_RWLOCK_INIT;
			newseq.rw_seq = oldlcnt | PTHRW_RWS_INIT;
		} else {
			if (os_likely((oldseq.lcntval &
					(PTH_RWL_EBIT | PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0 ||
					newseq.ucntval != (oldseq.rw_seq & PTHRW_COUNT_MASK))) {
				// no L/S update if lock is not exclusive or no writer pending
				// kernel transition only needed if U == S
			} else {
				return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
			}
		}
	} while (os_unlikely(seqfields != (updated_seqfields =
			rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, seqfields,
			release))));

	return 0;
}