/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 1998 Alex Nash
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/lib/libc_r/uthread/uthread_rwlock.c,v 1.6 2001/04/10 04:19:20 deischen Exp $
 */

/*
 * POSIX Pthread Library
 * -- Read Write Lock support
 * 4/24/02: A. Ramesh
 * Ported from FreeBSD
 */

#include "resolver.h"
#include "internal.h"
#if DEBUG
#include <platform/compat.h> // for bzero
#endif

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_RW_ERROR(x, y, z)
#define PLOCKSTAT_RW_BLOCK(x, y)
#define PLOCKSTAT_RW_BLOCKED(x, y, z)
#define PLOCKSTAT_RW_ACQUIRE(x, y)
#define PLOCKSTAT_RW_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define READ_LOCK_PLOCKSTAT 0
#define WRITE_LOCK_PLOCKSTAT 1

#define BLOCK_FAIL_PLOCKSTAT 0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#define PTHREAD_RWLOCK_INIT_UNUSED 1

// maximum number of times a read lock may be obtained
#define MAX_READ_LOCKS (INT_MAX - 1)

union rwlock_seq; // forward declaration
enum rwlock_seqfields; // forward declaration

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_rwlock_lock_slow(pthread_rwlock_t *orwlock, bool readlock,
		bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_rwlock_unlock_slow(pthread_rwlock_t *orwlock,
		enum rwlock_seqfields updated_seqfields);


#if defined(__LP64__)
#define RWLOCK_USE_INT128 1
#endif

typedef union rwlock_seq {
	uint32_t seq[4];
	struct { uint32_t lcntval; uint32_t rw_seq; uint32_t ucntval; };
	struct { uint32_t lgen; uint32_t rw_wc; uint32_t ugen; };
#if RWLOCK_USE_INT128
	unsigned __int128 seq_LSU;
	unsigned __int128 _Atomic atomic_seq_LSU;
#endif
	struct {
		uint64_t seq_LS;
		uint32_t seq_U;
		uint32_t _pad;
	};
	struct {
		uint64_t _Atomic atomic_seq_LS;
		uint32_t _Atomic atomic_seq_U;
		uint32_t _Atomic _atomic_pad;
	};
} rwlock_seq;

_Static_assert(sizeof(rwlock_seq) == 4 * sizeof(uint32_t),
		"Incorrect rwlock_seq size");

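/*
 * The union overlays the same four 32-bit words in several ways: L is the
 * lock-count/generation word (lcntval), S the sequence word (rw_seq), and
 * U the unlock-count word (ucntval). seq_LS views L and S as one 64-bit
 * quantity so both can be updated with a single CAS; on LP64, seq_LSU
 * additionally covers U through a 128-bit view. The rwlock_seqfields enum
 * below selects which of these views an operation loads and updates.
 */
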
typedef enum rwlock_seqfields {
	RWLOCK_SEQ_NONE = 0,
	RWLOCK_SEQ_LS = 1,
	RWLOCK_SEQ_U = 2,
	RWLOCK_SEQ_LSU = RWLOCK_SEQ_LS | RWLOCK_SEQ_U,
} rwlock_seqfields;

#if PTHREAD_DEBUG_LOG
#define RWLOCK_DEBUG_SEQ(op, rwlock, oldseq, newseq, updateval, f) \
	if (_pthread_debuglog >= 0) { \
		_simple_dprintf(_pthread_debuglog, "rw_" #op " %p tck %7llu thr %llx " \
				"L %x -> %x S %x -> %x U %x -> %x updt %x\n", rwlock, \
				mach_absolute_time() - _pthread_debugstart, _pthread_selfid_direct(), \
				(f) & RWLOCK_SEQ_LS ? (oldseq).lcntval : 0, \
				(f) & RWLOCK_SEQ_LS ? (newseq).lcntval : 0, \
				(f) & RWLOCK_SEQ_LS ? (oldseq).rw_seq : 0, \
				(f) & RWLOCK_SEQ_LS ? (newseq).rw_seq : 0, \
				(f) & RWLOCK_SEQ_U ? (oldseq).ucntval : 0, \
				(f) & RWLOCK_SEQ_U ? (newseq).ucntval : 0, updateval); }
#else
#define RWLOCK_DEBUG_SEQ(m, rwlock, oldseq, newseq, updateval, f)
#endif

#if !__LITTLE_ENDIAN__
#error RWLOCK_GETSEQ_ADDR assumes little endian layout of sequence words
#endif

PTHREAD_ALWAYS_INLINE
static inline void
RWLOCK_GETSEQ_ADDR(_pthread_rwlock *rwlock, rwlock_seq **seqaddr)
{
	// 128-bit aligned address inside rw_seq & rw_mis arrays
	*seqaddr = (void*)(((uintptr_t)rwlock->rw_seq + 0xful) & ~0xful);
}

PTHREAD_ALWAYS_INLINE
static inline void
RWLOCK_GETTID_ADDR(_pthread_rwlock *rwlock, uint64_t **tidaddr)
{
	// 64-bit aligned address inside rw_tid array (&rw_tid[0] for aligned lock)
	*tidaddr = (void*)(((uintptr_t)rwlock->rw_tid + 0x7ul) & ~0x7ul);
}
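
/*
 * Both helpers round up with the usual align-up idiom
 * (addr + (align - 1)) & ~(align - 1); e.g. an address ending in 0x4
 * rounds up to the next 0x10 boundary in the 16-byte case, while an
 * already-aligned address is unchanged. The arrays are sized so that an
 * aligned slot exists even when the pthread_rwlock_t itself is only
 * 4-byte aligned (see the misalign handling in _pthread_rwlock_init).
 */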

PTHREAD_ALWAYS_INLINE
static inline void
rwlock_seq_load(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
		const rwlock_seqfields seqfields)
{
	switch (seqfields) {
	case RWLOCK_SEQ_LSU:
#if RWLOCK_USE_INT128
		oldseqval->seq_LSU = seqaddr->seq_LSU;
#else
		oldseqval->seq_LS = seqaddr->seq_LS;
		oldseqval->seq_U = seqaddr->seq_U;
#endif
		break;
	case RWLOCK_SEQ_LS:
		oldseqval->seq_LS = seqaddr->seq_LS;
		break;
#if DEBUG // unused
	case RWLOCK_SEQ_U:
		oldseqval->seq_U = seqaddr->seq_U;
		break;
#endif // unused
	default:
		__builtin_trap();
	}
}

PTHREAD_ALWAYS_INLINE
static inline void
rwlock_seq_atomic_load_relaxed(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
		const rwlock_seqfields seqfields)
{
	switch (seqfields) {
	case RWLOCK_SEQ_LSU:
#if RWLOCK_USE_INT128
#if defined(__arm64__) && defined(__ARM_ARCH_8_2__)
		// Workaround clang armv81 codegen bug for 128bit os_atomic_load
		// rdar://problem/31213932
		oldseqval->seq_LSU = seqaddr->seq_LSU;
		while (!os_atomic_cmpxchgvw(&seqaddr->atomic_seq_LSU,
				oldseqval->seq_LSU, oldseqval->seq_LSU, &oldseqval->seq_LSU,
				relaxed));
#else
		oldseqval->seq_LSU = os_atomic_load(&seqaddr->atomic_seq_LSU, relaxed);
#endif
#else
		oldseqval->seq_LS = os_atomic_load(&seqaddr->atomic_seq_LS, relaxed);
		oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
#endif
		break;
	case RWLOCK_SEQ_LS:
		oldseqval->seq_LS = os_atomic_load(&seqaddr->atomic_seq_LS, relaxed);
		break;
#if DEBUG // unused
	case RWLOCK_SEQ_U:
		oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
		break;
#endif // unused
	default:
		__builtin_trap();
	}
}

#define rwlock_seq_atomic_load(seqaddr, oldseqval, seqfields, m) \
		rwlock_seq_atomic_load_##m(seqaddr, oldseqval, seqfields)

PTHREAD_ALWAYS_INLINE
static inline rwlock_seqfields
rwlock_seq_atomic_cmpxchgv_relaxed(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
		rwlock_seq *newseqval, const rwlock_seqfields seqfields)
{
	bool r;
	rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;
	switch (seqfields) {
#if DEBUG // unused
	case RWLOCK_SEQ_LSU:
#if RWLOCK_USE_INT128
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LSU, oldseqval->seq_LSU,
				newseqval->seq_LSU, &oldseqval->seq_LSU, relaxed);
		if (r) updated_seqfields = RWLOCK_SEQ_LSU;
#else
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
				newseqval->seq_LS, &oldseqval->seq_LS, relaxed);
		if (r) {
			r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
					newseqval->seq_U, &oldseqval->seq_U, relaxed);
			if (!r) oldseqval->seq_LS = newseqval->seq_LS;
			updated_seqfields = r ? RWLOCK_SEQ_LSU : RWLOCK_SEQ_LS;
		} else {
			oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
		}
#endif
		break;
	case RWLOCK_SEQ_U:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
				newseqval->seq_U, &oldseqval->seq_U, relaxed);
		if (r) updated_seqfields = RWLOCK_SEQ_U;
		break;
#endif // unused
	case RWLOCK_SEQ_LS:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
				newseqval->seq_LS, &oldseqval->seq_LS, relaxed);
		if (r) updated_seqfields = RWLOCK_SEQ_LS;
		break;
	default:
		__builtin_trap();
	}
	return updated_seqfields;
}

PTHREAD_ALWAYS_INLINE
static inline rwlock_seqfields
rwlock_seq_atomic_cmpxchgv_acquire(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
		rwlock_seq *newseqval, const rwlock_seqfields seqfields)
{
	bool r;
	rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;
	switch (seqfields) {
#if DEBUG // unused
	case RWLOCK_SEQ_LSU:
#if RWLOCK_USE_INT128
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LSU, oldseqval->seq_LSU,
				newseqval->seq_LSU, &oldseqval->seq_LSU, acquire);
		if (r) updated_seqfields = RWLOCK_SEQ_LSU;
#else
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
				newseqval->seq_LS, &oldseqval->seq_LS, acquire);
		if (r) {
			r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
					newseqval->seq_U, &oldseqval->seq_U, relaxed);
			if (!r) oldseqval->seq_LS = newseqval->seq_LS;
			updated_seqfields = r ? RWLOCK_SEQ_LSU : RWLOCK_SEQ_LS;
		} else {
			oldseqval->seq_U = os_atomic_load(&seqaddr->atomic_seq_U, relaxed);
		}
#endif
		break;
	case RWLOCK_SEQ_U:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
				newseqval->seq_U, &oldseqval->seq_U, acquire);
		if (r) updated_seqfields = RWLOCK_SEQ_U;
		break;
#endif // unused
	case RWLOCK_SEQ_LS:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
				newseqval->seq_LS, &oldseqval->seq_LS, acquire);
		if (r) updated_seqfields = RWLOCK_SEQ_LS;
		break;
	default:
		__builtin_trap();
	}
	return updated_seqfields;
}

PTHREAD_ALWAYS_INLINE
static inline rwlock_seqfields
rwlock_seq_atomic_cmpxchgv_release(rwlock_seq *seqaddr, rwlock_seq *oldseqval,
		rwlock_seq *newseqval, const rwlock_seqfields seqfields)
{
	bool r;
	rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;
	switch (seqfields) {
	case RWLOCK_SEQ_LSU:
#if RWLOCK_USE_INT128
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LSU, oldseqval->seq_LSU,
				newseqval->seq_LSU, &oldseqval->seq_LSU, release);
		if (r) updated_seqfields = RWLOCK_SEQ_LSU;
#else
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
				newseqval->seq_U, &oldseqval->seq_U, release);
		if (r) {
			r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
					newseqval->seq_LS, &oldseqval->seq_LS, relaxed);
			if (!r) oldseqval->seq_U = newseqval->seq_U;
			updated_seqfields = r ? RWLOCK_SEQ_LSU : RWLOCK_SEQ_U;
		} else {
			oldseqval->seq_LS = os_atomic_load(&seqaddr->atomic_seq_LS, relaxed);
		}
#endif
		break;
	case RWLOCK_SEQ_LS:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_LS, oldseqval->seq_LS,
				newseqval->seq_LS, &oldseqval->seq_LS, release);
		if (r) updated_seqfields = RWLOCK_SEQ_LS;
		break;
#if DEBUG // unused
	case RWLOCK_SEQ_U:
		r = os_atomic_cmpxchgv(&seqaddr->atomic_seq_U, oldseqval->seq_U,
				newseqval->seq_U, &oldseqval->seq_U, release);
		if (r) updated_seqfields = RWLOCK_SEQ_U;
		break;
#endif // unused
	default:
		__builtin_trap();
	}
	return updated_seqfields;
}

#define rwlock_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, seqfields, m)\
		rwlock_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval, seqfields)
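
/*
 * Memory ordering: the lock paths below pair their CAS with acquire
 * semantics and the unlock path with release semantics, giving the usual
 * critical-section ordering; the relaxed variant is used for bookkeeping
 * such as _pthread_rwlock_updateval, where the caller has just returned
 * from a psynch kernel call.
 */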

#ifndef BUILDING_VARIANT /* [ */

int
pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
	attr->sig = _PTHREAD_RWLOCK_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr)
{
	attr->sig = _PTHREAD_NO_SIG;
	attr->pshared = 0;
	return 0;
}

int
pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_RWLOCK_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) ||
				(pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}
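
/*
 * Illustrative usage of the attribute API above (a sketch, not part of
 * this file's implementation): creating a process-shared rwlock in memory
 * visible to multiple processes.
 *
 *	pthread_rwlockattr_t attr;
 *	pthread_rwlockattr_init(&attr);
 *	pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *	pthread_rwlock_t *rwlp = shared_mapping; // e.g. an mmap'd region
 *	pthread_rwlock_init(rwlp, &attr);
 *	pthread_rwlockattr_destroy(&attr);
 */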

#endif /* !BUILDING_VARIANT ] */

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_rwlock_init(_pthread_rwlock *rwlock, const pthread_rwlockattr_t *attr)
{
	uint64_t *tidaddr;
	RWLOCK_GETTID_ADDR(rwlock, &tidaddr);

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

#if PTHREAD_RWLOCK_INIT_UNUSED
	if ((uint32_t*)tidaddr != rwlock->rw_tid) {
		rwlock->misalign = 1;
		__builtin_memset(rwlock->rw_tid, 0xff, sizeof(rwlock->rw_tid));
	}
	if ((uint32_t*)seqaddr != rwlock->rw_seq) {
		__builtin_memset(rwlock->rw_seq, 0xff, sizeof(rwlock->rw_seq));
	}
	__builtin_memset(rwlock->rw_mis, 0xff, sizeof(rwlock->rw_mis));
#endif // PTHREAD_RWLOCK_INIT_UNUSED
	*tidaddr = 0;
	*seqaddr = (rwlock_seq){
		.lcntval = PTHRW_RWLOCK_INIT,
		.rw_seq = PTHRW_RWS_INIT,
		.ucntval = 0,
	};

	if (attr != NULL && attr->pshared == PTHREAD_PROCESS_SHARED) {
		rwlock->pshared = PTHREAD_PROCESS_SHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_SHARED;
	} else {
		rwlock->pshared = _PTHREAD_DEFAULT_PSHARED;
		rwlock->rw_flags = PTHRW_KERN_PROCESS_PRIVATE;
	}

	long sig = _PTHREAD_RWLOCK_SIG;

#if DEBUG
	bzero(rwlock->_reserved, sizeof(rwlock->_reserved));
#endif
#if PTHREAD_RWLOCK_INIT_UNUSED
	// For detecting copied rwlocks and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
	uintptr_t guard = ~(uintptr_t)rwlock; // use ~ to hide from leaks
	__builtin_memcpy(rwlock->_reserved, &guard, sizeof(guard));
#define countof(x) (sizeof(x) / sizeof(x[0]))
	rwlock->_reserved[countof(rwlock->_reserved) - 1] = sig32;
#if defined(__LP64__)
	rwlock->_pad = sig32;
#endif
#endif // PTHREAD_RWLOCK_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&rwlock->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(rwlock, sig, sig, release);
#endif

	return 0;
}

static uint32_t
_pthread_rwlock_modbits(uint32_t lgenval, uint32_t updateval, uint32_t savebits)
{
	uint32_t lval = lgenval & PTHRW_BIT_MASK;
	uint32_t uval = updateval & PTHRW_BIT_MASK;
	uint32_t rval, nlval;

	nlval = (lval | uval) & ~(PTH_RWL_MBIT);

	// reconcile bits on the lock with what kernel needs to set
	if ((uval & PTH_RWL_KBIT) == 0 && (lval & PTH_RWL_WBIT) == 0) {
		nlval &= ~PTH_RWL_KBIT;
	}

	if (savebits != 0) {
		if ((savebits & PTH_RWS_WSVBIT) != 0 && (nlval & PTH_RWL_WBIT) == 0 &&
				(nlval & PTH_RWL_EBIT) == 0) {
			nlval |= (PTH_RWL_WBIT | PTH_RWL_KBIT);
		}
	}
	rval = (lgenval & PTHRW_COUNT_MASK) | nlval;
	return(rval);
}

PTHREAD_ALWAYS_INLINE
static inline void
_pthread_rwlock_updateval(_pthread_rwlock *rwlock, uint32_t updateval)
{
	bool isoverlap = (updateval & PTH_RWL_MBIT) != 0;

	// TBD: restore U bit
	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq oldseq, newseq;
	rwlock_seq_load(seqaddr, &oldseq, RWLOCK_SEQ_LS);
	do {
		newseq = oldseq;
		if (isoverlap || is_rws_unlockinit_set(oldseq.rw_seq)) {
			// Set S word to the specified value
			uint32_t savebits = (oldseq.rw_seq & PTHRW_RWS_SAVEMASK);
			newseq.lcntval = _pthread_rwlock_modbits(oldseq.lcntval, updateval,
					savebits);
			newseq.rw_seq += (updateval & PTHRW_COUNT_MASK);
			if (!isoverlap) {
				newseq.rw_seq &= PTHRW_COUNT_MASK;
			}
			newseq.rw_seq &= ~PTHRW_RWS_SAVEMASK;
		}
	} while (!rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			RWLOCK_SEQ_LS, relaxed));
	RWLOCK_DEBUG_SEQ(update, rwlock, oldseq, newseq, updateval, RWLOCK_SEQ_LS);
}
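
/*
 * A note on the update protocol (as read from the code above): the kernel
 * hands back 'updateval', whose bit portion is reconciled into the L word
 * by _pthread_rwlock_modbits and whose count portion advances the S word.
 * PTH_RWL_MBIT marks an overlapping update, in which case S keeps its
 * bits instead of being truncated to the bare count.
 */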

#if __DARWIN_UNIX03
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_rwlock_check_busy(_pthread_rwlock *rwlock)
{
	int res = 0;

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq seq;
	rwlock_seq_atomic_load(seqaddr, &seq, RWLOCK_SEQ_LSU, relaxed);
	if ((seq.lcntval & PTHRW_COUNT_MASK) != seq.ucntval) {
		res = EBUSY;
	}

	return res;
}
#endif /* __DARWIN_UNIX03 */

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_destroy(pthread_rwlock_t *orwlock)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	_PTHREAD_LOCK(rwlock->lock);
	if (_pthread_rwlock_check_signature(rwlock)) {
#if __DARWIN_UNIX03
		res = _pthread_rwlock_check_busy(rwlock);
#endif /* __DARWIN_UNIX03 */
	} else if (!_pthread_rwlock_check_signature_init(rwlock)) {
		res = EINVAL;
	}
	if (res == 0) {
		rwlock->sig = _PTHREAD_NO_SIG;
	}
	_PTHREAD_UNLOCK(rwlock->lock);
	return res;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_init(pthread_rwlock_t *orwlock, const pthread_rwlockattr_t *attr)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

#if __DARWIN_UNIX03
	if (attr && attr->sig != _PTHREAD_RWLOCK_ATTR_SIG) {
		res = EINVAL;
	}

	if (res == 0 && _pthread_rwlock_check_signature(rwlock)) {
		res = _pthread_rwlock_check_busy(rwlock);
	}
#endif
	if (res == 0) {
		_PTHREAD_LOCK_INIT(rwlock->lock);
		res = _pthread_rwlock_init(rwlock, attr);
	}
	return res;
}

PTHREAD_NOINLINE
static int
_pthread_rwlock_check_init_slow(pthread_rwlock_t *orwlock)
{
	int res = EINVAL;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	if (_pthread_rwlock_check_signature_init(rwlock)) {
		_PTHREAD_LOCK(rwlock->lock);
		if (_pthread_rwlock_check_signature_init(rwlock)) {
			res = _pthread_rwlock_init(rwlock, NULL);
		} else if (_pthread_rwlock_check_signature(rwlock)) {
			res = 0;
		}
		_PTHREAD_UNLOCK(rwlock->lock);
	} else if (_pthread_rwlock_check_signature(rwlock)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_RW_ERROR(orwlock, READ_LOCK_PLOCKSTAT, res);
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_rwlock_check_init(pthread_rwlock_t *orwlock)
{
	int res = 0;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	if (!_pthread_rwlock_check_signature(rwlock)) {
		return _pthread_rwlock_check_init_slow(orwlock);
	}
	return res;
}
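
/*
 * This pair of routines is how a statically initialized lock
 * (PTHREAD_RWLOCK_INITIALIZER) comes to life: the init signature is only
 * replaced by the live signature once _pthread_rwlock_init runs under the
 * interlock, so a lock that was never passed to pthread_rwlock_init()
 * still initializes itself correctly on first rdlock/wrlock.
 */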

PTHREAD_NOINLINE
static int
_pthread_rwlock_lock_wait(pthread_rwlock_t *orwlock, bool readlock,
		rwlock_seq newseq)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

#ifdef PLOCKSTAT
	int plockstat = readlock ? READ_LOCK_PLOCKSTAT : WRITE_LOCK_PLOCKSTAT;
#endif

	if (readlock) {
		RWLOCK_DEBUG_SEQ(rdlock, rwlock, oldseq, newseq, gotlock,
				RWLOCK_SEQ_LSU);
	} else {
		RWLOCK_DEBUG_SEQ(wrlock, rwlock, oldseq, newseq, gotlock,
				RWLOCK_SEQ_LSU);
	}

	uint32_t updateval;

	PLOCKSTAT_RW_BLOCK(orwlock, plockstat);

	do {
		if (readlock) {
			updateval = __psynch_rw_rdlock(orwlock, newseq.lcntval,
					newseq.ucntval, newseq.rw_seq, rwlock->rw_flags);
		} else {
			updateval = __psynch_rw_wrlock(orwlock, newseq.lcntval,
					newseq.ucntval, newseq.rw_seq, rwlock->rw_flags);
		}
		if (updateval == (uint32_t)-1) {
			res = errno;
		} else {
			res = 0;
		}
	} while (res == EINTR);

	if (res == 0) {
		_pthread_rwlock_updateval(rwlock, updateval);
		PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_SUCCESS_PLOCKSTAT);
	} else {
		PLOCKSTAT_RW_BLOCKED(orwlock, plockstat, BLOCK_FAIL_PLOCKSTAT);
		PTHREAD_INTERNAL_CRASH(res, "kernel rwlock returned unknown error");
	}

	return res;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_rwlock_lock_slow(pthread_rwlock_t *orwlock, bool readlock,
		bool trylock)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

#ifdef PLOCKSTAT
	int plockstat = readlock ? READ_LOCK_PLOCKSTAT : WRITE_LOCK_PLOCKSTAT;
#endif

	res = _pthread_rwlock_check_init(orwlock);
	if (res != 0) return res;

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq oldseq, newseq;
	rwlock_seq_atomic_load(seqaddr, &oldseq, RWLOCK_SEQ_LSU, relaxed);

#if __DARWIN_UNIX03
	uint64_t *tidaddr;
	RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();
	if (is_rwl_ebit_set(oldseq.lcntval)) {
		if (os_atomic_load(tidaddr, relaxed) == selfid) return EDEADLK;
	}
#endif /* __DARWIN_UNIX03 */

	int retry_count;
	bool gotlock;
	do {
		retry_count = 0;
retry:
		newseq = oldseq;

		// if W and K bit are clear or U bit is on, acquire lock in userland
		if (readlock) {
			gotlock = (oldseq.lcntval & (PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0;
		} else {
			gotlock = (oldseq.lcntval & PTH_RWL_UBIT) != 0;
		}

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS to ensure that nobody has unlocked concurrently.
		} else if (gotlock) {
			if (readlock) {
				if (diff_genseq(oldseq.lcntval, oldseq.ucntval) >=
						PTHRW_MAX_READERS) {
					// since ucntval may be newer, just redo
					retry_count++;
					if (retry_count > 1024) {
						gotlock = false;
						res = EAGAIN;
						goto out;
					} else {
						sched_yield();
						rwlock_seq_atomic_load(seqaddr, &oldseq,
								RWLOCK_SEQ_LSU, relaxed);
						goto retry;
					}
				}
				// Need to update L (remove U bit) and S word
				newseq.lcntval &= ~PTH_RWL_UBIT;
			} else {
				newseq.lcntval &= PTHRW_COUNT_MASK;
				newseq.lcntval |= PTH_RWL_IBIT | PTH_RWL_KBIT | PTH_RWL_EBIT;
			}
			newseq.lcntval += PTHRW_INC;
			newseq.rw_seq += PTHRW_INC;
		} else {
			if (readlock) {
				// Need to block in kernel. Remove U bit.
				newseq.lcntval &= ~PTH_RWL_UBIT;
			} else {
				newseq.lcntval |= PTH_RWL_KBIT | PTH_RWL_WBIT;
			}
			newseq.lcntval += PTHRW_INC;
			if (is_rws_sbit_set(oldseq.rw_seq)) {
				// Clear the S bit and set S to L
				newseq.rw_seq &= (PTHRW_BIT_MASK & ~PTH_RWS_SBIT);
				newseq.rw_seq |= (oldseq.lcntval & PTHRW_COUNT_MASK);
			}
		}
	} while (!rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			RWLOCK_SEQ_LS, acquire));

	if (gotlock) {
#if __DARWIN_UNIX03
		if (!readlock) os_atomic_store(tidaddr, selfid, relaxed);
#endif /* __DARWIN_UNIX03 */
		res = 0;
	} else if (trylock) {
		res = EBUSY;
	} else {
		res = _pthread_rwlock_lock_wait(orwlock, readlock, newseq);
	}

out:
#ifdef PLOCKSTAT
	if (res == 0) {
		PLOCKSTAT_RW_ACQUIRE(orwlock, plockstat);
	} else {
		PLOCKSTAT_RW_ERROR(orwlock, plockstat, res);
	}
#endif

	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_rwlock_lock(pthread_rwlock_t *orwlock, bool readlock, bool trylock)
{
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
#if PLOCKSTAT
	if (PLOCKSTAT_RW_ACQUIRE_ENABLED() || PLOCKSTAT_RW_ERROR_ENABLED()) {
		return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
	}
#endif

	if (os_unlikely(!_pthread_rwlock_check_signature(rwlock))) {
		return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
	}

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq oldseq, newseq;
	// no need to perform a single-copy-atomic 128-bit load in the fastpath,
	// if stores to L and U are seen out of order, we will fallback to the
	// slowpath below (which has rwlock_seq_atomic_load)
	rwlock_seq_load(seqaddr, &oldseq, RWLOCK_SEQ_LSU);

#if __DARWIN_UNIX03
	if (os_unlikely(is_rwl_ebit_set(oldseq.lcntval))) {
		return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
	}
#endif /* __DARWIN_UNIX03 */

	bool gotlock;
	do {
		newseq = oldseq;

		// if W and K bit are clear or U bit is on, acquire lock in userland
		if (readlock) {
			gotlock = (oldseq.lcntval & (PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0;
		} else {
			gotlock = (oldseq.lcntval & PTH_RWL_UBIT) != 0;
		}

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS to ensure that nobody has unlocked concurrently.
		} else if (os_likely(gotlock)) {
			if (readlock) {
				if (os_unlikely(diff_genseq(oldseq.lcntval, oldseq.ucntval) >=
						PTHRW_MAX_READERS)) {
					return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
				}
				// Need to update L (remove U bit) and S word
				newseq.lcntval &= ~PTH_RWL_UBIT;
			} else {
				newseq.lcntval &= PTHRW_COUNT_MASK;
				newseq.lcntval |= PTH_RWL_IBIT | PTH_RWL_KBIT | PTH_RWL_EBIT;
			}
			newseq.lcntval += PTHRW_INC;
			newseq.rw_seq += PTHRW_INC;
		} else {
			return _pthread_rwlock_lock_slow(orwlock, readlock, trylock);
		}
	} while (os_unlikely(!rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			RWLOCK_SEQ_LS, acquire)));

	if (os_likely(gotlock)) {
#if __DARWIN_UNIX03
		if (!readlock) {
			uint64_t *tidaddr;
			RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
			uint64_t selfid = _pthread_selfid_direct();
			os_atomic_store(tidaddr, selfid, relaxed);
		}
#endif /* __DARWIN_UNIX03 */
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_rdlock(pthread_rwlock_t *orwlock)
{
	// read lock, no try
	return _pthread_rwlock_lock(orwlock, true, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_tryrdlock(pthread_rwlock_t *orwlock)
{
	// read lock, try lock
	return _pthread_rwlock_lock(orwlock, true, true);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_wrlock(pthread_rwlock_t *orwlock)
{
	// write lock, no try
	return _pthread_rwlock_lock(orwlock, false, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_trywrlock(pthread_rwlock_t *orwlock)
{
	// write lock, try lock
	return _pthread_rwlock_lock(orwlock, false, true);
}
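
/*
 * Illustrative caller-side usage of the entry points above (a sketch, not
 * part of this file's implementation):
 *
 *	pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *
 *	// reader: many threads may hold the lock concurrently
 *	pthread_rwlock_rdlock(&lock);
 *	// ... read shared state ...
 *	pthread_rwlock_unlock(&lock);
 *
 *	// writer: exclusive; trywrlock returns EBUSY instead of blocking
 *	if (pthread_rwlock_trywrlock(&lock) == 0) {
 *		// ... mutate shared state ...
 *		pthread_rwlock_unlock(&lock);
 *	}
 */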

PTHREAD_NOINLINE
static int
_pthread_rwlock_unlock_drop(pthread_rwlock_t *orwlock, rwlock_seq oldseq,
		rwlock_seq newseq)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;

	RWLOCK_DEBUG_SEQ(unlock, rwlock, oldseq, newseq, !droplock, RWLOCK_SEQ_LSU);
	uint32_t updateval;
	do {
		updateval = __psynch_rw_unlock(orwlock, oldseq.lcntval,
				newseq.ucntval, newseq.rw_seq, rwlock->rw_flags);
		if (updateval == (uint32_t)-1) {
			res = errno;
		} else {
			res = 0;
			RWLOCK_DEBUG_SEQ(wakeup, rwlock, oldseq, newseq, updateval,
					RWLOCK_SEQ_LSU);
		}
	} while (res == EINTR);

	if (res != 0) {
		PTHREAD_INTERNAL_CRASH(res, "kernel rwunlock returned unknown error");
	}

	return res;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_rwlock_unlock_slow(pthread_rwlock_t *orwlock,
		rwlock_seqfields updated_seqfields)
{
	int res;
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
	rwlock_seqfields seqfields = RWLOCK_SEQ_LSU;
#ifdef PLOCKSTAT
	int wrlock = 0;
#endif

	res = _pthread_rwlock_check_init(orwlock);
	if (res != 0) return res;

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq oldseq, newseq;
	rwlock_seq_load(seqaddr, &oldseq, seqfields);

	if ((oldseq.lcntval & PTH_RWL_UBIT) != 0) {
		// spurious unlock (unlock of unlocked lock)
		return 0;
	}

	if (is_rwl_ebit_set(oldseq.lcntval)) {
#ifdef PLOCKSTAT
		wrlock = 1;
#endif
#if __DARWIN_UNIX03
		uint64_t *tidaddr;
		RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
		os_atomic_store(tidaddr, 0, relaxed);
#endif /* __DARWIN_UNIX03 */
	}

	bool droplock;
	do {
		// stop loading & updating fields that have successfully been stored
		seqfields &= ~updated_seqfields;

		newseq = oldseq;
		if (seqfields & RWLOCK_SEQ_U) {
			newseq.ucntval += PTHRW_INC;
		}

		droplock = false;
		uint32_t oldlcnt = (oldseq.lcntval & PTHRW_COUNT_MASK);
		if (newseq.ucntval == oldlcnt) {
			// last unlock, set L with U and init bits and set S to L with S bit
			newseq.lcntval = oldlcnt | PTHRW_RWLOCK_INIT;
			newseq.rw_seq = oldlcnt | PTHRW_RWS_INIT;
		} else {
			// no L/S update if lock is not exclusive or no writer pending
			if ((oldseq.lcntval &
					(PTH_RWL_EBIT | PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0) {
				continue;
			}

			// kernel transition only needed if U == S
			if (newseq.ucntval != (oldseq.rw_seq & PTHRW_COUNT_MASK)) {
				continue;
			}

			droplock = true;
			// reset all bits and set K
			newseq.lcntval = oldlcnt | PTH_RWL_KBIT;
			// set I bit on S word
			newseq.rw_seq |= PTH_RWS_IBIT;
			if ((oldseq.lcntval & PTH_RWL_WBIT) != 0) {
				newseq.rw_seq |= PTH_RWS_WSVBIT;
			}
		}
	} while (seqfields != (updated_seqfields = rwlock_seq_atomic_cmpxchgv(
			seqaddr, &oldseq, &newseq, seqfields, release)));

	if (droplock) {
		res = _pthread_rwlock_unlock_drop(orwlock, oldseq, newseq);
	}

	PLOCKSTAT_RW_RELEASE(orwlock, wrlock);

	return res;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_rwlock_unlock(pthread_rwlock_t *orwlock)
{
	_pthread_rwlock *rwlock = (_pthread_rwlock *)orwlock;
	rwlock_seqfields seqfields = RWLOCK_SEQ_LSU;
	rwlock_seqfields updated_seqfields = RWLOCK_SEQ_NONE;

#if PLOCKSTAT
	if (PLOCKSTAT_RW_RELEASE_ENABLED() || PLOCKSTAT_RW_ERROR_ENABLED()) {
		return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
	}
#endif

	if (os_unlikely(!_pthread_rwlock_check_signature(rwlock))) {
		return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
	}

	rwlock_seq *seqaddr;
	RWLOCK_GETSEQ_ADDR(rwlock, &seqaddr);

	rwlock_seq oldseq, newseq;
	rwlock_seq_load(seqaddr, &oldseq, seqfields);

	if (os_unlikely(oldseq.lcntval & PTH_RWL_UBIT)) {
		// spurious unlock (unlock of unlocked lock)
		return 0;
	}

	if (is_rwl_ebit_set(oldseq.lcntval)) {
#if __DARWIN_UNIX03
		uint64_t *tidaddr;
		RWLOCK_GETTID_ADDR(rwlock, &tidaddr);
		os_atomic_store(tidaddr, 0, relaxed);
#endif /* __DARWIN_UNIX03 */
	}

	do {
		if (updated_seqfields) {
			return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
		}

		newseq = oldseq;
		if (seqfields & RWLOCK_SEQ_U) {
			newseq.ucntval += PTHRW_INC;
		}

		uint32_t oldlcnt = (oldseq.lcntval & PTHRW_COUNT_MASK);
		if (os_likely(newseq.ucntval == oldlcnt)) {
			// last unlock, set L with U and init bits and set S to L with S bit
			newseq.lcntval = oldlcnt | PTHRW_RWLOCK_INIT;
			newseq.rw_seq = oldlcnt | PTHRW_RWS_INIT;
		} else {
			if (os_likely((oldseq.lcntval &
					(PTH_RWL_EBIT | PTH_RWL_WBIT | PTH_RWL_KBIT)) == 0 ||
					newseq.ucntval != (oldseq.rw_seq & PTHRW_COUNT_MASK))) {
				// no L/S update if lock is not exclusive or no writer pending
				// kernel transition only needed if U == S
			} else {
				return _pthread_rwlock_unlock_slow(orwlock, updated_seqfields);
			}
		}
	} while (os_unlikely(seqfields != (updated_seqfields =
			rwlock_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, seqfields,
			release))));

	return 0;
}