/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */

#include "kern/kern_trace.h"

#ifndef BUILDING_VARIANT /* [ */

#ifdef PLOCKSTAT
#include "plockstat.h"
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user d scripts don't get errors.
 */
PTHREAD_NOEXPORT PTHREAD_USED
void
_plockstat_never_fired(void)
{
    PLOCKSTAT_MUTEX_SPIN(NULL);
    PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#define PTHREAD_MUTEX_INIT_UNUSED 1
PTHREAD_NOEXPORT PTHREAD_WEAK
int _pthread_mutex_lock_init_slow(_pthread_mutex *mutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_fairshare_lock_slow(_pthread_mutex *mutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_firstfit_lock_slow(_pthread_mutex *mutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_fairshare_unlock_slow(_pthread_mutex *mutex);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_firstfit_unlock_slow(_pthread_mutex *mutex);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_corruption_abort(_pthread_mutex *mutex);

extern int __pthread_mutex_default_opt_policy PTHREAD_NOEXPORT;

int __pthread_mutex_default_opt_policy PTHREAD_NOEXPORT =
        _PTHREAD_MTX_OPT_POLICY_DEFAULT;
static inline bool
_pthread_mutex_policy_validate(int policy)
{
    return (policy >= 0 && policy < _PTHREAD_MUTEX_POLICY_LAST);
}

static inline int
_pthread_mutex_policy_to_opt(int policy)
{
    switch (policy) {
    case PTHREAD_MUTEX_POLICY_FAIRSHARE_NP:
        return _PTHREAD_MTX_OPT_POLICY_FAIRSHARE;
    case PTHREAD_MUTEX_POLICY_FIRSTFIT_NP:
        return _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
    default:
        __builtin_unreachable();
    }
}
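
/*
 * Illustrative sketch (added commentary, not part of this file): a process
 * opts into a specific policy per-mutex through the attribute API, e.g.
 *
 *    pthread_mutex_t m;
 *    pthread_mutexattr_t attr;
 *    pthread_mutexattr_init(&attr);
 *    pthread_mutexattr_setpolicy_np(&attr, PTHREAD_MUTEX_POLICY_FIRSTFIT_NP);
 *    pthread_mutex_init(&m, &attr);
 *    pthread_mutexattr_destroy(&attr);
 *
 * pthread_mutexattr_setpolicy_np() below performs the same public-to-internal
 * mapping that _pthread_mutex_policy_to_opt() above performs for the
 * process-wide default.
 */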
void
_pthread_mutex_global_init(const char *envp[],
        struct _pthread_registration_data *registration_data)
{
    int opt = _PTHREAD_MTX_OPT_POLICY_DEFAULT;
    if (registration_data->mutex_default_policy) {
        int policy = registration_data->mutex_default_policy;
        if (_pthread_mutex_policy_validate(policy)) {
            opt = _pthread_mutex_policy_to_opt(policy);
        }
    }

    const char *envvar = _simple_getenv(envp, "PTHREAD_MUTEX_DEFAULT_POLICY");
    if (envvar) {
        int policy = envvar[0] - '0';
        if (_pthread_mutex_policy_validate(policy)) {
            opt = _pthread_mutex_policy_to_opt(policy);
        }
    }

    if (opt != __pthread_mutex_default_opt_policy) {
        __pthread_mutex_default_opt_policy = opt;
    }
}
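
/*
 * Illustrative note (added commentary; the exact policy numbering lives in
 * the pthread headers, not in this file): launching a process with e.g.
 *
 *    PTHREAD_MUTEX_DEFAULT_POLICY=1 ./myprogram
 *
 * makes the code above parse the single digit, validate it with
 * _pthread_mutex_policy_validate(), and, if valid, switch the process-wide
 * default stored in __pthread_mutex_default_opt_policy.
 */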
PTHREAD_ALWAYS_INLINE
static inline int _pthread_mutex_init(_pthread_mutex *mutex,
        const pthread_mutexattr_t *attr, uint32_t static_type);

typedef union mutex_seq {
    struct { uint32_t lgenval; uint32_t ugenval; };
    struct { uint32_t mgen; uint32_t ugen; };
    uint64_t seq_LU;
    uint64_t _Atomic atomic_seq_LU;
} mutex_seq;

_Static_assert(sizeof(mutex_seq) == 2 * sizeof(uint32_t),
        "Incorrect mutex_seq size");
#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex, mutex_seq **seqaddr)
{
    // 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
    // We don't require more than byte alignment on OS X. rdar://22278325
    *seqaddr = (void *)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
}

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETTID_ADDR(_pthread_mutex *mutex, uint64_t **tidaddr)
{
    // 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
    // We don't require more than byte alignment on OS X. rdar://22278325
    *tidaddr = (void*)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
}
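
/*
 * Worked example (added commentary): the arithmetic above rounds up to the
 * next 8-byte boundary, e.g. (0x1003 + 0x7) & ~0x7 = 0x1008, while an already
 * aligned 0x1008 stays at 0x1008.
 */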
PTHREAD_ALWAYS_INLINE
static inline void
mutex_seq_load(mutex_seq *seqaddr, mutex_seq *oldseqval)
{
    oldseqval->seq_LU = seqaddr->seq_LU;
}

#define mutex_seq_atomic_load(seqaddr, oldseqval, m) \
        mutex_seq_atomic_load_##m(seqaddr, oldseqval)

PTHREAD_ALWAYS_INLINE PTHREAD_USED
static inline bool
mutex_seq_atomic_cmpxchgv_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval,
        mutex_seq *newseqval)
{
    return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
            newseqval->seq_LU, &oldseqval->seq_LU, relaxed);
}

PTHREAD_ALWAYS_INLINE PTHREAD_USED
static inline bool
mutex_seq_atomic_cmpxchgv_acquire(mutex_seq *seqaddr, mutex_seq *oldseqval,
        mutex_seq *newseqval)
{
    return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
            newseqval->seq_LU, &oldseqval->seq_LU, acquire);
}

PTHREAD_ALWAYS_INLINE PTHREAD_USED
static inline bool
mutex_seq_atomic_cmpxchgv_release(mutex_seq *seqaddr, mutex_seq *oldseqval,
        mutex_seq *newseqval)
{
    return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
            newseqval->seq_LU, &oldseqval->seq_LU, release);
}

#define mutex_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, m)\
        mutex_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval)
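
/*
 * Usage sketch (added commentary): the lock and unlock paths below all follow
 * the same load/modify/CAS-retry shape, roughly
 *
 *    mutex_seq_load(seqaddr, &oldseq);
 *    do {
 *        newseq = oldseq;
 *        // ...adjust newseq.lgenval / newseq.ugenval...
 *    } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));
 *
 * where a failed CAS refreshes oldseq with the current value so the loop
 * retries against fresh state.
 */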
/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
{
#if 0
    /* conformance tests depend on not having this behavior */
    /* The test for this behavior is optional */
    if (_pthread_mutex_check_signature(mutex))
        return EBUSY;
#endif
    _pthread_mutex *mutex = (_pthread_mutex *)omutex;
    _PTHREAD_LOCK_INIT(mutex->lock);
    return (_pthread_mutex_init(mutex, attr, 0x7));
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
    int res = EINVAL;
    _pthread_mutex *mutex = (_pthread_mutex *)omutex;
    if (_pthread_mutex_check_signature(mutex)) {
        _PTHREAD_LOCK(mutex->lock);
        *prioceiling = mutex->prioceiling;
        res = 0;
        _PTHREAD_UNLOCK(mutex->lock);
    }
    return res;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling,
        int *old_prioceiling)
{
    int res = EINVAL;
    _pthread_mutex *mutex = (_pthread_mutex *)omutex;
    if (_pthread_mutex_check_signature(mutex)) {
        _PTHREAD_LOCK(mutex->lock);
        if (prioceiling >= -999 && prioceiling <= 999) {
            *old_prioceiling = mutex->prioceiling;
            mutex->prioceiling = (int16_t)prioceiling;
            res = 0;
        }
        _PTHREAD_UNLOCK(mutex->lock);
    }
    return res;
}
int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
        int *prioceiling)
{
    int res = EINVAL;
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
        *prioceiling = attr->prioceiling;
        res = 0;
    }
    return res;
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
    int res = EINVAL;
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
        *protocol = attr->protocol;
        res = 0;
    }
    return res;
}

int
pthread_mutexattr_getpolicy_np(const pthread_mutexattr_t *attr, int *policy)
{
    int res = EINVAL;
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
        switch (attr->opt) {
        case _PTHREAD_MTX_OPT_POLICY_FAIRSHARE:
            *policy = PTHREAD_MUTEX_POLICY_FAIRSHARE_NP;
            res = 0;
            break;
        case _PTHREAD_MTX_OPT_POLICY_FIRSTFIT:
            *policy = PTHREAD_MUTEX_POLICY_FIRSTFIT_NP;
            res = 0;
            break;
        }
    }
    return res;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
    int res = EINVAL;
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
        *type = attr->type;
        res = 0;
    }
    return res;
}

int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
    int res = EINVAL;
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
        *pshared = (int)attr->pshared;
        res = 0;
    }
    return res;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
    attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
    attr->opt = __pthread_mutex_default_opt_policy;
    attr->type = PTHREAD_MUTEX_DEFAULT;
    attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
    attr->pshared = _PTHREAD_DEFAULT_PSHARED;
    return 0;
}
int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
    int res = EINVAL;
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
        if (prioceiling >= -999 && prioceiling <= 999) {
            attr->prioceiling = prioceiling;
            res = 0;
        }
    }
    return res;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
    int res = EINVAL;
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
        switch (protocol) {
        case PTHREAD_PRIO_NONE:
        case PTHREAD_PRIO_INHERIT:
        case PTHREAD_PRIO_PROTECT:
            attr->protocol = protocol;
            res = 0;
            break;
        }
    }
    return res;
}

int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
    int res = EINVAL;
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
        // <rdar://problem/35844519> the first-fit implementation was broken
        // pre-Liberty so this mapping exists to ensure that the old first-fit
        // define (2) is no longer valid when used on older systems.
        switch (policy) {
        case PTHREAD_MUTEX_POLICY_FAIRSHARE_NP:
            attr->opt = _PTHREAD_MTX_OPT_POLICY_FAIRSHARE;
            res = 0;
            break;
        case PTHREAD_MUTEX_POLICY_FIRSTFIT_NP:
            attr->opt = _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
            res = 0;
            break;
        }
    }
    return res;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    int res = EINVAL;
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
        switch (type) {
        case PTHREAD_MUTEX_NORMAL:
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_RECURSIVE:
        //case PTHREAD_MUTEX_DEFAULT:
            attr->type = type;
            res = 0;
            break;
        }
    }
    return res;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
    int res = EINVAL;

#if __DARWIN_UNIX03
    if (__unix_conforming == 0) {
        __unix_conforming = 1;
    }
#endif /* __DARWIN_UNIX03 */

    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
#if __DARWIN_UNIX03
        if (( pshared == PTHREAD_PROCESS_PRIVATE) ||
                (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
        if ( pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
        {
            attr->pshared = pshared;
            res = 0;
        }
    }
    return res;
}
PTHREAD_NOEXPORT PTHREAD_NOINLINE PTHREAD_NORETURN
int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex)
{
    PTHREAD_ABORT("pthread_mutex corruption: mutex owner changed in the "
            "middle of lock/unlock");
}

static int
_pthread_mutex_check_init_slow(_pthread_mutex *mutex)
{
    int res = EINVAL;

    if (_pthread_mutex_check_signature_init(mutex)) {
        _PTHREAD_LOCK(mutex->lock);
        if (_pthread_mutex_check_signature_init(mutex)) {
            // initialize a statically initialized mutex to provide
            // compatibility for misbehaving applications.
            // (unlock should not be the first operation on a mutex)
            res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
        } else if (_pthread_mutex_check_signature(mutex)) {
            res = 0;
        }
        _PTHREAD_UNLOCK(mutex->lock);
    } else if (_pthread_mutex_check_signature(mutex)) {
        res = 0;
    }
    if (res != 0) {
        PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
    }
    return res;
}
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_check_init(_pthread_mutex *mutex)
{
    if (!_pthread_mutex_check_signature(mutex)) {
        return _pthread_mutex_check_init_slow(mutex);
    }
    return 0;
}
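
/*
 * Illustrative note (added commentary): this is the path a statically
 * initialized mutex takes on first use, e.g.
 *
 *    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 *    pthread_mutex_lock(&m);
 *
 * The fast-path signature check fails for the static initializer, the call
 * falls into the *_init_slow() paths below, and _pthread_mutex_check_init()
 * finishes initialization under mutex->lock before the lock proceeds.
 */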
PTHREAD_ALWAYS_INLINE
static inline bool
_pthread_mutex_is_fairshare(_pthread_mutex *mutex)
{
    return (mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FAIRSHARE);
}

PTHREAD_ALWAYS_INLINE
static inline bool
_pthread_mutex_is_firstfit(_pthread_mutex *mutex)
{
    return (mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT);
}

PTHREAD_ALWAYS_INLINE
static inline bool
_pthread_mutex_is_recursive(_pthread_mutex *mutex)
{
    return (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE);
}
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock_handle_options(_pthread_mutex *mutex, bool trylock,
        uint64_t *tidaddr)
{
    if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL) {
        // NORMAL does not do EDEADLK checking
        return 0;
    }

    uint64_t selfid = _pthread_selfid_direct();
    if (os_atomic_load(tidaddr, relaxed) == selfid) {
        if (_pthread_mutex_is_recursive(mutex)) {
            if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
                mutex->mtxopts.options.lock_count += 1;
                return mutex->mtxopts.options.lock_count;
            } else {
                return -EAGAIN;
            }
        } else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
            // <rdar://problem/16261552> as per OpenGroup, trylock cannot
            // return EDEADLK on a deadlock, it should return EBUSY.
            return -EBUSY;
        } else { /* PTHREAD_MUTEX_ERRORCHECK */
            return -EDEADLK;
        }
    }

    // Not recursive, or recursive but first lock.
    return 0;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_unlock_handle_options(_pthread_mutex *mutex, uint64_t *tidaddr)
{
    if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL) {
        // NORMAL does not do EDEADLK checking
        return 0;
    }

    uint64_t selfid = _pthread_selfid_direct();
    if (os_atomic_load(tidaddr, relaxed) != selfid) {
        return -EPERM;
    } else if (_pthread_mutex_is_recursive(mutex) &&
            --mutex->mtxopts.options.lock_count) {
        return 1;
    }
    return 0;
}
/*
 * Sequence numbers and TID:
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to
 * E=[L6+KE U4 TID0] and then F=[L6+KE U4 TID940]. If a contender comes in after
 * C, it will transition to F=[L6+KE U4 TID940] directly. In both cases, the
 * contender will enter the kernel with either mutexwait(U4, TID0) or
 * mutexwait(U4, TID940). The first owner will unlock the mutex by first
 * updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
 * H=[L6+KE U5 TID=-1] before entering the kernel with mutexdrop(U5, -1) to
 * signal the next waiter (potentially as a prepost). When the waiter comes out
 * of the kernel, it will update the owner to I=[L6+KE U5 TID941]. An unlock at
 * this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID are
 * written independently, a thread may get preempted and another thread might
 * see inconsistent data. In the worst case, another thread may see the TID in
 * the SWITCHING (-1) state or unlocked (0) state for longer because the owning
 * thread was preempted.
 */
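
/*
 * Added commentary (assumption based on the flag names used below): "+KE" in
 * the L values above means the PTH_RWL_KBIT and PTH_RWL_EBIT bits are set in
 * lgenval alongside the generation count; EBIT marks the mutex as owned and
 * KBIT that waiters may be parked in the kernel, which is why the unlock
 * paths clear "just K&E" while leaving the counts alone.
 */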
/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_fairshare_unlock_updatebits(_pthread_mutex *mutex,
        uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
    uint32_t flags = mutex->mtxopts.value;
    flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

    mutex_seq *seqaddr;
    MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

    mutex_seq oldseq, newseq;
    mutex_seq_load(seqaddr, &oldseq);

    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);
    uint64_t oldtid, newtid;

    int res = _pthread_mutex_unlock_handle_options(mutex, tidaddr);
    if (res > 0) {
        // Valid recursive unlock
        if (flagsp) {
            *flagsp = flags;
        }
        PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
        return 0;
    } else if (res < 0) {
        PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, -res);
        return -res;
    }

    bool clearnotify, spurious;
    do {
        newseq = oldseq;
        oldtid = os_atomic_load(tidaddr, relaxed);

        clearnotify = false;
        spurious = false;

        int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
        if (numwaiters == 0) {
            // spurious unlock (unlock of unlocked lock)
            spurious = true;
        } else {
            newseq.ugenval += PTHRW_INC;

            if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
                    (newseq.ugenval & PTHRW_COUNT_MASK)) {
                // our unlock sequence matches to lock sequence, so if the
                // CAS is successful, the mutex is unlocked

                /* do not reset Ibit, just K&E */
                newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
                clearnotify = true;
                newtid = 0; // clear owner
            } else {
                newtid = PTHREAD_MTX_TID_SWITCHING;
                // need to signal others waiting for mutex
                flags |= _PTHREAD_MTX_OPT_NOTIFY;
            }

            if (newtid != oldtid) {
                // We're giving up the mutex one way or the other, so go ahead
                // and update the owner to 0 so that once the CAS below
                // succeeds, there is no stale ownership information. If the
                // CAS of the seqaddr fails, we may loop, but it's still valid
                // for the owner to be SWITCHING/0
                if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
                    // we own this mutex, nobody should be updating it except us
                    return _pthread_mutex_corruption_abort(mutex);
                }
            }
        }

        if (clearnotify || spurious) {
            flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
        }
    } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

    PTHREAD_TRACE(psynch_mutex_unlock_updatebits, mutex, oldseq.lgenval,
            newseq.lgenval, oldtid);

    if (mgenp != NULL) {
        *mgenp = newseq.lgenval;
    }
    if (ugenp != NULL) {
        *ugenp = newseq.ugenval;
    }
    if (pmtxp != NULL) {
        *pmtxp = (uint32_t *)mutex;
    }
    if (flagsp != NULL) {
        *flagsp = flags;
    }

    return 0;
}
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_fairshare_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
    bool firstfit = _pthread_mutex_is_firstfit(mutex);
    bool gotlock = true;

    mutex_seq *seqaddr;
    MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

    mutex_seq oldseq, newseq;
    mutex_seq_load(seqaddr, &oldseq);

    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);

    do {
        newseq = oldseq;

        if (firstfit) {
            // firstfit locks can have the lock stolen out from under a locker
            // between the unlock from the kernel and this lock path. When this
            // happens, we still want to set the K bit before leaving the loop
            // (or notice if the lock unlocks while we try to update).
            gotlock = !is_rwl_ebit_set(oldseq.lgenval);
        } else if ((oldseq.lgenval & (PTH_RWL_KBIT | PTH_RWL_EBIT)) ==
                (PTH_RWL_KBIT | PTH_RWL_EBIT)) {
            // bits are already set, just update the owner tidaddr
            break;
        }

        newseq.lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;
    } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
            acquire));

    if (gotlock) {
        os_atomic_store(tidaddr, selfid, relaxed);
    }

    PTHREAD_TRACE(psynch_mutex_lock_updatebits, mutex, oldseq.lgenval,
            newseq.lgenval, 0);

    // failing to take the lock in firstfit returns 1 to force the caller
    // to wait in the kernel
    return gotlock ? 0 : 1;
}
static int
_pthread_mutex_fairshare_lock_wait(_pthread_mutex *mutex, mutex_seq newseq,
        uint64_t oldtid)
{
    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);
    uint64_t selfid = _pthread_selfid_direct();

    PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
    do {
        uint32_t updateval;
        do {
            updateval = __psynch_mutexwait(mutex, newseq.lgenval,
                    newseq.ugenval, oldtid, mutex->mtxopts.value);
            oldtid = os_atomic_load(tidaddr, relaxed);
        } while (updateval == (uint32_t)-1);

        // returns 0 on successful update; in firstfit it may fail with 1
    } while (_pthread_mutex_fairshare_lock_updatebits(mutex, selfid) == 1);
    PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);

    return 0;
}
PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_fairshare_lock_slow(_pthread_mutex *omutex, bool trylock)
{
    int res, recursive = 0;
    _pthread_mutex *mutex = (_pthread_mutex *)omutex;

    mutex_seq *seqaddr;
    MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

    mutex_seq oldseq, newseq;
    mutex_seq_load(seqaddr, &oldseq);

    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);
    uint64_t oldtid, selfid = _pthread_selfid_direct();

    res = _pthread_mutex_lock_handle_options(mutex, trylock, tidaddr);
    if (res > 0) {
        recursive = 1;
        res = 0;
        goto out;
    } else if (res < 0) {
        res = -res;
        goto out;
    }

    bool gotlock;
    do {
        newseq = oldseq;
        oldtid = os_atomic_load(tidaddr, relaxed);

        gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

        if (trylock && !gotlock) {
            // A trylock on a held lock will fail immediately. But since
            // we did not load the sequence words atomically, perform a
            // no-op CAS64 to ensure that nobody has unlocked concurrently.
        } else {
            // Increment the lock sequence number and force the lock into E+K
            // mode, whether "gotlock" is true or not.
            newseq.lgenval += PTHRW_INC;
            newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
        }
    } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

    PTHREAD_TRACE(psynch_mutex_lock_updatebits, omutex, oldseq.lgenval,
            newseq.lgenval, 0);

    if (gotlock) {
        os_atomic_store(tidaddr, selfid, relaxed);
        res = 0;
        PTHREAD_TRACE(psynch_mutex_ulock, omutex, newseq.lgenval,
                newseq.ugenval, selfid);
    } else if (trylock) {
        res = EBUSY;
        PTHREAD_TRACE(psynch_mutex_utrylock_failed, omutex, newseq.lgenval,
                newseq.ugenval, oldtid);
    } else {
        PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, omutex,
                newseq.lgenval, newseq.ugenval, oldtid);
        res = _pthread_mutex_fairshare_lock_wait(mutex, newseq, oldtid);
        PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, omutex,
                newseq.lgenval, newseq.ugenval, oldtid);
    }

    if (res == 0 && _pthread_mutex_is_recursive(mutex)) {
        mutex->mtxopts.options.lock_count = 1;
    }

out:
    if (res == 0) {
        PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, recursive, 0);
    } else {
        PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
    }

    return res;
}
static inline int
_pthread_mutex_fairshare_lock(_pthread_mutex *mutex, bool trylock)
{
#if ENABLE_USERSPACE_TRACE
    return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
#elif PLOCKSTAT
    if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
        return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
    }
#endif

    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);
    uint64_t selfid = _pthread_selfid_direct();

    mutex_seq *seqaddr;
    MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

    mutex_seq oldseq, newseq;
    mutex_seq_load(seqaddr, &oldseq);

    if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
        return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
    }

    bool gotlock;
    do {
        newseq = oldseq;

        gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

        if (trylock && !gotlock) {
            // A trylock on a held lock will fail immediately. But since
            // we did not load the sequence words atomically, perform a
            // no-op CAS64 to ensure that nobody has unlocked concurrently.
        } else if (os_likely(gotlock)) {
            // Increment the lock sequence number and force the lock into E+K
            // mode, whether "gotlock" is true or not.
            newseq.lgenval += PTHRW_INC;
            newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
        } else {
            return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
        }
    } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
            acquire)));

    if (os_likely(gotlock)) {
        os_atomic_store(tidaddr, selfid, relaxed);
        return 0;
    } else if (trylock) {
        return EBUSY;
    } else {
        __builtin_trap();
    }
}
static int
_pthread_mutex_fairshare_unlock_drop(_pthread_mutex *mutex, mutex_seq newseq,
        uint32_t flags)
{
    int res;
    uint32_t updateval;

    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);

    PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_START, mutex, newseq.lgenval,
            newseq.ugenval, os_atomic_load(tidaddr, relaxed));

    updateval = __psynch_mutexdrop(mutex, newseq.lgenval, newseq.ugenval,
            os_atomic_load(tidaddr, relaxed), flags);

    PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_END, mutex, updateval, 0, 0);

    if (updateval == (uint32_t)-1) {
        res = errno;
        if (res == EINTR) {
            res = 0;
        }
        if (res != 0) {
            PTHREAD_ABORT("__psynch_mutexdrop failed with error %d", res);
        }
        return res;
    }

    return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_fairshare_unlock_slow(_pthread_mutex *mutex)
{
    int res;
    mutex_seq newseq;
    uint32_t flags;

    res = _pthread_mutex_fairshare_unlock_updatebits(mutex, &flags, NULL,
            &newseq.lgenval, &newseq.ugenval);
    if (res != 0) return res;

    if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
        return _pthread_mutex_fairshare_unlock_drop(mutex, newseq, flags);
    } else {
        uint64_t *tidaddr;
        MUTEX_GETTID_ADDR(mutex, &tidaddr);
        PTHREAD_TRACE(psynch_mutex_uunlock, mutex, newseq.lgenval,
                newseq.ugenval, os_atomic_load(tidaddr, relaxed));
    }

    return 0;
}
static inline int
_pthread_mutex_fairshare_unlock(_pthread_mutex *mutex)
{
#if ENABLE_USERSPACE_TRACE
    return _pthread_mutex_fairshare_unlock_slow(mutex);
#elif PLOCKSTAT
    if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
        return _pthread_mutex_fairshare_unlock_slow(mutex);
    }
#endif

    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);

    mutex_seq *seqaddr;
    MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

    mutex_seq oldseq, newseq;
    mutex_seq_load(seqaddr, &oldseq);

    int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
    if (os_unlikely(numwaiters == 0)) {
        // spurious unlock (unlock of unlocked lock)
        return 0;
    }

    // We're giving up the mutex one way or the other, so go ahead and
    // update the owner to 0 so that once the CAS below succeeds, there
    // is no stale ownership information. If the CAS of the seqaddr
    // fails, we may loop, but it's still valid for the owner to be
    // SWITCHING/0
    os_atomic_store(tidaddr, 0, relaxed);

    do {
        newseq = oldseq;
        newseq.ugenval += PTHRW_INC;

        if (os_likely((oldseq.lgenval & PTHRW_COUNT_MASK) ==
                (newseq.ugenval & PTHRW_COUNT_MASK))) {
            // if we succeed in performing the CAS we can be sure of a fast
            // path (only needing the CAS) unlock, if:
            // a. our lock and unlock sequence are equal
            // b. we don't need to clear an unlock prepost from the kernel

            // do not reset Ibit, just K&E
            newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
        } else {
            return _pthread_mutex_fairshare_unlock_slow(mutex);
        }
    } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
            release)));

    return 0;
}
#pragma mark firstfit

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_firstfit_unlock_updatebits(_pthread_mutex *mutex,
        uint32_t *flagsp, uint32_t **mutexp, uint32_t *lvalp, uint32_t *uvalp)
{
    uint32_t flags = mutex->mtxopts.value & ~_PTHREAD_MTX_OPT_NOTIFY;
    bool kernel_wake;

    mutex_seq *seqaddr;
    MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

    mutex_seq oldseq, newseq;
    mutex_seq_load(seqaddr, &oldseq);

    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);
    uint64_t oldtid;

    int res = _pthread_mutex_unlock_handle_options(mutex, tidaddr);
    if (res > 0) {
        // Valid recursive unlock
        if (flagsp) {
            *flagsp = flags;
        }
        PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
        return 0;
    } else if (res < 0) {
        PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, -res);
        return -res;
    }

    do {
        newseq = oldseq;
        oldtid = os_atomic_load(tidaddr, relaxed);
        // More than one kernel waiter means we need to do a wake.
        kernel_wake = diff_genseq(oldseq.lgenval, oldseq.ugenval) > 0;
        newseq.lgenval &= ~PTH_RWL_EBIT;

        if (kernel_wake) {
            // Going to the kernel post-unlock removes a single waiter unlock
            // from the mutex counts.
            newseq.ugenval += PTHRW_INC;
        }

        if (!os_atomic_cmpxchg(tidaddr, oldtid, 0, relaxed)) {
            return _pthread_mutex_corruption_abort(mutex);
        }
    } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

    PTHREAD_TRACE(psynch_ffmutex_unlock_updatebits, mutex, oldseq.lgenval,
            newseq.lgenval, newseq.ugenval);

    if (kernel_wake) {
        // We choose to return this out via flags because the condition
        // variable also uses this to determine whether to do a kernel wake
        // when beginning a cvwait.
        flags |= _PTHREAD_MTX_OPT_NOTIFY;
    }
    if (lvalp) {
        *lvalp = newseq.lgenval;
    }
    if (uvalp) {
        *uvalp = newseq.ugenval;
    }
    if (mutexp) {
        *mutexp = (uint32_t *)mutex;
    }
    if (flagsp) {
        *flagsp = flags;
    }

    return 0;
}
PTHREAD_NOEXPORT PTHREAD_NOINLINE
static int
_pthread_mutex_firstfit_wake(_pthread_mutex *mutex, mutex_seq newseq,
        uint32_t flags)
{
    PTHREAD_TRACE(psynch_ffmutex_wake, mutex, newseq.lgenval, newseq.ugenval,
            0);
    int res = __psynch_mutexdrop(mutex, newseq.lgenval, newseq.ugenval, 0,
            flags);

    if (res == -1) {
        res = errno;
        if (res == EINTR) {
            res = 0;
        }
        if (res != 0) {
            PTHREAD_ABORT("__psynch_mutexdrop failed with error %d", res);
        }
        return res;
    }
    return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_firstfit_unlock_slow(_pthread_mutex *mutex)
{
    mutex_seq newseq;
    uint32_t flags;
    int res;

    res = _pthread_mutex_firstfit_unlock_updatebits(mutex, &flags, NULL,
            &newseq.lgenval, &newseq.ugenval);
    if (res != 0) return res;

    if (flags & _PTHREAD_MTX_OPT_NOTIFY) {
        return _pthread_mutex_firstfit_wake(mutex, newseq, flags);
    }
    return 0;
}
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_firstfit_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid,
        mutex_seq *newseqp)
{
    bool gotlock;

    mutex_seq *seqaddr;
    MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

    mutex_seq oldseq, newseq;
    mutex_seq_load(seqaddr, &oldseq);

    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);

    PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_START, mutex,
            oldseq.lgenval, oldseq.ugenval, 0);

    do {
        newseq = oldseq;
        gotlock = is_rwl_ebit_clear(oldseq.lgenval);

        if (gotlock) {
            // If we see the E-bit cleared, we should just attempt to take it.
            newseq.lgenval |= PTH_RWL_EBIT;
        } else {
            // If we failed to get the lock then we need to put ourselves back
            // in the queue of waiters. The previous unlocker that woke us out
            // of the kernel consumed the S-count for our previous wake. So
            // take another ticket on L and go back in the kernel to sleep.
            newseq.lgenval += PTHRW_INC;
        }
    } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

    if (gotlock) {
        os_atomic_store(tidaddr, selfid, relaxed);
    }

    PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_END, mutex,
            newseq.lgenval, newseq.ugenval, 0);

    if (newseqp) {
        *newseqp = newseq;
    }
    return gotlock;
}
static int
_pthread_mutex_firstfit_lock_wait(_pthread_mutex *mutex, mutex_seq newseq,
        uint64_t oldtid)
{
    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);
    uint64_t selfid = _pthread_selfid_direct();

    PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
    do {
        uint32_t uval;
        do {
            PTHREAD_TRACE(psynch_ffmutex_wait | DBG_FUNC_START, mutex,
                    newseq.lgenval, newseq.ugenval, mutex->mtxopts.value);
            uval = __psynch_mutexwait(mutex, newseq.lgenval, newseq.ugenval,
                    oldtid, mutex->mtxopts.value);
            PTHREAD_TRACE(psynch_ffmutex_wait | DBG_FUNC_END, mutex,
                    uval, 0, 0);
            oldtid = os_atomic_load(tidaddr, relaxed);
        } while (uval == (uint32_t)-1);
    } while (!_pthread_mutex_firstfit_lock_updatebits(mutex, selfid, &newseq));
    PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);

    return 0;
}
PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_firstfit_lock_slow(_pthread_mutex *mutex, bool trylock)
{
    int res, recursive = 0;

    mutex_seq *seqaddr;
    MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

    mutex_seq oldseq, newseq;
    mutex_seq_load(seqaddr, &oldseq);

    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);
    uint64_t oldtid, selfid = _pthread_selfid_direct();

    res = _pthread_mutex_lock_handle_options(mutex, trylock, tidaddr);
    if (res > 0) {
        recursive = 1;
        res = 0;
        goto out;
    } else if (res < 0) {
        res = -res;
        goto out;
    }

    PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_START, mutex,
            oldseq.lgenval, oldseq.ugenval, 0);

    bool gotlock;
    do {
        newseq = oldseq;
        oldtid = os_atomic_load(tidaddr, relaxed);

        gotlock = is_rwl_ebit_clear(oldseq.lgenval);
        if (trylock && !gotlock) {
            // We still want to perform the CAS here, even though it won't
            // do anything so that it fails if someone unlocked while we were
            // in the loop
        } else if (gotlock) {
            // In first-fit, getting the lock simply adds the E-bit
            newseq.lgenval |= PTH_RWL_EBIT;
        } else {
            // Failed to get the lock, increment the L-val and go to
            // the kernel to sleep
            newseq.lgenval += PTHRW_INC;
        }
    } while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

    PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_END, mutex,
            newseq.lgenval, newseq.ugenval, 0);

    if (gotlock) {
        os_atomic_store(tidaddr, selfid, relaxed);
        res = 0;
        PTHREAD_TRACE(psynch_mutex_ulock, mutex, newseq.lgenval,
                newseq.ugenval, selfid);
    } else if (trylock) {
        res = EBUSY;
        PTHREAD_TRACE(psynch_mutex_utrylock_failed, mutex, newseq.lgenval,
                newseq.ugenval, oldtid);
    } else {
        PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, mutex,
                newseq.lgenval, newseq.ugenval, oldtid);
        res = _pthread_mutex_firstfit_lock_wait(mutex, newseq, oldtid);
        PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, mutex,
                newseq.lgenval, newseq.ugenval, oldtid);
    }

    if (res == 0 && _pthread_mutex_is_recursive(mutex)) {
        mutex->mtxopts.options.lock_count = 1;
    }

out:
    if (res == 0) {
        PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, recursive, 0);
    } else {
        PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
    }

    return res;
}
#pragma mark fast path

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_droplock(_pthread_mutex *mutex, uint32_t *flagsp,
        uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
    if (_pthread_mutex_is_fairshare(mutex)) {
        return _pthread_mutex_fairshare_unlock_updatebits(mutex, flagsp,
                pmtxp, mgenp, ugenp);
    }
    return _pthread_mutex_firstfit_unlock_updatebits(mutex, flagsp, pmtxp,
            mgenp, ugenp);
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_lock_init_slow(_pthread_mutex *mutex, bool trylock)
{
    int res;

    res = _pthread_mutex_check_init(mutex);
    if (res != 0) return res;

    if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
        return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
    }
    return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
static int
_pthread_mutex_unlock_init_slow(_pthread_mutex *mutex)
{
    int res;

    // Initialize static mutexes for compatibility with misbehaving
    // applications (unlock should not be the first operation on a mutex).
    res = _pthread_mutex_check_init(mutex);
    if (res != 0) return res;

    if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
        return _pthread_mutex_fairshare_unlock_slow(mutex);
    }
    return _pthread_mutex_firstfit_unlock_slow(mutex);
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
    _pthread_mutex *mutex = (_pthread_mutex *)omutex;
    if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
        return _pthread_mutex_unlock_init_slow(mutex);
    }

    if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
        return _pthread_mutex_fairshare_unlock(mutex);
    }

#if ENABLE_USERSPACE_TRACE
    return _pthread_mutex_firstfit_unlock_slow(mutex);
#elif PLOCKSTAT
    if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
        return _pthread_mutex_firstfit_unlock_slow(mutex);
    }
#endif

    /*
     * This is the first-fit fast path. The fairshare fast-ish path is in
     * _pthread_mutex_fairshare_unlock()
     */
    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);

    mutex_seq *seqaddr;
    MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

    mutex_seq oldseq, newseq;
    mutex_seq_load(seqaddr, &oldseq);

    // We're giving up the mutex one way or the other, so go ahead and
    // update the owner to 0 so that once the CAS below succeeds, there
    // is no stale ownership information. If the CAS of the seqaddr
    // fails, we may loop, but it's still valid for the owner to be
    // SWITCHING/0
    os_atomic_store(tidaddr, 0, relaxed);

    do {
        newseq = oldseq;

        if (diff_genseq(oldseq.lgenval, oldseq.ugenval) == 0) {
            // No outstanding waiters in kernel, we can simply drop the E-bit
            // and return.
            newseq.lgenval &= ~PTH_RWL_EBIT;
        } else {
            return _pthread_mutex_firstfit_unlock_slow(mutex);
        }
    } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
            release)));

    return 0;
}
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_firstfit_lock(pthread_mutex_t *omutex, bool trylock)
{
    _pthread_mutex *mutex = (_pthread_mutex *)omutex;
    if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
        return _pthread_mutex_lock_init_slow(mutex, trylock);
    }

    if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
        return _pthread_mutex_fairshare_lock(mutex, trylock);
    }

#if ENABLE_USERSPACE_TRACE
    return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
#elif PLOCKSTAT
    if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
        return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
    }
#endif

    /*
     * This is the first-fit fast path. The fairshare fast-ish path is in
     * _pthread_mutex_fairshare_lock()
     */
    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);
    uint64_t selfid = _pthread_selfid_direct();

    mutex_seq *seqaddr;
    MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

    mutex_seq oldseq, newseq;
    mutex_seq_load(seqaddr, &oldseq);

    if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
        return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
    }

    bool gotlock;
    do {
        newseq = oldseq;
        gotlock = is_rwl_ebit_clear(oldseq.lgenval);

        if (trylock && !gotlock) {
            // A trylock on a held lock will fail immediately. But since
            // we did not load the sequence words atomically, perform a
            // no-op CAS64 to ensure that nobody has unlocked concurrently.
        } else if (os_likely(gotlock)) {
            // In first-fit, getting the lock simply adds the E-bit
            newseq.lgenval |= PTH_RWL_EBIT;
        } else {
            return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
        }
    } while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
            acquire)));

    if (os_likely(gotlock)) {
        os_atomic_store(tidaddr, selfid, relaxed);
        return 0;
    } else if (trylock) {
        return EBUSY;
    } else {
        __builtin_trap();
    }
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
    return _pthread_mutex_firstfit_lock(mutex, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    return _pthread_mutex_firstfit_lock(mutex, true);
}
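
/*
 * Usage sketch (added commentary, not part of the library): the exported
 * entry points above compose in the usual POSIX way, e.g.
 *
 *    pthread_mutex_t m;
 *    pthread_mutex_init(&m, NULL);
 *    if (pthread_mutex_trylock(&m) == 0) {
 *        // ...critical section...
 *        pthread_mutex_unlock(&m);
 *    }
 *    pthread_mutex_destroy(&m);
 *
 * With a NULL attribute the policy comes from __pthread_mutex_default_opt_policy,
 * and lock/unlock stay on the first-fit fast paths above unless the mutex is
 * fairshare or tracing/dtrace probes are enabled.
 */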
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
        uint32_t static_type)
{
    mutex->mtxopts.value = 0;
    mutex->mtxopts.options.mutex = 1;
    if (attr) {
        if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
            return EINVAL;
        }
        mutex->prioceiling = (int16_t)attr->prioceiling;
        mutex->mtxopts.options.protocol = attr->protocol;
        mutex->mtxopts.options.policy = attr->opt;
        mutex->mtxopts.options.type = attr->type;
        mutex->mtxopts.options.pshared = attr->pshared;
    } else {
        switch (static_type) {
        case 1:
            mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
            break;
        case 2:
            mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
            break;
        case 3:
            /* firstfit fall thru */
        case 7:
            mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
            break;
        default:
            return EINVAL;
        }

        mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
        mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
        if (static_type != 3) {
            mutex->mtxopts.options.policy = __pthread_mutex_default_opt_policy;
        } else {
            mutex->mtxopts.options.policy = _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
        }
        mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
    }
    mutex->priority = 0;

    mutex_seq *seqaddr;
    MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

    uint64_t *tidaddr;
    MUTEX_GETTID_ADDR(mutex, &tidaddr);

#if PTHREAD_MUTEX_INIT_UNUSED
    if ((uint32_t*)tidaddr != mutex->m_tid) {
        mutex->mtxopts.options.misalign = 1;
        __builtin_memset(mutex->m_tid, 0xff, sizeof(mutex->m_tid));
    }
    __builtin_memset(mutex->m_mis, 0xff, sizeof(mutex->m_mis));
#endif // PTHREAD_MUTEX_INIT_UNUSED
    *tidaddr = 0;
    *seqaddr = (mutex_seq){ };

    long sig = _PTHREAD_MUTEX_SIG;
    if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
            (_pthread_mutex_is_fairshare(mutex) ||
             _pthread_mutex_is_firstfit(mutex))) {
        // rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
        sig = _PTHREAD_MUTEX_SIG_fast;
    }

#if PTHREAD_MUTEX_INIT_UNUSED
    // For detecting copied mutexes and smashes during debugging
    uint32_t sig32 = (uint32_t)sig;
#if defined(__LP64__)
    uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
    __builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
    mutex->_reserved[2] = sig32;
    mutex->_reserved[3] = sig32;
    mutex->_pad = sig32;
#else
    mutex->_reserved[0] = sig32;
#endif
#endif // PTHREAD_MUTEX_INIT_UNUSED

    // Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
    // For binary compatibility reasons we cannot require natural alignment of
    // the 64bit 'sig' long value in the struct. rdar://problem/21610439
    uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
    uint32_t *sig32_val = (uint32_t*)&sig;
    *(sig32_ptr + 1) = *(sig32_val + 1);
    os_atomic_store(sig32_ptr, *sig32_val, release);
#else
    os_atomic_store2o(mutex, sig, sig, release);
#endif

    return 0;
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_destroy(pthread_mutex_t *omutex)
{
    _pthread_mutex *mutex = (_pthread_mutex *)omutex;

    int res = EINVAL;

    _PTHREAD_LOCK(mutex->lock);
    if (_pthread_mutex_check_signature(mutex)) {
        mutex_seq *seqaddr;
        MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

        mutex_seq seq;
        mutex_seq_load(seqaddr, &seq);

        uint64_t *tidaddr;
        MUTEX_GETTID_ADDR(mutex, &tidaddr);

        if ((os_atomic_load(tidaddr, relaxed) == 0) &&
                (seq.lgenval & PTHRW_COUNT_MASK) ==
                (seq.ugenval & PTHRW_COUNT_MASK)) {
            mutex->sig = _PTHREAD_NO_SIG;
            res = 0;
        } else {
            res = EBUSY;
        }
    } else if (_pthread_mutex_check_signature_init(mutex)) {
        mutex->sig = _PTHREAD_NO_SIG;
        res = 0;
    }
    _PTHREAD_UNLOCK(mutex->lock);

    return res;
}

#endif /* !BUILDING_VARIANT ] */
/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
#if __DARWIN_UNIX03
    if (__unix_conforming == 0) {
        __unix_conforming = 1;
    }
    if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
        return EINVAL;
    }
#endif /* __DARWIN_UNIX03 */

    attr->sig = _PTHREAD_NO_SIG;
    return 0;
}