/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */
#include "kern/kern_trace.h"

#ifndef BUILDING_VARIANT /* [ */

#ifdef PLOCKSTAT
#include "plockstat.h"
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user d scripts don't get errors.
 */
OS_USED static void
_plockstat_never_fired(void);
OS_USED static void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#define PTHREAD_MUTEX_INIT_UNUSED 1

#if !VARIANT_DYLD
int __pthread_mutex_default_opt_policy = _PTHREAD_MTX_OPT_POLICY_DEFAULT;
bool __pthread_mutex_use_ulock = _PTHREAD_MTX_OPT_ULOCK_DEFAULT;
bool __pthread_mutex_ulock_adaptive_spin = _PTHREAD_MTX_OPT_ADAPTIVE_DEFAULT;

static inline bool
_pthread_mutex_policy_validate(int policy)
{
	return (policy >= 0 && policy < _PTHREAD_MUTEX_POLICY_LAST);
}

static inline int
_pthread_mutex_policy_to_opt(int policy)
{
	switch (policy) {
	case PTHREAD_MUTEX_POLICY_FAIRSHARE_NP:
		return _PTHREAD_MTX_OPT_POLICY_FAIRSHARE;
	case PTHREAD_MUTEX_POLICY_FIRSTFIT_NP:
		return _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
	default:
		__builtin_unreachable();
	}
}

void
_pthread_mutex_global_init(const char *envp[],
		struct _pthread_registration_data *registration_data)
{
	int opt = _PTHREAD_MTX_OPT_POLICY_DEFAULT;
	if (registration_data->mutex_default_policy) {
		int policy = registration_data->mutex_default_policy &
				_PTHREAD_REG_DEFAULT_POLICY_MASK;
		if (_pthread_mutex_policy_validate(policy)) {
			opt = _pthread_mutex_policy_to_opt(policy);
		}
	}

	const char *envvar = _simple_getenv(envp, "PTHREAD_MUTEX_DEFAULT_POLICY");
	if (envvar) {
		int policy = envvar[0] - '0';
		if (_pthread_mutex_policy_validate(policy)) {
			opt = _pthread_mutex_policy_to_opt(policy);
		}
	}

	if (opt != __pthread_mutex_default_opt_policy) {
		__pthread_mutex_default_opt_policy = opt;
	}

	bool use_ulock = _PTHREAD_MTX_OPT_ULOCK_DEFAULT;
	if (_os_xbs_chrooted) {
		use_ulock = false;
	} else {
		envvar = _simple_getenv(envp, "PTHREAD_MUTEX_USE_ULOCK");
		if (envvar) {
			use_ulock = (envvar[0] == '1');
		} else if (registration_data->mutex_default_policy) {
			use_ulock = registration_data->mutex_default_policy &
					_PTHREAD_REG_DEFAULT_USE_ULOCK;
		}
	}

	if (use_ulock != __pthread_mutex_use_ulock) {
		__pthread_mutex_use_ulock = use_ulock;
	}

	bool adaptive_spin = _PTHREAD_MTX_OPT_ADAPTIVE_DEFAULT;
	envvar = _simple_getenv(envp, "PTHREAD_MUTEX_ADAPTIVE_SPIN");
	if (envvar) {
		adaptive_spin = (envvar[0] == '1');
	} else if (registration_data->mutex_default_policy) {
		adaptive_spin = registration_data->mutex_default_policy &
				_PTHREAD_REG_DEFAULT_USE_ADAPTIVE_SPIN;
	}

	if (adaptive_spin != __pthread_mutex_ulock_adaptive_spin) {
		__pthread_mutex_ulock_adaptive_spin = adaptive_spin;
	}
}
#endif // !VARIANT_DYLD

static inline int _pthread_mutex_init(pthread_mutex_t *mutex,
		const pthread_mutexattr_t *attr, uint32_t static_type);

typedef union mutex_seq {
	struct { uint32_t lgenval; uint32_t ugenval; };
	struct { uint32_t mgen; uint32_t ugen; };
	uint64_t seq_LU;
	uint64_t _Atomic atomic_seq_LU;
} mutex_seq;

_Static_assert(sizeof(mutex_seq) == 2 * sizeof(uint32_t),
		"Incorrect mutex_seq size");

#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif
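
/*
 * Illustrative sketch (added commentary, not part of the original source):
 * on the little-endian layout asserted above, lgenval occupies the low half
 * and ugenval the high half of the 64-bit atomic word, so a single 64-bit
 * load or CAS observes/updates both generation counts together. The helper
 * name below is hypothetical and only demonstrates the packing.
 */
__attribute__((unused)) static inline uint64_t
_mutex_seq_pack_example(uint32_t lgenval, uint32_t ugenval)
{
	// Equivalent, on little endian, to writing the two struct members and
	// reading back the seq_LU view of the union.
	return ((uint64_t)ugenval << 32) | lgenval;
}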

static inline void
MUTEX_GETSEQ_ADDR(pthread_mutex_t *mutex, mutex_seq **seqaddr)
{
	// 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*seqaddr = (void *)(((uintptr_t)mutex->psynch.m_seq + 0x7ul) & ~0x7ul);
}

static inline void
MUTEX_GETTID_ADDR(pthread_mutex_t *mutex, uint64_t **tidaddr)
{
	// 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*tidaddr = (void *)(((uintptr_t)mutex->psynch.m_tid + 0x7ul) & ~0x7ul);
}
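
/*
 * Worked example (added commentary): if a mutex happens to be only
 * byte-aligned and m_seq starts at address 0x1003, then
 * (0x1003 + 0x7) & ~0x7 = 0x1008, i.e. the first 8-byte-aligned address
 * inside the array; for a naturally aligned mutex the round-up is a no-op
 * and the result is simply &m_seq[0].
 */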

static inline void
mutex_seq_load(mutex_seq *seqaddr, mutex_seq *oldseqval)
{
	oldseqval->seq_LU = seqaddr->seq_LU;
}

#define mutex_seq_atomic_load(seqaddr, oldseqval, m) \
		mutex_seq_atomic_load_##m(seqaddr, oldseqval)

OS_ALWAYS_INLINE OS_USED
static inline bool
mutex_seq_atomic_cmpxchgv_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, relaxed);
}

OS_ALWAYS_INLINE OS_USED
static inline bool
mutex_seq_atomic_cmpxchgv_acquire(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, acquire);
}

OS_ALWAYS_INLINE OS_USED
static inline bool
mutex_seq_atomic_cmpxchgv_release(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, release);
}

#define mutex_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, m)\
		mutex_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval)
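
/*
 * Usage note (added commentary): callers pass the desired memory order as the
 * trailing token, e.g. mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
 * release) expands to mutex_seq_atomic_cmpxchgv_release(seqaddr, &oldseq,
 * &newseq). This lets the lock paths below use acquire ordering and the
 * unlock paths release ordering without a runtime branch.
 */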

/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
#if 0
	/* conformance tests depend on not having this behavior */
	/* The test for this behavior is optional */
	if (_pthread_mutex_check_signature(mutex))
		return EBUSY;
#endif
	_pthread_lock_init(&mutex->lock);
	return (_pthread_mutex_init(mutex, attr, 0x7));
}

int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
	int res = EINVAL;
	pthread_mutex_t *mutex = (pthread_mutex_t *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_pthread_lock_lock(&mutex->lock);
		*prioceiling = mutex->prioceiling;
		res = 0;
		_pthread_lock_unlock(&mutex->lock);
	}
	return res;
}

int
pthread_mutex_setprioceiling(pthread_mutex_t *mutex, int prioceiling,
		int *old_prioceiling)
{
	int res = EINVAL;
	if (_pthread_mutex_check_signature(mutex)) {
		_pthread_lock_lock(&mutex->lock);
		if (prioceiling >= -999 && prioceiling <= 999) {
			*old_prioceiling = mutex->prioceiling;
			mutex->prioceiling = (int16_t)prioceiling;
			res = 0;
		}
		_pthread_lock_unlock(&mutex->lock);
	}
	return res;
}

int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
		int *prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*prioceiling = attr->prioceiling;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*protocol = attr->protocol;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpolicy_np(const pthread_mutexattr_t *attr, int *policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (attr->opt) {
		case _PTHREAD_MTX_OPT_POLICY_FAIRSHARE:
			*policy = PTHREAD_MUTEX_POLICY_FAIRSHARE_NP;
			res = 0;
			break;
		case _PTHREAD_MTX_OPT_POLICY_FIRSTFIT:
			*policy = PTHREAD_MUTEX_POLICY_FIRSTFIT_NP;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*type = attr->type;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
	attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
	attr->opt = __pthread_mutex_default_opt_policy;
	attr->type = PTHREAD_MUTEX_DEFAULT;
	attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		if (prioceiling >= -999 && prioceiling <= 999) {
			attr->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (protocol) {
		case PTHREAD_PRIO_NONE:
		case PTHREAD_PRIO_INHERIT:
		case PTHREAD_PRIO_PROTECT:
			attr->protocol = protocol;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		// <rdar://problem/35844519> the first-fit implementation was broken
		// pre-Liberty so this mapping exists to ensure that the old first-fit
		// define (2) is no longer valid when used on older systems.
		switch (policy) {
		case PTHREAD_MUTEX_POLICY_FAIRSHARE_NP:
			attr->opt = _PTHREAD_MTX_OPT_POLICY_FAIRSHARE;
			res = 0;
			break;
		case PTHREAD_MUTEX_POLICY_FIRSTFIT_NP:
			attr->opt = _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (type) {
		case PTHREAD_MUTEX_NORMAL:
		case PTHREAD_MUTEX_ERRORCHECK:
		case PTHREAD_MUTEX_RECURSIVE:
		//case PTHREAD_MUTEX_DEFAULT:
			attr->type = type;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		if ((pshared == PTHREAD_PROCESS_PRIVATE) ||
				(pshared == PTHREAD_PROCESS_SHARED)) {
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}

OS_NOINLINE
static int
_pthread_mutex_corruption_abort(pthread_mutex_t *mutex)
{
	PTHREAD_CLIENT_CRASH(0, "pthread_mutex corruption: mutex owner changed "
			"in the middle of lock/unlock");
}

OS_NOINLINE
static int
_pthread_mutex_check_init_slow(pthread_mutex_t *mutex)
{
	int res = EINVAL;

	if (_pthread_mutex_check_signature_init(mutex)) {
		_pthread_lock_lock(&mutex->lock);
		if (_pthread_mutex_check_signature_init(mutex)) {
			// initialize a statically initialized mutex to provide
			// compatibility for misbehaving applications.
			// (unlock should not be the first operation on a mutex)
			res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
		} else if (_pthread_mutex_check_signature(mutex)) {
			res = 0;
		}
		_pthread_lock_unlock(&mutex->lock);
	} else if (_pthread_mutex_check_signature(mutex)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
	}
	return res;
}

static inline int
_pthread_mutex_check_init(pthread_mutex_t *mutex)
{
	if (!_pthread_mutex_check_signature(mutex)) {
		return _pthread_mutex_check_init_slow(mutex);
	}
	return 0;
}

static inline bool
_pthread_mutex_is_fairshare(pthread_mutex_t *mutex)
{
	return (mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FAIRSHARE);
}

static inline bool
_pthread_mutex_is_firstfit(pthread_mutex_t *mutex)
{
	return (mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT);
}

static inline bool
_pthread_mutex_is_recursive(pthread_mutex_t *mutex)
{
	return (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE);
}

static int
_pthread_mutex_lock_handle_options(pthread_mutex_t *mutex, bool trylock,
		uint64_t *tidaddr)
{
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL) {
		// NORMAL does not do EDEADLK checking
		return 0;
	}

	uint64_t selfid = _pthread_threadid_self_np_direct();
	if (os_atomic_load_wide(tidaddr, relaxed) == selfid) {
		if (_pthread_mutex_is_recursive(mutex)) {
			if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
				mutex->mtxopts.options.lock_count += 1;
				return mutex->mtxopts.options.lock_count;
			} else {
				return -EAGAIN;
			}
		} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
			// <rdar://problem/16261552> as per OpenGroup, trylock cannot
			// return EDEADLK on a deadlock, it should return EBUSY.
			return -EBUSY;
		} else { /* PTHREAD_MUTEX_ERRORCHECK */
			return -EDEADLK;
		}
	}

	// Not recursive, or recursive but first lock.
	return 0;
}
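
/*
 * Worked example (added commentary): for a PTHREAD_MUTEX_RECURSIVE mutex
 * already owned by the calling thread, a second pthread_mutex_lock() simply
 * bumps lock_count and returns a positive value here, so the caller never
 * touches the lock word or the kernel; the matching pthread_mutex_unlock()
 * decrements lock_count in _pthread_mutex_unlock_handle_options() and only
 * the final unlock actually releases the mutex.
 */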

static inline int
_pthread_mutex_unlock_handle_options(pthread_mutex_t *mutex, uint64_t *tidaddr)
{
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL) {
		// NORMAL does not do EDEADLK checking
		return 0;
	}

	uint64_t selfid = _pthread_threadid_self_np_direct();
	if (os_atomic_load_wide(tidaddr, relaxed) != selfid) {
		return -EPERM;
	} else if (_pthread_mutex_is_recursive(mutex) &&
			--mutex->mtxopts.options.lock_count) {
		return 1;
	}
	return 0;
}

/*
 * Sequence numbers and TID:
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to
 * E=[L6+KE U4 TID0] and then F=[L6+KE U4 TID940]. If a contender comes in after
 * C, it will transition to F=[L6+KE U4 TID940] directly. In both cases, the
 * contender will enter the kernel with either mutexwait(U4, TID0) or
 * mutexwait(U4, TID940). The first owner will unlock the mutex by first
 * updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
 * H=[L6+KE U5 TID=-1] before entering the kernel with mutexdrop(U5, -1) to
 * signal the next waiter (potentially as a prepost). When the waiter comes out
 * of the kernel, it will update the owner to I=[L6+KE U5 TID941]. An unlock at
 * this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID are
 * written independently, a thread may get preempted and another thread might
 * see inconsistent data. In the worst case, another thread may see the TID in
 * the SWITCHING (-1) state or unlocked (0) state for longer because the owning
 * thread was preempted.
 */
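
/*
 * Illustrative reading of the timeline above (added commentary): in state
 * F=[L6+KE U4 TID940] the lock generation is two increments ahead of the
 * unlock generation, so diff_genseq(lgenval, ugenval) is nonzero and the
 * unlock path knows it still has kernel references/waiters to account for.
 * Once the counts match again, as in K=[L6 U6 TID0], diff_genseq() returns 0
 * and an unlock that observes that state is treated as the "spurious unlock"
 * case handled below.
 */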

/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
static inline int
_pthread_mutex_fairshare_unlock_updatebits(pthread_mutex_t *mutex,
		uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	uint32_t flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, newtid;

	int res = _pthread_mutex_unlock_handle_options(mutex, tidaddr);
	if (res > 0) {
		// Valid recursive unlock
		if (flagsp) {
			*flagsp = flags;
		}
		PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
		return 0;
	} else if (res < 0) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, -res);
		return -res;
	}

	bool clearnotify, spurious;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load_wide(tidaddr, relaxed);

		clearnotify = false;
		spurious = false;

		int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
		if (numwaiters == 0) {
			// spurious unlock (unlock of unlocked lock)
			spurious = true;
		} else {
			newseq.ugenval += PTHRW_INC;

			if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
					(newseq.ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches to lock sequence, so if the
				// CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				newtid = PTHREAD_MTX_TID_SWITCHING;
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead
				// and update the owner to 0 so that once the CAS below
				// succeeds, there is no stale ownership information. If the
				// CAS of the seqaddr fails, we may loop, but it's still valid
				// for the owner to be SWITCHING/0
				if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
					// we own this mutex, nobody should be updating it except us
					return _pthread_mutex_corruption_abort(mutex);
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

	PTHREAD_TRACE(psynch_mutex_unlock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, oldtid);

	if (mgenp != NULL) {
		*mgenp = newseq.lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = newseq.ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}

static inline int
_pthread_mutex_fairshare_lock_updatebits(pthread_mutex_t *mutex, uint64_t selfid)
{
	bool firstfit = _pthread_mutex_is_firstfit(mutex);
	bool gotlock = true;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	do {
		newseq = oldseq;

		if (firstfit) {
			// firstfit locks can have the lock stolen out from under a locker
			// between the unlock from the kernel and this lock path. When this
			// happens, we still want to set the K bit before leaving the loop
			// (or notice if the lock unlocks while we try to update).
			gotlock = !is_rwl_ebit_set(oldseq.lgenval);
		} else if ((oldseq.lgenval & (PTH_RWL_KBIT | PTH_RWL_EBIT)) ==
				(PTH_RWL_KBIT | PTH_RWL_EBIT)) {
			// bits are already set, just update the owner tidaddr
			break;
		}

		newseq.lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			relaxed));

	if (gotlock) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
	}

	PTHREAD_TRACE(psynch_mutex_lock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, 0);

	// failing to take the lock in firstfit returns 1 to force the caller
	// to wait in the kernel
	return gotlock ? 0 : 1;
}

OS_NOINLINE
static int
_pthread_mutex_fairshare_lock_wait(pthread_mutex_t *mutex, mutex_seq newseq,
		uint64_t oldtid)
{
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_threadid_self_np_direct();

	PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
	do {
		uint32_t updateval;
		do {
			updateval = __psynch_mutexwait(mutex, newseq.lgenval,
					newseq.ugenval, oldtid, mutex->mtxopts.value);
			oldtid = os_atomic_load_wide(tidaddr, relaxed);
		} while (updateval == (uint32_t)-1);

		// returns 0 on successful update; in firstfit it may fail with 1
	} while (_pthread_mutex_fairshare_lock_updatebits(mutex, selfid) == 1);
	PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

OS_NOINLINE
static int
_pthread_mutex_fairshare_lock_slow(pthread_mutex_t *mutex, bool trylock)
{
	int res, recursive = 0;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, selfid = _pthread_threadid_self_np_direct();

	res = _pthread_mutex_lock_handle_options(mutex, trylock, tidaddr);
	if (res > 0) {
		recursive = 1;
		res = 0;
		goto out;
	} else if (res < 0) {
		res = -res;
		goto out;
	}

	bool gotlock;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load_wide(tidaddr, relaxed);

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	PTHREAD_TRACE(psynch_mutex_lock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, 0);

	if (gotlock) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
		res = 0;
		PTHREAD_TRACE(psynch_mutex_ulock, mutex, newseq.lgenval,
				newseq.ugenval, selfid);
	} else if (trylock) {
		res = EBUSY;
		PTHREAD_TRACE(psynch_mutex_utrylock_failed, mutex, newseq.lgenval,
				newseq.ugenval, oldtid);
	} else {
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, mutex,
				newseq.lgenval, newseq.ugenval, oldtid);
		res = _pthread_mutex_fairshare_lock_wait(mutex, newseq, oldtid);
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, mutex,
				newseq.lgenval, newseq.ugenval, oldtid);
	}

	if (res == 0 && _pthread_mutex_is_recursive(mutex)) {
		mutex->mtxopts.options.lock_count = 1;
	}

out:
	if (res == 0) {
		PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, recursive, 0);
	} else {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
	}

	return res;
}

static int
_pthread_mutex_fairshare_lock(pthread_mutex_t *mutex, bool trylock)
{
#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
	}
#endif

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_threadid_self_np_direct();

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
		return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
	}

	bool gotlock;
	do {
		newseq = oldseq;

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else if (os_likely(gotlock)) {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		} else {
			return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire)));

	if (os_likely(gotlock)) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}

OS_NOINLINE
static int
_pthread_mutex_fairshare_unlock_drop(pthread_mutex_t *mutex, mutex_seq newseq,
		uint32_t flags)
{
	int res = 0;
	uint32_t updateval;

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_START, mutex, newseq.lgenval,
			newseq.ugenval, os_atomic_load_wide(tidaddr, relaxed));

	updateval = __psynch_mutexdrop(mutex, newseq.lgenval, newseq.ugenval,
			os_atomic_load_wide(tidaddr, relaxed), flags);

	PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_END, mutex, updateval, 0, 0);

	if (updateval == (uint32_t)-1) {
		res = errno;
		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_INTERNAL_CRASH(res, "__psynch_mutexdrop failed");
		}
	}

	return res;
}

OS_NOINLINE
static int
_pthread_mutex_fairshare_unlock_slow(pthread_mutex_t *mutex)
{
	int res;
	mutex_seq newseq;
	uint32_t flags;

	res = _pthread_mutex_fairshare_unlock_updatebits(mutex, &flags, NULL,
			&newseq.lgenval, &newseq.ugenval);
	if (res != 0) return res;

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		return _pthread_mutex_fairshare_unlock_drop(mutex, newseq, flags);
	} else {
		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		PTHREAD_TRACE(psynch_mutex_uunlock, mutex, newseq.lgenval,
				newseq.ugenval, os_atomic_load_wide(tidaddr, relaxed));
	}

	return 0;
}

static int
_pthread_mutex_fairshare_unlock(pthread_mutex_t *mutex)
{
#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_fairshare_unlock_slow(mutex);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_fairshare_unlock_slow(mutex);
	}
#endif

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
	if (os_unlikely(numwaiters == 0)) {
		// spurious unlock (unlock of unlocked lock)
		return 0;
	}

	// We're giving up the mutex one way or the other, so go ahead and
	// update the owner to 0 so that once the CAS below succeeds, there
	// is no stale ownership information. If the CAS of the seqaddr
	// fails, we may loop, but it's still valid for the owner to be
	// SWITCHING/0
	os_atomic_store_wide(tidaddr, 0, relaxed);

	do {
		newseq = oldseq;
		newseq.ugenval += PTHRW_INC;

		if (os_likely((oldseq.lgenval & PTHRW_COUNT_MASK) ==
				(newseq.ugenval & PTHRW_COUNT_MASK))) {
			// if we succeed in performing the CAS we can be sure of a fast
			// path (only needing the CAS) unlock, if:
			//	a. our lock and unlock sequence are equal
			//	b. we don't need to clear an unlock prepost from the kernel

			// do not reset Ibit, just K&E
			newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
		} else {
			return _pthread_mutex_fairshare_unlock_slow(mutex);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			release)));

	return 0;
}

static inline uint32_t
_pthread_mutex_ulock_self_owner_value(void)
{
	mach_port_t self_port = _pthread_mach_thread_self_direct();
	return self_port & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
}
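
/*
 * Sketch of the ulock state word (added commentary, layout inferred from the
 * code below rather than quoted from a header): the 32-bit uval holds the
 * owner's mach thread port masked by _PTHREAD_MUTEX_ULOCK_OWNER_MASK, plus
 * _PTHREAD_MUTEX_ULOCK_WAITERS_BIT once some thread has gone (or is going) to
 * the kernel via __ulock_wait(). The uncontended fast paths are a single CAS
 * from _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE to the owner value on lock, and an
 * atomic exchange back to the unlocked value on unlock.
 */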

OS_NOINLINE
static int
_pthread_mutex_ulock_lock_slow(pthread_mutex_t *mutex, uint32_t self_ownerval,
		uint32_t state)
{
	bool success = false, kernel_waiters = false;

	uint32_t wait_op = UL_UNFAIR_LOCK | ULF_NO_ERRNO;
	if (__pthread_mutex_ulock_adaptive_spin) {
		wait_op |= ULF_WAIT_ADAPTIVE_SPIN;
	}

	PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
	do {
		bool owner_dead = false;

		do {
			uint32_t current_ownerval = state & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
			if (os_unlikely(owner_dead)) {
				// TODO: PTHREAD_STRICT candidate
				//
				// For a non-recursive mutex, this indicates that it's really
				// being used as a semaphore: even though we're the current
				// owner, in reality we're expecting another thread to 'unlock'
				// this mutex on our behalf later.
				//
				// __ulock_wait(2) doesn't permit you to wait for yourself, so
				// we need to first swap our ownership for the anonymous owner
				current_ownerval =
						MACH_PORT_DEAD & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
				owner_dead = false;
			}
			uint32_t new_state =
					current_ownerval | _PTHREAD_MUTEX_ULOCK_WAITERS_BIT;
			success = os_atomic_cmpxchgv(&mutex->ulock.uval, state, new_state,
					&state, relaxed);
			if (!success) {
				continue;
			}

			int rc = __ulock_wait(wait_op, &mutex->ulock, new_state, 0);

			PTHREAD_TRACE(ulmutex_lock_wait, mutex, new_state, rc, 0);

			if (os_unlikely(rc < 0)) {
				switch (-rc) {
				case EINTR:
				case EFAULT:
					break;
				case EOWNERDEAD:
					owner_dead = true;
					continue;
				default:
					PTHREAD_INTERNAL_CRASH(rc, "ulock_wait failure");
				}
			} else if (rc > 0) {
				kernel_waiters = true;
			}

			state = os_atomic_load(&mutex->ulock.uval, relaxed);
		} while (state != _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE);

		uint32_t locked_state = self_ownerval;
		if (kernel_waiters) {
			locked_state |= _PTHREAD_MUTEX_ULOCK_WAITERS_BIT;
		}

		success = os_atomic_cmpxchgv(&mutex->ulock.uval, state, locked_state,
				&state, acquire);
	} while (!success);
	PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
_pthread_mutex_ulock_lock(pthread_mutex_t *mutex, bool trylock)
{
	uint32_t unlocked = _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE;
	uint32_t locked = _pthread_mutex_ulock_self_owner_value();
	uint32_t state;

	bool success = os_atomic_cmpxchgv(&mutex->ulock.uval, unlocked, locked,
			&state, acquire);

	if (trylock) {
		PTHREAD_TRACE(ulmutex_trylock, mutex, locked, state, success);
	} else {
		PTHREAD_TRACE(ulmutex_lock, mutex, locked, state, success);
	}

	int rc = 0;
	if (!success) {
		if (trylock) {
			rc = EBUSY;
		} else {
			rc = _pthread_mutex_ulock_lock_slow(mutex, locked, state);
		}
	}

	if (rc) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, rc);
	} else {
		PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, /* recursive */ 0, 0);
	}

	return rc;
}

OS_NOINLINE
static int
_pthread_mutex_ulock_unlock_slow(pthread_mutex_t *mutex, uint32_t self_ownerval,
		uint32_t orig_state)
{
	if (os_unlikely(orig_state == _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE)) {
		// XXX This is illegal, but psynch permitted it...
		// TODO: PTHREAD_STRICT candidate
		return 0;
	}

	uint32_t wake_flags = 0;

	uint32_t orig_ownerval = orig_state & _PTHREAD_MUTEX_ULOCK_OWNER_MASK;
	bool orig_waiters = orig_state & _PTHREAD_MUTEX_ULOCK_WAITERS_BIT;
	if (os_unlikely(orig_ownerval != self_ownerval)) {
		// XXX This is illegal, but psynch permitted it...
		// TODO: PTHREAD_STRICT candidate
		if (!orig_waiters) {
			return 0;
		}

		wake_flags |= ULF_WAKE_ALLOW_NON_OWNER;
	} else if (os_unlikely(!orig_waiters)) {
		PTHREAD_INTERNAL_CRASH(0, "unlock_slow without orig_waiters");
	}

	int rc = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | wake_flags,
			&mutex->ulock, 0);

	PTHREAD_TRACE(ulmutex_unlock_wake, mutex, rc, 0, 0);

	if (os_unlikely(rc < 0)) {
		switch (-rc) {
		case EINTR:
		case ENOENT:
			break;
		default:
			PTHREAD_INTERNAL_CRASH(-rc, "ulock_wake failure");
		}
	}

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
_pthread_mutex_ulock_unlock(pthread_mutex_t *mutex)
{
	uint32_t locked_uncontended = _pthread_mutex_ulock_self_owner_value();
	uint32_t unlocked = _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE;
	uint32_t state = os_atomic_xchg(&mutex->ulock.uval, unlocked, release);

	PTHREAD_TRACE(ulmutex_unlock, mutex, locked_uncontended, state, 0);

	int rc = 0;
	if (state != locked_uncontended) {
		rc = _pthread_mutex_ulock_unlock_slow(mutex, locked_uncontended,
				state);
	}

	if (rc) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, rc);
	} else {
		PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, /* recursive */ 0);
	}

	return rc;
}

#pragma mark firstfit

static inline int
_pthread_mutex_firstfit_unlock_updatebits(pthread_mutex_t *mutex,
		uint32_t *flagsp, uint32_t **mutexp, uint32_t *lvalp, uint32_t *uvalp)
{
	uint32_t flags = mutex->mtxopts.value & ~_PTHREAD_MTX_OPT_NOTIFY;
	bool kernel_wake;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid;

	int res = _pthread_mutex_unlock_handle_options(mutex, tidaddr);
	if (res > 0) {
		// Valid recursive unlock
		if (flagsp) {
			*flagsp = flags;
		}
		PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
		return 0;
	} else if (res < 0) {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, -res);
		return -res;
	}

	do {
		newseq = oldseq;
		oldtid = os_atomic_load_wide(tidaddr, relaxed);
		// More than one kernel waiter means we need to do a wake.
		kernel_wake = diff_genseq(oldseq.lgenval, oldseq.ugenval) > 0;
		newseq.lgenval &= ~PTH_RWL_EBIT;

		if (kernel_wake) {
			// Going to the kernel post-unlock removes a single waiter unlock
			// from the mutex counts.
			newseq.ugenval += PTHRW_INC;
		}

		if (oldtid != 0) {
			if (!os_atomic_cmpxchg(tidaddr, oldtid, 0, relaxed)) {
				return _pthread_mutex_corruption_abort(mutex);
			}
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

	PTHREAD_TRACE(psynch_ffmutex_unlock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, newseq.ugenval);

	if (kernel_wake) {
		// We choose to return this out via flags because the condition
		// variable also uses this to determine whether to do a kernel wake
		// when beginning a cvwait.
		flags |= _PTHREAD_MTX_OPT_NOTIFY;
	}
	if (lvalp) {
		*lvalp = newseq.lgenval;
	}
	if (uvalp) {
		*uvalp = newseq.ugenval;
	}
	if (mutexp) {
		*mutexp = (uint32_t *)mutex;
	}
	if (flagsp) {
		*flagsp = flags;
	}

	return 0;
}
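
/*
 * Note (added commentary): the _PTHREAD_MTX_OPT_NOTIFY bit set above is what
 * _pthread_mutex_firstfit_unlock_slow() checks to decide whether it must call
 * _pthread_mutex_firstfit_wake() and enter the kernel via __psynch_mutexdrop,
 * or whether the userspace CAS alone fully released the mutex.
 */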

OS_NOINLINE
static int
_pthread_mutex_firstfit_wake(pthread_mutex_t *mutex, mutex_seq newseq,
		uint32_t flags)
{
	PTHREAD_TRACE(psynch_ffmutex_wake, mutex, newseq.lgenval, newseq.ugenval,
			0);
	int res = __psynch_mutexdrop(mutex, newseq.lgenval, newseq.ugenval, 0,
			flags);

	if (res == -1) {
		res = errno;
		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_INTERNAL_CRASH(res, "__psynch_mutexdrop failed");
		}
	}
	return res;
}

OS_NOINLINE
static int
_pthread_mutex_firstfit_unlock_slow(pthread_mutex_t *mutex)
{
	mutex_seq newseq;
	uint32_t flags;
	int res;

	res = _pthread_mutex_firstfit_unlock_updatebits(mutex, &flags, NULL,
			&newseq.lgenval, &newseq.ugenval);
	if (res != 0) return res;

	if (flags & _PTHREAD_MTX_OPT_NOTIFY) {
		return _pthread_mutex_firstfit_wake(mutex, newseq, flags);
	}
	return 0;
}

static inline int
_pthread_mutex_firstfit_lock_updatebits(pthread_mutex_t *mutex, uint64_t selfid,
		mutex_seq *newseqp)
{
	bool gotlock;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_START, mutex,
			oldseq.lgenval, oldseq.ugenval, 0);

	do {
		newseq = oldseq;
		gotlock = is_rwl_ebit_clear(oldseq.lgenval);

		if (gotlock) {
			// If we see the E-bit cleared, we should just attempt to take it.
			newseq.lgenval |= PTH_RWL_EBIT;
		} else {
			// If we failed to get the lock then we need to put ourselves back
			// in the queue of waiters. The previous unlocker that woke us out
			// of the kernel consumed the S-count for our previous wake. So
			// take another ticket on L and go back in the kernel to sleep.
			newseq.lgenval += PTHRW_INC;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	if (gotlock) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
	}

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_END, mutex,
			newseq.lgenval, newseq.ugenval, 0);

	if (newseqp) {
		*newseqp = newseq;
	}

	return gotlock;
}

OS_NOINLINE
static int
_pthread_mutex_firstfit_lock_wait(pthread_mutex_t *mutex, mutex_seq newseq,
		uint64_t oldtid)
{
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_threadid_self_np_direct();

	PLOCKSTAT_MUTEX_BLOCK((pthread_mutex_t *)mutex);
	do {
		uint32_t uval;
		do {
			PTHREAD_TRACE(psynch_ffmutex_wait | DBG_FUNC_START, mutex,
					newseq.lgenval, newseq.ugenval, mutex->mtxopts.value);
			uval = __psynch_mutexwait(mutex, newseq.lgenval, newseq.ugenval,
					oldtid, mutex->mtxopts.value);
			PTHREAD_TRACE(psynch_ffmutex_wait | DBG_FUNC_END, mutex,
					uval, 0, 0);
			oldtid = os_atomic_load_wide(tidaddr, relaxed);
		} while (uval == (uint32_t)-1);
	} while (!_pthread_mutex_firstfit_lock_updatebits(mutex, selfid, &newseq));
	PLOCKSTAT_MUTEX_BLOCKED((pthread_mutex_t *)mutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

OS_NOINLINE
static int
_pthread_mutex_firstfit_lock_slow(pthread_mutex_t *mutex, bool trylock)
{
	int res, recursive = 0;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, selfid = _pthread_threadid_self_np_direct();

	res = _pthread_mutex_lock_handle_options(mutex, trylock, tidaddr);
	if (res > 0) {
		recursive = 1;
		res = 0;
		goto out;
	} else if (res < 0) {
		res = -res;
		goto out;
	}

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_START, mutex,
			oldseq.lgenval, oldseq.ugenval, 0);

	bool gotlock;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load_wide(tidaddr, relaxed);

		gotlock = is_rwl_ebit_clear(oldseq.lgenval);
		if (trylock && !gotlock) {
			// We still want to perform the CAS here, even though it won't
			// do anything so that it fails if someone unlocked while we were
			// in the loop
		} else if (gotlock) {
			// In first-fit, getting the lock simply adds the E-bit
			newseq.lgenval |= PTH_RWL_EBIT;
		} else {
			// Failed to get the lock, increment the L-val and go to
			// the kernel to sleep
			newseq.lgenval += PTHRW_INC;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	PTHREAD_TRACE(psynch_ffmutex_lock_updatebits | DBG_FUNC_END, mutex,
			newseq.lgenval, newseq.ugenval, 0);

	if (gotlock) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
		res = 0;
		PTHREAD_TRACE(psynch_mutex_ulock, mutex, newseq.lgenval,
				newseq.ugenval, selfid);
	} else if (trylock) {
		res = EBUSY;
		PTHREAD_TRACE(psynch_mutex_utrylock_failed, mutex, newseq.lgenval,
				newseq.ugenval, oldtid);
	} else {
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, mutex,
				newseq.lgenval, newseq.ugenval, oldtid);
		res = _pthread_mutex_firstfit_lock_wait(mutex, newseq, oldtid);
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, mutex,
				newseq.lgenval, newseq.ugenval, oldtid);
	}

	if (res == 0 && _pthread_mutex_is_recursive(mutex)) {
		mutex->mtxopts.options.lock_count = 1;
	}

out:
	if (res == 0) {
		PLOCKSTAT_MUTEX_ACQUIRE((pthread_mutex_t *)mutex, recursive, 0);
	} else {
		PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, res);
	}

	return res;
}

#pragma mark fast path

int
_pthread_mutex_droplock(pthread_mutex_t *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	if (_pthread_mutex_is_fairshare(mutex)) {
		return _pthread_mutex_fairshare_unlock_updatebits(mutex, flagsp,
				pmtxp, mgenp, ugenp);
	}
	return _pthread_mutex_firstfit_unlock_updatebits(mutex, flagsp, pmtxp,
			mgenp, ugenp);
}

OS_NOINLINE
static int
_pthread_mutex_lock_init_slow(pthread_mutex_t *mutex, bool trylock)
{
	int res;

	res = _pthread_mutex_check_init(mutex);
	if (res != 0) return res;

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_lock_slow(mutex, trylock);
	} else if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
		return _pthread_mutex_ulock_lock(mutex, trylock);
	}
	return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
}

OS_NOINLINE
static int
_pthread_mutex_unlock_init_slow(pthread_mutex_t *mutex)
{
	int res;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	res = _pthread_mutex_check_init(mutex);
	if (res != 0) return res;

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_unlock_slow(mutex);
	} else if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
		return _pthread_mutex_ulock_unlock(mutex);
	}
	return _pthread_mutex_firstfit_unlock_slow(mutex);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_unlock_init_slow(mutex);
	}

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_unlock(mutex);
	}

	if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
		return _pthread_mutex_ulock_unlock(mutex);
	}

#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_firstfit_unlock_slow(mutex);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_firstfit_unlock_slow(mutex);
	}
#endif

	/*
	 * This is the first-fit fast path. The fairshare fast-ish path is in
	 * _pthread_mutex_fairshare_unlock()
	 */
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	// We're giving up the mutex one way or the other, so go ahead and
	// update the owner to 0 so that once the CAS below succeeds, there
	// is no stale ownership information. If the CAS of the seqaddr
	// fails, we may loop, but it's still valid for the owner to be
	// SWITCHING/0
	os_atomic_store_wide(tidaddr, 0, relaxed);

	do {
		newseq = oldseq;

		if (diff_genseq(oldseq.lgenval, oldseq.ugenval) == 0) {
			// No outstanding waiters in kernel, we can simply drop the E-bit
			newseq.lgenval &= ~PTH_RWL_EBIT;
		} else {
			return _pthread_mutex_firstfit_unlock_slow(mutex);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			release)));

	return 0;
}

static inline int
_pthread_mutex_firstfit_lock(pthread_mutex_t *mutex, bool trylock)
{
	/*
	 * This is the first-fit fast path. The fairshare fast-ish path is in
	 * _pthread_mutex_fairshare_lock()
	 */
	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_threadid_self_np_direct();

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	if (os_unlikely(!trylock && (oldseq.lgenval & PTH_RWL_EBIT))) {
		return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
	}

	bool gotlock;
	do {
		newseq = oldseq;
		gotlock = is_rwl_ebit_clear(oldseq.lgenval);

		if (trylock && !gotlock) {
#if __LP64__
			// The sequence load is atomic, so we can bail here without writing
			// it and avoid some unnecessary coherence traffic - rdar://57259033
			os_atomic_thread_fence(acquire);
			return EBUSY;
#else
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
#endif
		} else if (os_likely(gotlock)) {
			// In first-fit, getting the lock simply adds the E-bit
			newseq.lgenval |= PTH_RWL_EBIT;
		} else {
			return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire)));

	if (os_likely(gotlock)) {
		os_atomic_store_wide(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}

static inline int
_pthread_mutex_lock(pthread_mutex_t *mutex, bool trylock)
{
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_lock_init_slow(mutex, trylock);
	}

	if (os_unlikely(_pthread_mutex_is_fairshare(mutex))) {
		return _pthread_mutex_fairshare_lock(mutex, trylock);
	}

	if (os_unlikely(_pthread_mutex_uses_ulock(mutex))) {
		return _pthread_mutex_ulock_lock(mutex, trylock);
	}

#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_firstfit_lock_slow(mutex, trylock);
	}
#endif

	return _pthread_mutex_firstfit_lock(mutex, trylock);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, true);
}
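
/*
 * Usage sketch (added commentary, not part of the library): how a client
 * would opt a mutex into the first-fit policy serviced by the fast paths
 * above. The function and variable names are hypothetical and error handling
 * is omitted for brevity.
 */
__attribute__((unused)) static pthread_mutex_t _example_firstfit_lock;

__attribute__((unused)) static void
_example_firstfit_client_usage(void)
{
	pthread_mutexattr_t attr;
	pthread_mutexattr_init(&attr);
	// PTHREAD_MUTEX_POLICY_FIRSTFIT_NP maps onto the firstfit paths above;
	// PTHREAD_MUTEX_POLICY_FAIRSHARE_NP selects the fairshare paths instead.
	pthread_mutexattr_setpolicy_np(&attr, PTHREAD_MUTEX_POLICY_FIRSTFIT_NP);
	pthread_mutex_init(&_example_firstfit_lock, &attr);
	pthread_mutexattr_destroy(&attr);

	pthread_mutex_lock(&_example_firstfit_lock);
	// ... critical section ...
	pthread_mutex_unlock(&_example_firstfit_lock);
}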

static inline int
_pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr,
		uint32_t static_type)
{
	mutex->mtxopts.value = 0;
	mutex->mtxopts.options.mutex = 1;
	if (attr) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = (int16_t)attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->opt;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
		switch (static_type) {
		case 1:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
			break;
		case 2:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
			break;
		case 3:
			/* firstfit fall thru */
		case 7:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
			break;
		default:
			return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = __pthread_mutex_default_opt_policy;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MTX_OPT_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}
	mutex->priority = 0;

	long sig = _PTHREAD_MUTEX_SIG;
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			(_pthread_mutex_is_fairshare(mutex) ||
			_pthread_mutex_is_firstfit(mutex))) {
		// rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
		sig = _PTHREAD_MUTEX_SIG_fast;
	}

	// Criteria for ulock eligibility:
	// - not ERRORCHECK or RECURSIVE
	// - not PROCESS_SHARED
	// - checkfix for rdar://21813573 not active
	//
	// All of these should be addressed eventually.
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			mutex->mtxopts.options.policy == _PTHREAD_MTX_OPT_POLICY_FIRSTFIT &&
			mutex->mtxopts.options.pshared == PTHREAD_PROCESS_PRIVATE &&
			sig == _PTHREAD_MUTEX_SIG_fast) {
		mutex->mtxopts.options.ulock = __pthread_mutex_use_ulock;
	} else {
		mutex->mtxopts.options.ulock = false;
	}

	if (mutex->mtxopts.options.ulock) {
#if PTHREAD_MUTEX_INIT_UNUSED
		__builtin_memset(&mutex->psynch, 0xff, sizeof(mutex->psynch));
#endif // PTHREAD_MUTEX_INIT_UNUSED

		mutex->ulock = _PTHREAD_MUTEX_ULOCK_UNLOCKED;
	} else {
		mutex_seq *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

#if PTHREAD_MUTEX_INIT_UNUSED
		if ((uint32_t*)tidaddr != mutex->psynch.m_tid) {
			// TODO: PTHREAD_STRICT candidate
			mutex->mtxopts.options.misalign = 1;
			__builtin_memset(mutex->psynch.m_tid, 0xff,
					sizeof(mutex->psynch.m_tid));
		}
		__builtin_memset(mutex->psynch.m_mis, 0xff, sizeof(mutex->psynch.m_mis));
#endif // PTHREAD_MUTEX_INIT_UNUSED
		*tidaddr = 0;
		*seqaddr = (mutex_seq){ };
	}

#if PTHREAD_MUTEX_INIT_UNUSED
	// For detecting copied mutexes and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
#if defined(__LP64__)
	uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
	__builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
	mutex->_reserved[2] = sig32;
	mutex->_reserved[3] = sig32;
	mutex->_pad = sig32;
#else
	mutex->_reserved[0] = sig32;
#endif
#endif // PTHREAD_MUTEX_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store(&mutex->sig, sig, release);
#endif

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	int res = EINVAL;

	_pthread_lock_lock(&mutex->lock);
	if (_pthread_mutex_check_signature(mutex)) {
		// TODO: PTHREAD_STRICT candidate
		res = EBUSY;

		if (_pthread_mutex_uses_ulock(mutex) &&
				mutex->ulock.uval == _PTHREAD_MUTEX_ULOCK_UNLOCKED_VALUE) {
			res = 0;
		} else {
			mutex_seq *seqaddr;
			MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

			mutex_seq seq;
			mutex_seq_load(seqaddr, &seq);

			uint64_t *tidaddr;
			MUTEX_GETTID_ADDR(mutex, &tidaddr);

			if ((os_atomic_load_wide(tidaddr, relaxed) == 0) &&
					(seq.lgenval & PTHRW_COUNT_MASK) ==
					(seq.ugenval & PTHRW_COUNT_MASK)) {
				res = 0;
			}
		}
	} else if (_pthread_mutex_check_signature_init(mutex)) {
		res = 0;
	}
	if (res == 0) {
		mutex->sig = _PTHREAD_NO_SIG;
	}
	_pthread_lock_unlock(&mutex->lock);

	return res;
}

#endif /* !BUILDING_VARIANT ] */

/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
	if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
		return EINVAL;
	}

	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}