/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */
#include "kern/kern_trace.h"

extern int __unix_conforming;
#ifndef BUILDING_VARIANT /* [ */

#ifdef PLOCKSTAT
#include "plockstat.h"
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user D scripts don't get errors.
 */
PTHREAD_NOEXPORT PTHREAD_USED
void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */
#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#define PTHREAD_MUTEX_INIT_UNUSED 1
PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_unlock_slow(pthread_mutex_t *omutex);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_corruption_abort(_pthread_mutex *mutex);

extern int __pthread_mutex_default_policy PTHREAD_NOEXPORT;

int __pthread_mutex_default_policy PTHREAD_NOEXPORT =
		_PTHREAD_MUTEX_POLICY_FAIRSHARE;
PTHREAD_NOEXPORT
void
_pthread_mutex_global_init(const char *envp[],
		struct _pthread_registration_data *registration_data)
{
	const char *envvar = _simple_getenv(envp, "PTHREAD_MUTEX_DEFAULT_POLICY");
	if ((envvar && (envvar[0] - '0') == _PTHREAD_MUTEX_POLICY_FIRSTFIT) ||
			(registration_data->mutex_default_policy ==
				_PTHREAD_MUTEX_POLICY_FIRSTFIT)) {
		__pthread_mutex_default_policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
	}
}
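
/*
 * Illustrative sketch (not part of the original source): two ways a process
 * can end up with the first-fit policy selected by the check above, assuming
 * the environment variable uses the single-digit encoding implied by the
 * (envvar[0] - '0') parse:
 *
 *	// at launch, process-wide:
 *	//     PTHREAD_MUTEX_DEFAULT_POLICY=<digit for FIRSTFIT> ./app
 *	// or per-mutex, without touching the global default:
 *	pthread_mutexattr_t attr;
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_setpolicy_np(&attr, _PTHREAD_MUTEX_POLICY_FIRSTFIT);
 */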
PTHREAD_ALWAYS_INLINE
static inline int _pthread_mutex_init(_pthread_mutex *mutex,
		const pthread_mutexattr_t *attr, uint32_t static_type);
typedef union mutex_seq {
	uint32_t seq[2];
	struct { uint32_t lgenval; uint32_t ugenval; };
	struct { uint32_t mgen; uint32_t ugen; };
	uint64_t seq_LU;
	uint64_t _Atomic atomic_seq_LU;
} mutex_seq;

_Static_assert(sizeof(mutex_seq) == 2 * sizeof(uint32_t),
		"Incorrect mutex_seq size");
#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif
PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex, mutex_seq **seqaddr)
{
	// 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*seqaddr = (void *)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
}
PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETTID_ADDR(_pthread_mutex *mutex, uint64_t **tidaddr)
{
	// 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*tidaddr = (void *)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
}
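
/*
 * Illustrative sketch (not from the original source) of the align-up trick
 * used by both getters above: rounding a byte-aligned pointer p up to the
 * next 8-byte boundary is (p + 7) & ~7, which is a no-op for an already
 * aligned p. That is why &m_seq[0]/&m_tid[0] come back unchanged for an
 * aligned mutex.
 */
static inline void *
_example_align_up_8(void *p)
{
	return (void *)(((uintptr_t)p + 0x7ul) & ~0x7ul);
}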
PTHREAD_ALWAYS_INLINE
static inline void
mutex_seq_load(mutex_seq *seqaddr, mutex_seq *oldseqval)
{
	oldseqval->seq_LU = seqaddr->seq_LU;
}

#define mutex_seq_atomic_load(seqaddr, oldseqval, m) \
		mutex_seq_atomic_load_##m(seqaddr, oldseqval)
PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, relaxed);
}

PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_acquire(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, acquire);
}

PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_release(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, release);
}

#define mutex_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, m) \
		mutex_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval)
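
/*
 * Illustrative sketch (not from the original source): the canonical way
 * these helpers are used throughout this file. The CAS helper writes the
 * observed value back into oldseq on failure, so the loop re-derives newseq
 * from fresh state on every iteration.
 */
static inline void
_example_seq_update_loop(mutex_seq *seqaddr)
{
	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);
	do {
		newseq = oldseq;
		// ... compute the desired next value of newseq here ...
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, relaxed));
}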
/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
{
#if 0
	/* conformance tests depend on not having this behavior */
	/* The test for this behavior is optional */
	if (_pthread_mutex_check_signature(mutex))
		return EBUSY;
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	_PTHREAD_LOCK_INIT(mutex->lock);
	return (_pthread_mutex_init(mutex, attr, 0x7));
}
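
/*
 * Illustrative usage sketch (not part of the original source), showing the
 * public API this entry point implements; error handling kept minimal.
 *
 *	pthread_mutex_t m;
 *	if (pthread_mutex_init(&m, NULL) != 0) abort(); // default attributes
 *	pthread_mutex_lock(&m);
 *	// ... critical section ...
 *	pthread_mutex_unlock(&m);
 *	pthread_mutex_destroy(&m);
 */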
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		*prioceiling = mutex->prioceiling;
		res = 0;
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling,
		int *old_prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (prioceiling >= -999 && prioceiling <= 999) {
			*old_prioceiling = mutex->prioceiling;
			mutex->prioceiling = (int16_t)prioceiling;
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}
int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
		int *prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*prioceiling = attr->prioceiling;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*protocol = attr->protocol;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpolicy_np(const pthread_mutexattr_t *attr, int *policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*policy = attr->policy;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*type = attr->type;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}
int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
	attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
	attr->policy = __pthread_mutex_default_policy;
	attr->type = PTHREAD_MUTEX_DEFAULT;
	attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}
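
/*
 * Illustrative usage sketch (not part of the original source): configuring
 * a recursive mutex through the attribute object initialized above.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_t m;
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr); // the mutex keeps its own copy
 */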
int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		if (prioceiling >= -999 && prioceiling <= 999) {
			attr->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}
int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (protocol) {
		case PTHREAD_PRIO_NONE:
		case PTHREAD_PRIO_INHERIT:
		case PTHREAD_PRIO_PROTECT:
			attr->protocol = protocol;
			res = 0;
			break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (policy) {
		case _PTHREAD_MUTEX_POLICY_FAIRSHARE:
		case _PTHREAD_MUTEX_POLICY_FIRSTFIT:
			attr->policy = policy;
			res = 0;
			break;
		}
	}
	return res;
}
int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (type) {
		case PTHREAD_MUTEX_NORMAL:
		case PTHREAD_MUTEX_ERRORCHECK:
		case PTHREAD_MUTEX_RECURSIVE:
		//case PTHREAD_MUTEX_DEFAULT:
			attr->type = type;
			res = 0;
			break;
		}
	}
	return res;
}
int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
	int res = EINVAL;
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
#endif /* __DARWIN_UNIX03 */

	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) ||
				(pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}
PTHREAD_NOEXPORT PTHREAD_NOINLINE PTHREAD_NORETURN
int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex)
{
	PTHREAD_ABORT("pthread_mutex corruption: mutex owner changed in the "
			"middle of lock/unlock");
}
/*
 * Sequence numbers and TID:
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to
 * E=[L6+KE U4 TID0] and then F=[L6+KE U4 TID940]. If a contender comes in after
 * C, it will transition to F=[L6+KE U4 TID940] directly. In both cases, the
 * contender will enter the kernel with either mutexwait(U4, TID0) or
 * mutexwait(U4, TID940). The first owner will unlock the mutex by first
 * updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
 * H=[L6+KE U5 TID=-1] before entering the kernel with mutexdrop(U5, -1) to
 * signal the next waiter (potentially as a prepost). When the waiter comes out
 * of the kernel, it will update the owner to I=[L6+KE U5 TID941]. An unlock at
 * this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID are
 * written independently, a thread may get preempted and another thread might
 * see inconsistent data. In the worst case, another thread may see the TID in
 * the SWITCHING (-1) state or unlocked (0) state for longer because the owning
 * thread was preempted.
 */
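
/*
 * Illustrative sketch (not from the original source): a simplified version
 * of the waiter accounting implied by the timeline above. Each lock advances
 * L (lgenval) by PTHRW_INC and each unlock advances U (ugenval), so the
 * number of outstanding lockers is the masked difference of the two
 * counters; the real diff_genseq() helper also has to cope with counter
 * wraparound.
 */
static inline uint32_t
_example_num_waiters(uint32_t lgenval, uint32_t ugenval)
{
	return (lgenval & PTHRW_COUNT_MASK) - (ugenval & PTHRW_COUNT_MASK);
}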
/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	bool firstfit = (mutex->mtxopts.options.policy ==
			_PTHREAD_MUTEX_POLICY_FIRSTFIT);
	uint32_t flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, newtid;

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		uint64_t selfid = _pthread_selfid_direct();
		if (os_atomic_load(tidaddr, relaxed) != selfid) {
			PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
			return EPERM;
		} else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
				--mutex->mtxopts.options.lock_count) {
			PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
			if (flagsp != NULL) {
				*flagsp = flags;
			}
			return 0;
		}
	}

	bool clearprepost, clearnotify, spurious;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		clearprepost = false;
		clearnotify = false;
		spurious = false;

		int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
		if (numwaiters == 0) {
			// spurious unlock (unlock of unlocked lock)
			spurious = true;
		} else {
			newseq.ugenval += PTHRW_INC;

			if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
					(newseq.ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches the lock sequence, so if the
				// CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				if (firstfit) {
					// reset E bit so another can acquire meanwhile
					newseq.lgenval &= ~PTH_RWL_EBIT;
					newtid = 0;
				} else {
					newtid = PTHREAD_MTX_TID_SWITCHING;
				}
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead
				// and update the owner to 0 so that once the CAS below
				// succeeds, there is no stale ownership information. If the
				// CAS of the seqaddr fails, we may loop, but it's still valid
				// for the owner to be SWITCHING/0
				if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
					// we own this mutex, nobody should be updating it except us
					return _pthread_mutex_corruption_abort(mutex);
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
			if (firstfit && (newseq.lgenval & PTH_RWL_PBIT)) {
				clearprepost = true;
				newseq.lgenval &= ~PTH_RWL_PBIT;
			}
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

	PTHREAD_TRACE(psynch_mutex_unlock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, oldtid);

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, newseq.lgenval, newseq.ugenval, 0, 0,
				newseq.lgenval, flags | _PTHREAD_MTX_OPT_MUTEX);
	}

	if (mgenp != NULL) {
		*mgenp = newseq.lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = newseq.ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}
PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_droplock(_pthread_mutex *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	return _pthread_mutex_unlock_updatebits(mutex, flagsp, pmtxp, mgenp, ugenp);
}
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
	bool firstfit = (mutex->mtxopts.options.policy ==
			_PTHREAD_MUTEX_POLICY_FIRSTFIT);
	bool gotlock = true;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid;

	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		if (firstfit) {
			// firstfit locks can have the lock stolen out from under a locker
			// between the unlock from the kernel and this lock path. When this
			// happens, we still want to set the K bit before leaving the loop
			// (or notice if the lock unlocks while we try to update).
			gotlock = !is_rwl_ebit_set(oldseq.lgenval);
		} else if ((oldseq.lgenval & (PTH_RWL_KBIT | PTH_RWL_EBIT)) ==
				(PTH_RWL_KBIT | PTH_RWL_EBIT)) {
			// bits are already set, just update the owner tidaddr
			break;
		}

		newseq.lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire));

	if (gotlock) {
		if (!os_atomic_cmpxchg(tidaddr, oldtid, selfid, relaxed)) {
			// we own this mutex, nobody should be updating it except us
			return _pthread_mutex_corruption_abort(mutex);
		}
	}

	PTHREAD_TRACE(psynch_mutex_lock_updatebits, mutex, oldseq.lgenval,
			newseq.lgenval, oldtid);

	// failing to take the lock in firstfit returns 1 to force the caller
	// to wait in the kernel
	return gotlock ? 0 : 1;
}
PTHREAD_NOINLINE
static int
_pthread_mutex_markprepost(_pthread_mutex *mutex, uint32_t updateval)
{
	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	bool clearprepost;
	do {
		clearprepost = false;
		newseq = oldseq;

		/* update the bits */
		if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
				(oldseq.ugenval & PTHRW_COUNT_MASK)) {
			clearprepost = true;
			newseq.lgenval &= ~PTH_RWL_PBIT;
		} else {
			newseq.lgenval |= PTH_RWL_PBIT;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, relaxed));

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, newseq.lgenval, newseq.ugenval, 0, 0,
				newseq.lgenval, mutex->mtxopts.value | _PTHREAD_MTX_OPT_MUTEX);
	}

	return 0;
}
PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_check_init_slow(pthread_mutex_t *omutex)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (_pthread_mutex_check_signature_init(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (_pthread_mutex_check_signature_init(mutex)) {
			// initialize a statically initialized mutex to provide
			// compatibility for misbehaving applications.
			// (unlock should not be the first operation on a mutex)
			res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
		} else if (_pthread_mutex_check_signature(mutex)) {
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	} else if (_pthread_mutex_check_signature(mutex)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}
	return res;
}
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_check_init(pthread_mutex_t *omutex)
{
	int res = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (!_pthread_mutex_check_signature(mutex)) {
		return _pthread_mutex_check_init_slow(omutex);
	}
	return res;
}
PTHREAD_NOINLINE
static int
_pthread_mutex_lock_wait(pthread_mutex_t *omutex, mutex_seq newseq,
		uint64_t oldtid)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	PLOCKSTAT_MUTEX_BLOCK(omutex);
	do {
		uint32_t updateval;
		do {
			updateval = __psynch_mutexwait(omutex, newseq.lgenval,
					newseq.ugenval, oldtid, mutex->mtxopts.value);
			oldtid = os_atomic_load(tidaddr, relaxed);
		} while (updateval == (uint32_t)-1);

		// returns 0 on successful update; in firstfit it may fail with 1
	} while (_pthread_mutex_lock_updatebits(mutex, selfid) == 1);
	PLOCKSTAT_MUTEX_BLOCKED(omutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}
PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock)
{
	int res, recursive = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	res = _pthread_mutex_check_init(omutex);
	if (res != 0) return res;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, selfid = _pthread_selfid_direct();

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		if (os_atomic_load(tidaddr, relaxed) == selfid) {
			if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
				if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
					mutex->mtxopts.options.lock_count++;
					res = 0;
				} else {
					res = EAGAIN;
				}
				recursive = 1;
			} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
				// <rdar://problem/16261552> as per OpenGroup, trylock cannot
				// return EDEADLK on a deadlock, it should return EBUSY.
				res = EBUSY;
			} else { /* PTHREAD_MUTEX_ERRORCHECK */
				res = EDEADLK;
			}
			goto out;
		}
	}

	bool gotlock;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	PTHREAD_TRACE(psynch_mutex_lock_updatebits, omutex, oldseq.lgenval,
			newseq.lgenval, 0);

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
		res = 0;
		PTHREAD_TRACE(psynch_mutex_ulock, omutex, newseq.lgenval,
				newseq.ugenval, selfid);
	} else if (trylock) {
		res = EBUSY;
		PTHREAD_TRACE(psynch_mutex_utrylock_failed, omutex, newseq.lgenval,
				newseq.ugenval, oldtid);
	} else {
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_START, omutex,
				newseq.lgenval, newseq.ugenval, oldtid);
		res = _pthread_mutex_lock_wait(omutex, newseq, oldtid);
		PTHREAD_TRACE(psynch_mutex_ulock | DBG_FUNC_END, omutex,
				newseq.lgenval, newseq.ugenval, oldtid);
	}

	if (res == 0 && mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
		mutex->mtxopts.options.lock_count = 1;
	}

out:
	if (res == 0) {
		PLOCKSTAT_MUTEX_ACQUIRE(omutex, recursive, 0);
	} else {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}

	return res;
}
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
{
#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_lock_slow(omutex, trylock);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}
#endif

	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	bool gotlock;
	do {
		newseq = oldseq;

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else if (os_likely(gotlock)) {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		} else {
			return _pthread_mutex_lock_slow(omutex, trylock);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire)));

	if (os_likely(gotlock)) {
		os_atomic_store(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, true);
}
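
/*
 * Illustrative usage sketch (not part of the original source): the trylock
 * entry point above fails with EBUSY rather than blocking.
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		// ... critical section ...
 *		pthread_mutex_unlock(&m);
 *	} else {
 *		// EBUSY: lock is held elsewhere; do something else instead
 *	}
 */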
/*
 * TODO: Priority inheritance stuff
 */
PTHREAD_NOINLINE
static int
_pthread_mutex_unlock_drop(pthread_mutex_t *omutex, mutex_seq newseq,
		uint32_t flags)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	uint32_t updateval;

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_START, omutex, newseq.lgenval,
			newseq.ugenval, os_atomic_load(tidaddr, relaxed));

	updateval = __psynch_mutexdrop(omutex, newseq.lgenval, newseq.ugenval,
			os_atomic_load(tidaddr, relaxed), flags);

	PTHREAD_TRACE(psynch_mutex_uunlock | DBG_FUNC_END, omutex, updateval, 0, 0);

	if (updateval == (uint32_t)-1) {
		res = errno;

		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_ABORT("__psynch_mutexdrop failed with error %d", res);
		}
		return res;
	} else if ((mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT)
			&& (updateval & PTH_RWL_PBIT)) {
		return _pthread_mutex_markprepost(mutex, updateval);
	}

	return 0;
}
PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_unlock_slow(pthread_mutex_t *omutex)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	mutex_seq newseq;
	uint32_t flags;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	res = _pthread_mutex_check_init(omutex);
	if (res != 0) return res;

	res = _pthread_mutex_unlock_updatebits(mutex, &flags, NULL, &newseq.lgenval,
			&newseq.ugenval);
	if (res != 0) return res;

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		return _pthread_mutex_unlock_drop(omutex, newseq, flags);
	} else {
		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		PTHREAD_TRACE(psynch_mutex_uunlock, omutex, newseq.lgenval,
				newseq.ugenval, os_atomic_load(tidaddr, relaxed));
	}

	return 0;
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
#if ENABLE_USERSPACE_TRACE
	return _pthread_mutex_unlock_slow(omutex);
#elif PLOCKSTAT
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED()) {
		return _pthread_mutex_unlock_slow(omutex);
	}
#endif

	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_unlock_slow(omutex);
	}

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
	if (os_unlikely(numwaiters == 0)) {
		// spurious unlock (unlock of unlocked lock)
		return 0;
	}

	// We're giving up the mutex one way or the other, so go ahead and
	// update the owner to 0 so that once the CAS below succeeds, there
	// is no stale ownership information. If the CAS of the seqaddr
	// fails, we may loop, but it's still valid for the owner to be
	// SWITCHING/0
	os_atomic_store(tidaddr, 0, relaxed);

	do {
		newseq = oldseq;
		newseq.ugenval += PTHRW_INC;

		if (os_likely((oldseq.lgenval & PTHRW_COUNT_MASK) ==
				(newseq.ugenval & PTHRW_COUNT_MASK))) {
			// our unlock sequence matches the lock sequence, so if the
			// CAS is successful, the mutex is unlocked

			// do not reset Ibit, just K&E
			newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
		} else {
			return _pthread_mutex_unlock_slow(omutex);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			release)));

	return 0;
}
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
		uint32_t static_type)
{
	mutex->mtxopts.value = 0;
	mutex->mtxopts.options.mutex = 1;
	if (attr) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = (int16_t)attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->policy;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
		switch (static_type) {
		case 1:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
			break;
		case 2:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
			break;
		case 3:
			/* firstfit fall thru */
		case 7:
			mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
			break;
		default:
			return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = __pthread_mutex_default_policy;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}
	mutex->priority = 0;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

#if PTHREAD_MUTEX_INIT_UNUSED
	if ((uint32_t*)tidaddr != mutex->m_tid) {
		mutex->mtxopts.options.misalign = 1;
		__builtin_memset(mutex->m_tid, 0xff, sizeof(mutex->m_tid));
	}
	__builtin_memset(mutex->m_mis, 0xff, sizeof(mutex->m_mis));
#endif // PTHREAD_MUTEX_INIT_UNUSED
	*tidaddr = 0;
	*seqaddr = (mutex_seq){ };

	long sig = _PTHREAD_MUTEX_SIG;
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) {
		// rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
		sig = _PTHREAD_MUTEX_SIG_fast;
	}

#if PTHREAD_MUTEX_INIT_UNUSED
	// For detecting copied mutexes and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
#if defined(__LP64__)
	uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
	__builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
	mutex->_reserved[2] = sig32;
	mutex->_reserved[3] = sig32;
	mutex->_pad = sig32;
#else
	mutex->_reserved[0] = sig32;
#endif
#endif // PTHREAD_MUTEX_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(mutex, sig, sig, release);
#endif

	return 0;
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_destroy(pthread_mutex_t *omutex)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	int res = EINVAL;

	_PTHREAD_LOCK(mutex->lock);
	if (_pthread_mutex_check_signature(mutex)) {
		mutex_seq *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

		mutex_seq seq;
		mutex_seq_load(seqaddr, &seq);

		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

		if ((os_atomic_load(tidaddr, relaxed) == 0) &&
				(seq.lgenval & PTHRW_COUNT_MASK) ==
					(seq.ugenval & PTHRW_COUNT_MASK)) {
			mutex->sig = _PTHREAD_NO_SIG;
			res = 0;
		} else {
			res = EBUSY;
		}
	} else if (_pthread_mutex_check_signature_init(mutex)) {
		mutex->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	_PTHREAD_UNLOCK(mutex->lock);

	return res;
}
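
/*
 * Illustrative usage sketch (not part of the original source): destroy only
 * succeeds on an unlocked mutex with no waiters, as checked above.
 *
 *	pthread_mutex_lock(&m);
 *	assert(pthread_mutex_destroy(&m) == EBUSY); // still locked
 *	pthread_mutex_unlock(&m);
 *	assert(pthread_mutex_destroy(&m) == 0);
 */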
#endif /* !BUILDING_VARIANT ] */
/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
		return EINVAL;
	}
#endif /* __DARWIN_UNIX03 */

	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}