/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */

#include "resolver.h"
#include "internal.h"
#include "kern/kern_trace.h"

extern int __unix_conforming;

#ifndef BUILDING_VARIANT /* [ */

#ifdef PLOCKSTAT
#include "plockstat.h"
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user d scripts don't get errors.
 */
PTHREAD_NOEXPORT PTHREAD_USED
void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#define PTHREAD_MUTEX_INIT_UNUSED 1

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_unlock_slow(pthread_mutex_t *omutex);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int _pthread_mutex_corruption_abort(_pthread_mutex *mutex);

PTHREAD_ALWAYS_INLINE
static inline int _pthread_mutex_init(_pthread_mutex *mutex,
		const pthread_mutexattr_t *attr, uint32_t static_type);

#define DEBUG_TRACE_POINTS 0

#if DEBUG_TRACE_POINTS
#include <sys/kdebug.h>
#define DEBUG_TRACE(x, a, b, c, d) kdebug_trace(TRACE_##x, a, b, c, d)
#else
#define DEBUG_TRACE(x, a, b, c, d) do { } while(0)
#endif

typedef union mutex_seq {
	uint32_t seq[2];
	struct { uint32_t lgenval; uint32_t ugenval; };
	struct { uint32_t mgen; uint32_t ugen; };
	uint64_t seq_LU;
	uint64_t _Atomic atomic_seq_LU;
} mutex_seq;

_Static_assert(sizeof(mutex_seq) == 2 * sizeof(uint32_t),
		"Incorrect mutex_seq size");

#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex, mutex_seq **seqaddr)
{
	// 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*seqaddr = (void *)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
}

PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETTID_ADDR(_pthread_mutex *mutex, uint64_t **tidaddr)
{
	// 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*tidaddr = (void *)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
}
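
/*
 * Worked example of the align-up idiom used by MUTEX_GETSEQ_ADDR and
 * MUTEX_GETTID_ADDR above (the addresses are hypothetical):
 *
 *	(0x1000 + 0x7ul) & ~0x7ul == 0x1000	// already 8-byte aligned
 *	(0x1003 + 0x7ul) & ~0x7ul == 0x1008	// rounded up to the next boundary
 *
 * The m_seq/m_tid arrays carry enough slack that the rounded-up address
 * still falls inside the array even when the mutex itself is only
 * byte-aligned.
 */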

PTHREAD_ALWAYS_INLINE
static inline void
mutex_seq_load(mutex_seq *seqaddr, mutex_seq *oldseqval)
{
	oldseqval->seq_LU = seqaddr->seq_LU;
}

PTHREAD_ALWAYS_INLINE
static inline void
mutex_seq_atomic_load_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval)
{
	oldseqval->seq_LU = os_atomic_load(&seqaddr->atomic_seq_LU, relaxed);
}

#define mutex_seq_atomic_load(seqaddr, oldseqval, m) \
		mutex_seq_atomic_load_##m(seqaddr, oldseqval)

PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_relaxed(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, relaxed);
}

PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_acquire(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, acquire);
}

PTHREAD_ALWAYS_INLINE
static inline bool
mutex_seq_atomic_cmpxchgv_release(mutex_seq *seqaddr, mutex_seq *oldseqval,
		mutex_seq *newseqval)
{
	return os_atomic_cmpxchgv(&seqaddr->atomic_seq_LU, oldseqval->seq_LU,
			newseqval->seq_LU, &oldseqval->seq_LU, release);
}

#define mutex_seq_atomic_cmpxchgv(seqaddr, oldseqval, newseqval, m)\
		mutex_seq_atomic_cmpxchgv_##m(seqaddr, oldseqval, newseqval)
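
/*
 * Usage sketch for the dispatch macro above: the trailing memory-order
 * token is pasted onto the function name, so
 *
 *	mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release)
 *
 * expands to
 *
 *	mutex_seq_atomic_cmpxchgv_release(seqaddr, &oldseq, &newseq)
 *
 * letting each lock/unlock loop below pick relaxed, acquire, or release
 * ordering per call site with no runtime branch.
 */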

/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
{
#if 0
	/* conformance tests depend on not having this behavior */
	/* The test for this behavior is optional */
	if (_pthread_mutex_check_signature(mutex))
		return EBUSY;
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	_PTHREAD_LOCK_INIT(mutex->lock);
	return (_pthread_mutex_init(mutex, attr, 0x7));
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		*prioceiling = mutex->prioceiling;
		res = 0;
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling,
		int *old_prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (prioceiling >= -999 && prioceiling <= 999) {
			*old_prioceiling = mutex->prioceiling;
			mutex->prioceiling = (int16_t)prioceiling;
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}

int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
		int *prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*prioceiling = attr->prioceiling;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*protocol = attr->protocol;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*type = attr->type;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
	attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
	attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
	attr->type = PTHREAD_MUTEX_DEFAULT;
	attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}

int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		if (prioceiling >= -999 && prioceiling <= 999) {
			attr->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}

int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (protocol) {
			case PTHREAD_PRIO_NONE:
			case PTHREAD_PRIO_INHERIT:
			case PTHREAD_PRIO_PROTECT:
				attr->protocol = protocol;
				res = 0;
				break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (policy) {
			case _PTHREAD_MUTEX_POLICY_FAIRSHARE:
			case _PTHREAD_MUTEX_POLICY_FIRSTFIT:
				attr->policy = policy;
				res = 0;
				break;
		}
	}
	return res;
}

int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (type) {
			case PTHREAD_MUTEX_NORMAL:
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_RECURSIVE:
			//case PTHREAD_MUTEX_DEFAULT:
				attr->type = type;
				res = 0;
				break;
		}
	}
	return res;
}

int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
	int res = EINVAL;

#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
#endif /* __DARWIN_UNIX03 */

	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) ||
				(pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE PTHREAD_NORETURN
int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex)
{
	PTHREAD_ABORT("pthread_mutex corruption: mutex owner changed in the "
			"middle of lock/unlock");
}

/*
 * Sequence numbers and TID:
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to
 * E=[L6+KE U4 TID0] and then F=[L6+KE U4 TID940]. If a contender comes in after
 * C, it will transition to F=[L6+KE U4 TID940] directly. In both cases, the
 * contender will enter the kernel with either mutexwait(U4, TID0) or
 * mutexwait(U4, TID940). The first owner will unlock the mutex by first
 * updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
 * H=[L6+KE U5 TID=-1] before entering the kernel with mutexdrop(U5, -1) to
 * signal the next waiter (potentially as a prepost). When the waiter comes out
 * of the kernel, it will update the owner to I=[L6+KE U5 TID941]. An unlock at
 * this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID
 * are written independently, a thread may get preempted and another thread
 * might see inconsistent data. In the worst case, another thread may see the
 * TID in the SWITCHING (-1) state or unlocked (0) state for longer because
 * the owning thread was preempted.
 */
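
/*
 * Illustrative arithmetic for the timeline above (assuming diff_genseq,
 * defined elsewhere, subtracts the masked generation counts): at
 * F=[L6+KE U4 TID940] the lock generation is two increments ahead of the
 * unlock generation (one for the owner, one for the waiter), so
 * diff_genseq(lgenval, ugenval) is nonzero and the unlock path knows it
 * must signal a waiter rather than simply clearing the K and E bits.
 */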

/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	bool firstfit = (mutex->mtxopts.options.policy ==
			_PTHREAD_MUTEX_POLICY_FIRSTFIT);
	uint32_t flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, newtid;

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		uint64_t selfid = _pthread_selfid_direct();
		if (os_atomic_load(tidaddr, relaxed) != selfid) {
			PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
			return EPERM;
		} else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
				--mutex->mtxopts.options.lock_count) {
			PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
			if (flagsp != NULL) {
				*flagsp = flags;
			}
			return 0;
		}
	}

	bool clearprepost, clearnotify, spurious;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		clearprepost = false;
		clearnotify = false;
		spurious = false;

		int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
		if (numwaiters == 0) {
			// spurious unlock (unlock of unlocked lock)
			spurious = true;
		} else {
			newseq.ugenval += PTHRW_INC;

			if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
					(newseq.ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches to lock sequence, so if the
				// CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				if (firstfit) {
					// reset E bit so another can acquire meanwhile
					newseq.lgenval &= ~PTH_RWL_EBIT;
					newtid = 0;
				} else {
					newtid = PTHREAD_MTX_TID_SWITCHING;
				}
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead
				// and update the owner to 0 so that once the CAS below
				// succeeds, there is no stale ownership information. If the
				// CAS of the seqaddr fails, we may loop, but it's still valid
				// for the owner to be SWITCHING/0
				if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
					// we own this mutex, nobody should be updating it except us
					return _pthread_mutex_corruption_abort(mutex);
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
			if (firstfit && (newseq.lgenval & PTH_RWL_PBIT)) {
				clearprepost = true;
				newseq.lgenval &= ~PTH_RWL_PBIT;
			}
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, release));

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, newseq.lgenval, newseq.ugenval, 0, 0,
				newseq.lgenval, flags | _PTHREAD_MTX_OPT_MUTEX);
	}

	if (mgenp != NULL) {
		*mgenp = newseq.lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = newseq.ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_droplock(_pthread_mutex *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	return _pthread_mutex_unlock_updatebits(mutex, flagsp, pmtxp, mgenp, ugenp);
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
	int res = 0;
	bool firstfit = (mutex->mtxopts.options.policy ==
			_PTHREAD_MUTEX_POLICY_FIRSTFIT);
	bool isebit = false, updated = false;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid;

	do {
		if (firstfit && isebit && updated) {
			mutex_seq_atomic_load(seqaddr, &oldseq, relaxed);
		}
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		if (isebit && !(oldseq.lgenval & PTH_RWL_EBIT)) {
			// E bit was set on first pass through the loop but is no longer
			// set. Apparently we spin until it arrives.
			// XXX: verify this is desired behavior.
			continue;
		}
		if (isebit) {
			// first fit mutex now has the E bit set. Return 1.
			res = 1;
			break;
		}

		if (firstfit) {
			isebit = (oldseq.lgenval & PTH_RWL_EBIT);
		} else if ((oldseq.lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) ==
				(PTH_RWL_KBIT|PTH_RWL_EBIT)) {
			// fairshare mutex and the bits are already set, just update tid
			break;
		}

		// either first fit or no E bit set
		newseq.lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;

		// Retry if CAS fails, or if it succeeds with firstfit and E bit
		// already set
	} while (!(updated = mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			relaxed)) || (firstfit && isebit));

	if (res == 0) {
		if (!os_atomic_cmpxchg(tidaddr, oldtid, selfid, relaxed)) {
			// we own this mutex, nobody should be updating it except us
			return _pthread_mutex_corruption_abort(mutex);
		}
	}

	return res;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_markprepost(_pthread_mutex *mutex, uint32_t updateval)
{
	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	bool clearprepost;
	do {
		clearprepost = false;
		newseq = oldseq;

		/* update the bits */
		if ((oldseq.lgenval & PTHRW_COUNT_MASK) ==
				(oldseq.ugenval & PTHRW_COUNT_MASK)) {
			clearprepost = true;
			newseq.lgenval &= ~PTH_RWL_PBIT;
		} else {
			newseq.lgenval |= PTH_RWL_PBIT;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, relaxed));

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, newseq.lgenval, newseq.ugenval, 0, 0,
				newseq.lgenval, mutex->mtxopts.value | _PTHREAD_MTX_OPT_MUTEX);
	}

	return 0;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_check_init_slow(pthread_mutex_t *omutex)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (_pthread_mutex_check_signature_init(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (_pthread_mutex_check_signature_init(mutex)) {
			// initialize a statically initialized mutex to provide
			// compatibility for misbehaving applications.
			// (unlock should not be the first operation on a mutex)
			res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
		} else if (_pthread_mutex_check_signature(mutex)) {
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	} else if (_pthread_mutex_check_signature(mutex)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_check_init(pthread_mutex_t *omutex)
{
	int res = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (!_pthread_mutex_check_signature(mutex)) {
		return _pthread_mutex_check_init_slow(omutex);
	}
	return res;
}

PTHREAD_NOINLINE
static int
_pthread_mutex_lock_wait(pthread_mutex_t *omutex, mutex_seq newseq,
		uint64_t oldtid)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	PLOCKSTAT_MUTEX_BLOCK(omutex);
	do {
		uint32_t updateval;
		do {
			updateval = __psynch_mutexwait(omutex, newseq.lgenval,
					newseq.ugenval, oldtid, mutex->mtxopts.value);
			oldtid = os_atomic_load(tidaddr, relaxed);
		} while (updateval == (uint32_t)-1);

		// returns 0 on successful update; in firstfit it may fail with 1
	} while (_pthread_mutex_lock_updatebits(mutex, selfid) == 1);
	PLOCKSTAT_MUTEX_BLOCKED(omutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock)
{
	int res, recursive = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	res = _pthread_mutex_check_init(omutex);
	if (res != 0) return res;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid, selfid = _pthread_selfid_direct();

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		if (os_atomic_load(tidaddr, relaxed) == selfid) {
			if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
				if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
					mutex->mtxopts.options.lock_count++;
					res = 0;
					recursive = 1;
				} else {
					res = EAGAIN;
				}
			} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
				// <rdar://problem/16261552> as per OpenGroup, trylock cannot
				// return EDEADLK on a deadlock, it should return EBUSY.
				res = EBUSY;
			} else { /* PTHREAD_MUTEX_ERRORCHECK */
				res = EDEADLK;
			}
			goto out;
		}
	}

	bool gotlock;
	do {
		newseq = oldseq;
		oldtid = os_atomic_load(tidaddr, relaxed);

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}
	} while (!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq, acquire));

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
		res = 0;
		DEBUG_TRACE(psynch_mutex_ulock, omutex, lgenval, ugenval, selfid);
	} else if (trylock) {
		res = EBUSY;
		DEBUG_TRACE(psynch_mutex_utrylock_failed, omutex, lgenval, ugenval,
				oldtid);
	} else {
		res = _pthread_mutex_lock_wait(omutex, newseq, oldtid);
	}

	if (res == 0 && mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
		mutex->mtxopts.options.lock_count = 1;
	}

out:
	if (res == 0) {
		PLOCKSTAT_MUTEX_ACQUIRE(omutex, recursive, 0);
	} else {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}

	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}
#endif

	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	if (os_unlikely(oldseq.lgenval & PTH_RWL_EBIT)) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	bool gotlock;
	do {
		newseq = oldseq;

		gotlock = ((oldseq.lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else if (os_likely(gotlock)) {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			newseq.lgenval += PTHRW_INC;
			newseq.lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		} else {
			return _pthread_mutex_lock_slow(omutex, trylock);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			acquire)));

	if (os_likely(gotlock)) {
		os_atomic_store(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		__builtin_trap();
	}
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, true);
}
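
/*
 * Example usage of the public entry points above (illustrative):
 *
 *	pthread_mutex_t m;
 *	pthread_mutexattr_t a;
 *	pthread_mutexattr_init(&a);
 *	pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutex_init(&m, &a);
 *	pthread_mutexattr_destroy(&a);
 *
 *	if (pthread_mutex_trylock(&m) == 0) {
 *		// fast path: we now own the lock
 *		pthread_mutex_unlock(&m);
 *	} else {
 *		pthread_mutex_lock(&m);	// may block in _pthread_mutex_lock_wait
 *		pthread_mutex_unlock(&m);
 *	}
 *	pthread_mutex_destroy(&m);
 */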

/*
 * TODO: Priority inheritance stuff
 */

PTHREAD_NOINLINE
static int
_pthread_mutex_unlock_drop(pthread_mutex_t *omutex, mutex_seq newseq,
		uint32_t flags)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	uint32_t updateval;

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	updateval = __psynch_mutexdrop(omutex, newseq.lgenval, newseq.ugenval,
			os_atomic_load(tidaddr, relaxed), flags);

	if (updateval == (uint32_t)-1) {
		res = errno;

		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_ABORT("__psynch_mutexdrop failed with error %d", res);
		}
		return res;
	} else if ((mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT)
			&& (updateval & PTH_RWL_PBIT)) {
		return _pthread_mutex_markprepost(mutex, updateval);
	}

	return 0;
}

PTHREAD_NOEXPORT PTHREAD_NOINLINE
int
_pthread_mutex_unlock_slow(pthread_mutex_t *omutex)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	mutex_seq newseq;
	uint32_t flags;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	res = _pthread_mutex_check_init(omutex);
	if (res != 0) return res;

	res = _pthread_mutex_unlock_updatebits(mutex, &flags, NULL, &newseq.lgenval,
			&newseq.ugenval);
	if (res != 0) return res;

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		return _pthread_mutex_unlock_drop(omutex, newseq, flags);
	} else {
		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		DEBUG_TRACE(psynch_mutex_uunlock, omutex, mtxgen, mtxugen,
				os_atomic_load(tidaddr, relaxed));
	}

	return 0;
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_unlock_slow(omutex);
	}
#endif

	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (os_unlikely(!_pthread_mutex_check_signature_fast(mutex))) {
		return _pthread_mutex_unlock_slow(omutex);
	}

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	mutex_seq oldseq, newseq;
	mutex_seq_load(seqaddr, &oldseq);

	int numwaiters = diff_genseq(oldseq.lgenval, oldseq.ugenval);
	if (os_unlikely(numwaiters == 0)) {
		// spurious unlock (unlock of unlocked lock)
		return 0;
	}

	// We're giving up the mutex one way or the other, so go ahead and
	// update the owner to 0 so that once the CAS below succeeds, there
	// is no stale ownership information. If the CAS of the seqaddr
	// fails, we may loop, but it's still valid for the owner to be
	// SWITCHING/0
	os_atomic_store(tidaddr, 0, relaxed);

	do {
		newseq = oldseq;
		newseq.ugenval += PTHRW_INC;

		if (os_likely((oldseq.lgenval & PTHRW_COUNT_MASK) ==
				(newseq.ugenval & PTHRW_COUNT_MASK))) {
			// our unlock sequence matches to lock sequence, so if the
			// CAS is successful, the mutex is unlocked

			// do not reset Ibit, just K&E
			newseq.lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
		} else {
			return _pthread_mutex_unlock_slow(omutex);
		}
	} while (os_unlikely(!mutex_seq_atomic_cmpxchgv(seqaddr, &oldseq, &newseq,
			release)));

	return 0;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
		uint32_t static_type)
{
	mutex->mtxopts.value = 0;
	mutex->mtxopts.options.mutex = 1;
	if (attr) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = (int16_t)attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->policy;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
		switch (static_type) {
			case 1:
				mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
				break;
			case 2:
				mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
				break;
			case 3:
				/* firstfit fall thru */
			case 7:
				mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
				break;
			default:
				return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}
	mutex->priority = 0;

	mutex_seq *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

#if PTHREAD_MUTEX_INIT_UNUSED
	if ((uint32_t*)tidaddr != mutex->m_tid) {
		mutex->mtxopts.options.misalign = 1;
		__builtin_memset(mutex->m_tid, 0xff, sizeof(mutex->m_tid));
	}
	__builtin_memset(mutex->m_mis, 0xff, sizeof(mutex->m_mis));
#endif // PTHREAD_MUTEX_INIT_UNUSED
	*tidaddr = 0;
	*seqaddr = (mutex_seq){ };

	long sig = _PTHREAD_MUTEX_SIG;
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) {
		// rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
		sig = _PTHREAD_MUTEX_SIG_fast;
	}

#if PTHREAD_MUTEX_INIT_UNUSED
	// For detecting copied mutexes and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
#if defined(__LP64__)
	uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
	__builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
	mutex->_reserved[2] = sig32;
	mutex->_reserved[3] = sig32;
	mutex->_pad = sig32;
#else
	mutex->_reserved[0] = sig32;
#endif
#endif // PTHREAD_MUTEX_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(mutex, sig, sig, release);
#endif

	return 0;
}
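
/*
 * Ordering sketch for the split signature store above: the high 32 bits of
 * 'sig' are written with a plain store, then the low 32 bits are published
 * with a release store. The intent is that a thread observing the completed
 * signature value also observes every prior initializing store to the mutex,
 * even though the 64-bit 'sig' field itself may be misaligned
 * (rdar://problem/21610439).
 */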

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_destroy(pthread_mutex_t *omutex)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	int res = EINVAL;

	_PTHREAD_LOCK(mutex->lock);
	if (_pthread_mutex_check_signature(mutex)) {
		mutex_seq *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

		mutex_seq seq;
		mutex_seq_load(seqaddr, &seq);

		uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

		if ((os_atomic_load(tidaddr, relaxed) == 0) &&
				(seq.lgenval & PTHRW_COUNT_MASK) ==
				(seq.ugenval & PTHRW_COUNT_MASK)) {
			mutex->sig = _PTHREAD_NO_SIG;
			res = 0;
		} else {
			res = EBUSY;
		}
	} else if (_pthread_mutex_check_signature_init(mutex)) {
		mutex->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	_PTHREAD_UNLOCK(mutex->lock);

	return res;
}

#endif /* !BUILDING_VARIANT ] */

/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
		return EINVAL;
	}
#endif /* __DARWIN_UNIX03 */

	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}