/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */
#include "kern/kern_trace.h"
#include <sys/syscall.h>

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */
extern int __unix_conforming;
#ifndef BUILDING_VARIANT
PTHREAD_NOEXPORT int __mtx_markprepost(_pthread_mutex *mutex, uint32_t oupdateval, int firstfit);
#endif /* BUILDING_VARIANT */
#define DEBUG_TRACE_POINTS 0

#if DEBUG_TRACE_POINTS
extern int __syscall(int number, ...);
#define DEBUG_TRACE(x, a, b, c, d) __syscall(SYS_kdebug_trace, TRACE_##x, a, b, c, d)
#else
#define DEBUG_TRACE(x, a, b, c, d) do { } while (0)
#endif
#include <machine/cpu_capabilities.h>
static int _pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr, uint32_t static_type);
#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif

static void
MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex, volatile uint64_t **seqaddr)
{
	if (mutex->mtxopts.options.misalign) {
		*seqaddr = (volatile uint64_t *)&mutex->m_seq[1];
	} else {
		*seqaddr = (volatile uint64_t *)&mutex->m_seq[0];
	}
}
static void
MUTEX_GETTID_ADDR(_pthread_mutex *mutex, volatile uint64_t **tidaddr)
{
	if (mutex->mtxopts.options.misalign) {
		*tidaddr = (volatile uint64_t *)&mutex->m_tid[1];
	} else {
		*tidaddr = (volatile uint64_t *)&mutex->m_tid[0];
	}
}
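
/*
 * Illustrative sketch, not part of the original source: why `misalign`
 * selects m_seq[1]/m_tid[1] above. The kernel side treats each pair of
 * 32-bit words as a single naturally aligned 64-bit quantity; when the
 * embedding structure places m_seq[0] on a 4-byte (but not 8-byte)
 * boundary, the aligned 8-byte window starts at m_seq[1] instead. The
 * helper below (hypothetical name) mirrors the misalign computation
 * performed by _pthread_mutex_init() later in this file.
 */
#if 0 /* example only, not compiled */
static bool
demo_seq_words_misaligned(_pthread_mutex *mutex)
{
	/* true when &m_seq[0] is not on an 8-byte boundary */
	return (((uintptr_t)&mutex->m_seq[0]) & 0x7) != 0;
}
#endif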
#ifndef BUILDING_VARIANT /* [ */

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user D scripts don't get errors.
 */
__private_extern__ __attribute__((used)) void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
int
pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
{
#if 0
	/* conformance tests depend on not having this behavior */
	/* The test for this behavior is optional */
	if (mutex->sig == _PTHREAD_MUTEX_SIG)
		return EBUSY;
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	LOCK_INIT(mutex->lock);
	return (_pthread_mutex_init(mutex, attr, 0x7));
}
int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (mutex->sig == _PTHREAD_MUTEX_SIG) {
		*prioceiling = mutex->prioceiling;
		res = 0;
	}
	return res;
}
int
pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling, int *old_prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (mutex->sig == _PTHREAD_MUTEX_SIG) {
		// the ceiling must lie within the valid range (was `||`, which
		// accepted any value)
		if (prioceiling >= -999 && prioceiling <= 999) {
			*old_prioceiling = mutex->prioceiling;
			mutex->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}
int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*prioceiling = attr->prioceiling;
		res = 0;
	}
	return res;
}
int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*protocol = attr->protocol;
		res = 0;
	}
	return res;
}
int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*type = attr->type;
		res = 0;
	}
	return res;
}
int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}
int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
	attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
	attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
	attr->type = PTHREAD_MUTEX_DEFAULT;
	attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}
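
/*
 * Usage sketch, example only: typical client code driving the attribute
 * functions above. The names example_lock and example_create_recursive_mutex
 * are hypothetical.
 */
#if 0 /* example only, not compiled */
#include <pthread.h>

static pthread_mutex_t example_lock;

static int
example_create_recursive_mutex(void)
{
	pthread_mutexattr_t attr;
	int res;

	res = pthread_mutexattr_init(&attr); // picks up the defaults set above
	if (res != 0) {
		return res;
	}
	res = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	if (res == 0) {
		res = pthread_mutex_init(&example_lock, &attr);
	}
	pthread_mutexattr_destroy(&attr);
	return res;
}
#endif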
int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		// the ceiling must lie within the valid range (was `||`, which
		// accepted any value)
		if (prioceiling >= -999 && prioceiling <= 999) {
			attr->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}
int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (protocol) {
			case PTHREAD_PRIO_NONE:
			case PTHREAD_PRIO_INHERIT:
			case PTHREAD_PRIO_PROTECT:
				attr->protocol = protocol;
				res = 0;
				break;
		}
	}
	return res;
}
int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (policy) {
			case _PTHREAD_MUTEX_POLICY_FAIRSHARE:
			case _PTHREAD_MUTEX_POLICY_FIRSTFIT:
				attr->policy = policy;
				res = 0;
				break;
		}
	}
	return res;
}
int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (type) {
			case PTHREAD_MUTEX_NORMAL:
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_RECURSIVE:
			//case PTHREAD_MUTEX_DEFAULT:
				attr->type = type;
				res = 0;
				break;
		}
	}
	return res;
}
void
pthread_yield_np(void)
{
	sched_yield();
}
/*
 * Temp: till pshared is fixed correctly
 */
int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
	int res = EINVAL;
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
#endif /* __DARWIN_UNIX03 */

	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}
/*
 * Sequence numbers and TID:
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to E=[L6+KE U4 TID0]
 * and then F=[L6+KE U4 TID940]. If a contender comes in after C, it will transition to
 * F=[L6+KE U4 TID940] directly. In both cases, the contender will enter the kernel with either
 * mutexwait(U4, TID0) or mutexwait(U4, TID940). The first owner will unlock the mutex
 * by first updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
 * H=[L6+KE U5 TID=-1] before entering the kernel with mutexdrop(U5, -1) to signal the next waiter
 * (potentially as a prepost). When the waiter comes out of the kernel, it will update the owner to
 * I=[L6+KE U5 TID941]. An unlock at this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID are written independently,
 * a thread may get preempted and another thread might see inconsistent data. In the worst case, another
 * thread may see the TID in the SWITCHING (-1) state or unlocked (0) state for longer because the
 * owning thread was preempted.
 */
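
/*
 * Illustrative sketch, not part of the original source: how the two 32-bit
 * sequence words pack into the single 64-bit value the CAS loops below
 * operate on, and how the pending-waiter count falls out of the lock and
 * unlock sequence numbers. The demo_* names are hypothetical; this mirrors
 * what the diff_genseq() helper used below is assumed to compute, with the
 * counters wrapping modulo 2^32.
 */
#if 0 /* example only, not compiled */
static uint64_t
demo_pack_seq(uint32_t lgenval, uint32_t ugenval)
{
	/* low 32 bits: lock sequence L; high 32 bits: unlock sequence U */
	return (((uint64_t)ugenval) << 32) | lgenval;
}

static uint32_t
demo_pending_waiters(uint32_t lgenval, uint32_t ugenval)
{
	/* count bits only; the K/E/P flag bits live outside the count mask */
	return (lgenval & PTHRW_COUNT_MASK) - (ugenval & PTHRW_COUNT_MASK);
}
#endif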
/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
__private_extern__ int
__mtx_droplock(_pthread_mutex *mutex, uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	bool firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	uint32_t lgenval, ugenval, flags;
	uint64_t oldtid, newtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default
	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		uint64_t selfid = _pthread_selfid_direct();

		if (*tidaddr != selfid) {
			//PTHREAD_ABORT("dropping recur or error mutex not owned by the thread\n");
			PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
			return EPERM;
		} else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
			   --mutex->mtxopts.options.lock_count) {
			// still recursively held; just decrement the lock count
			PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
			if (flagsp != NULL) {
				*flagsp = flags;
			}
			return 0;
		}
	}
	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	bool clearprepost, clearnotify, spurious;
	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		clearprepost = false;
		clearnotify = false;
		spurious = false;

		int numwaiters = diff_genseq(lgenval, ugenval); // pending waiters
		if (numwaiters == 0) {
			// spurious unlock; do not touch tid
			spurious = true;
		} else {
			ugenval += PTHRW_INC;

			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches the lock sequence, so if the CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				if (firstfit) {
					lgenval &= ~PTH_RWL_EBIT; // reset E bit so another can acquire meanwhile
					newtid = 0;
				} else {
					newtid = PTHREAD_MTX_TID_SWITCHING;
				}
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead and update the owner to SWITCHING
				// or 0 so that once the CAS below succeeds, there is no stale ownership information.
				// If the CAS of the seqaddr fails, we may loop, but it's still valid for the owner
				// to be SWITCHING/0.
				if (!OSAtomicCompareAndSwap64(oldtid, newtid, (volatile int64_t *)tidaddr)) {
					// we own this mutex, nobody should be updating it except us
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
			if (firstfit && ((lgenval & PTH_RWL_PBIT) != 0)) {
				clearprepost = true;
				lgenval &= ~PTH_RWL_PBIT;
			}
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)seqaddr) != TRUE);

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
	}

	if (mgenp != NULL) {
		*mgenp = lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}
static int
__mtx_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
	int res = 0;
	int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	int isebit = 0;

	uint32_t lgenval, ugenval;
	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid;

	do {
		do {
			oldval64 = *seqaddr;
			oldtid = *tidaddr;
			lgenval = (uint32_t)oldval64;
			ugenval = (uint32_t)(oldval64 >> 32);

			// E bit was set on first pass through the loop but is no longer
			// set. Apparently we spin until it arrives.
			// XXX: verify this is desired behavior.
		} while (isebit && (lgenval & PTH_RWL_EBIT) == 0);

		if (isebit) {
			// first fit mutex now has the E bit set. Return 1.
			res = 1;
			break;
		}

		if (firstfit) {
			isebit = (lgenval & PTH_RWL_EBIT) != 0;
		} else if ((lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) == (PTH_RWL_KBIT|PTH_RWL_EBIT)) {
			// fairshare mutex and the bits are already set, just update tid
			break;
		}

		// either first fit or no E bit set
		// update the bits
		lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		// Retry if CAS fails, or if it succeeds with firstfit and E bit already set
	} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)seqaddr) != TRUE ||
		 (firstfit && isebit));

	if (res == 0) {
		if (!OSAtomicCompareAndSwap64Barrier(oldtid, selfid, (volatile int64_t *)tidaddr)) {
			// we own this mutex, nobody should be updating it except us
		}
	}

	return res;
}
int
__mtx_markprepost(_pthread_mutex *mutex, uint32_t updateval, int firstfit)
{
	uint32_t flags;
	uint32_t lgenval, ugenval;
	uint64_t oldval64, newval64;

	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	if (firstfit != 0 && (updateval & PTH_RWL_PBIT) != 0) {
		int clearprepost = 0;
		do {
			clearprepost = 0;

			flags = mutex->mtxopts.value;

			oldval64 = *seqaddr;
			lgenval = (uint32_t)oldval64;
			ugenval = (uint32_t)(oldval64 >> 32);

			/* update the bits */
			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				// lock and unlock sequences match: no waiters remain, clear the prepost
				clearprepost = 1;
				lgenval &= ~PTH_RWL_PBIT;
			} else {
				lgenval |= PTH_RWL_PBIT;
			}
			newval64 = (((uint64_t)ugenval) << 32);
			newval64 |= lgenval;
		} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)seqaddr) != TRUE);

		if (clearprepost != 0) {
			__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
		}
	}
	return 0;
}
static inline bool
_pthread_mutex_check_init_fast(_pthread_mutex *mutex)
{
	return (mutex->sig == _PTHREAD_MUTEX_SIG);
}
static int __attribute__((noinline))
_pthread_mutex_check_init(pthread_mutex_t *omutex)
{
	int res = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (mutex->sig != _PTHREAD_MUTEX_SIG) {
		res = EINVAL;
		if ((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) == _PTHREAD_MUTEX_SIG_CMP) {
			LOCK(mutex->lock);
			// check again under the lock in case another thread raced us here
			if ((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) == _PTHREAD_MUTEX_SIG_CMP) {
				// initialize a statically initialized mutex to provide
				// compatibility for misbehaving applications.
				// (unlock should not be the first operation on a mutex)
				res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
			} else if (mutex->sig == _PTHREAD_MUTEX_SIG) {
				res = 0;
			}
			UNLOCK(mutex->lock);
		}
		if (res != 0) {
			PLOCKSTAT_MUTEX_ERROR(omutex, res);
		}
	}
	return res;
}
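
/*
 * Usage sketch, example only: the compatibility path above exists so that a
 * statically initialized mutex whose first operation is (incorrectly) an
 * unlock still gets lazily initialized from the low bits of its static
 * signature instead of failing outright. The function name is hypothetical.
 */
#if 0 /* example only, not compiled */
static pthread_mutex_t misbehaving_lock = PTHREAD_MUTEX_INITIALIZER;

static void
example_misbehaving_client(void)
{
	// unlock-before-lock: undefined per POSIX, but tolerated here via
	// _pthread_mutex_check_init()
	pthread_mutex_unlock(&misbehaving_lock);

	pthread_mutex_lock(&misbehaving_lock);
	pthread_mutex_unlock(&misbehaving_lock);
}
#endif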
static int
_pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (os_slowpath(!_pthread_mutex_check_init_fast(mutex))) {
		res = _pthread_mutex_check_init(omutex);
		if (res != 0) {
			return res;
		}
	}

	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		if (*tidaddr == selfid) {
			if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
				if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
					mutex->mtxopts.options.lock_count++;
					PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
					res = 0;
				} else {
					res = EAGAIN;
					PLOCKSTAT_MUTEX_ERROR(omutex, res);
				}
			} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
				// <rdar://problem/16261552> as per OpenGroup, trylock cannot
				// return EDEADLK on a deadlock, it should return EBUSY.
				res = EBUSY;
				PLOCKSTAT_MUTEX_ERROR(omutex, res);
			} else { /* PTHREAD_MUTEX_ERRORCHECK */
				res = EDEADLK;
				PLOCKSTAT_MUTEX_ERROR(omutex, res);
			}
			return res;
		}
	}

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;
	bool gotlock = false;

	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		gotlock = ((lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			lgenval += PTHRW_INC;
			lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)seqaddr) == FALSE);

	if (gotlock) {
		if (!OSAtomicCompareAndSwap64Barrier(oldtid, selfid, (volatile int64_t *)tidaddr)) {
			while (!OSAtomicCompareAndSwap64Barrier(*tidaddr, selfid, (volatile int64_t *)tidaddr));
		}
		res = 0;
		DEBUG_TRACE(psynch_mutex_ulock, omutex, lgenval, ugenval, selfid);
		PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);
	} else if (trylock) {
		res = EBUSY;
		DEBUG_TRACE(psynch_mutex_utrylock_failed, omutex, lgenval, ugenval, oldtid);
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	} else {
		PLOCKSTAT_MUTEX_BLOCK(omutex);
		do {
			uint32_t updateval;
			do {
				updateval = __psynch_mutexwait(omutex, lgenval, ugenval, oldtid, mutex->mtxopts.value);
			} while (updateval == (uint32_t)-1);

			// returns 0 on successful update; in firstfit it may fail with 1
		} while (__mtx_updatebits(mutex, selfid) == 1);
		res = 0;
		PLOCKSTAT_MUTEX_BLOCKED(omutex, BLOCK_SUCCESS_PLOCKSTAT);
	}

	if (res == 0 && mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
		mutex->mtxopts.options.lock_count = 1;
	}

	PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);

	return res;
}
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, false);
}

int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, true);
}
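
/*
 * Usage sketch, example only: the two wrappers above differ only in the
 * trylock flag; a failed trylock returns EBUSY rather than blocking. The
 * names counter_lock and example_increment are hypothetical.
 */
#if 0 /* example only, not compiled */
static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
static int counter;

static void
example_increment(void)
{
	if (pthread_mutex_trylock(&counter_lock) != 0) {
		// contended: fall back to the blocking path
		pthread_mutex_lock(&counter_lock);
	}
	counter++;
	pthread_mutex_unlock(&counter_lock);
}
#endif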
/*
 * TODO: Priority inheritance stuff
 */
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t mtxgen, mtxugen, flags;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	if (os_slowpath(!_pthread_mutex_check_init_fast(mutex))) {
		res = _pthread_mutex_check_init(omutex);
		if (res != 0) {
			return res;
		}
	}

	res = __mtx_droplock(mutex, &flags, NULL, &mtxgen, &mtxugen);
	if (res != 0) {
		return res;
	}

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		uint32_t updateval;
		int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
		volatile uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

		updateval = __psynch_mutexdrop(omutex, mtxgen, mtxugen, *tidaddr, flags);

		if (updateval == (uint32_t)-1) {
			res = errno;
			if (res == EINTR) {
				res = 0;
			}
			if (res != 0) {
				PTHREAD_ABORT("__p_mutexdrop failed with error %d\n", res);
			}
			return res;
		} else if (firstfit == 1) {
			if ((updateval & PTH_RWL_PBIT) != 0) {
				__mtx_markprepost(mutex, updateval, firstfit);
			}
		}
	} else {
		volatile uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		DEBUG_TRACE(psynch_mutex_uunlock, omutex, mtxgen, mtxugen, *tidaddr);
	}

	return 0;
}
static int
_pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr, uint32_t static_type)
{
	if (attr != NULL) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->policy;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
		switch (static_type) {
			case 1:
				mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
				break;
			case 2:
				mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
				break;
			case 3:
				/* firstfit fall thru */
			case 7:
				mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
				break;
			default:
				return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}

	mutex->mtxopts.options.notify = 0;
	mutex->mtxopts.options.unused = 0;
	mutex->mtxopts.options.hold = 0;
	mutex->mtxopts.options.mutex = 1;
	mutex->mtxopts.options.lock_count = 0;

	mutex->m_tid[0] = 0;
	mutex->m_tid[1] = 0;
	mutex->m_seq[0] = 0;
	mutex->m_seq[1] = 0;
	mutex->m_seq[2] = 0;
	mutex->prioceiling = 0;

	mutex->mtxopts.options.misalign = (((uintptr_t)&mutex->m_seq[0]) & 0x7) != 0;

	// Ensure all contents are properly set before setting signature.
	OSMemoryBarrier();

	mutex->sig = _PTHREAD_MUTEX_SIG;

	return 0;
}
int
pthread_mutex_destroy(pthread_mutex_t *omutex)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	int res = EINVAL;

	LOCK(mutex->lock);
	if (mutex->sig == _PTHREAD_MUTEX_SIG) {
		uint32_t lgenval, ugenval;
		uint64_t oldval64;

		volatile uint64_t *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
		volatile uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

		oldval64 = *seqaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);
		if ((*tidaddr == (uint64_t)0) &&
		    ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK))) {
			// not held and no pending waiters: safe to destroy
			mutex->sig = _PTHREAD_NO_SIG;
			res = 0;
		} else {
			res = EBUSY;
		}
	} else if ((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) == _PTHREAD_MUTEX_SIG_CMP) {
		mutex->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	UNLOCK(mutex->lock);

	return res;
}

#endif /* !BUILDING_VARIANT ] */
/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
		return EINVAL;
	}
#endif /* __DARWIN_UNIX03 */

	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}