/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */

#include "pthread_internals.h"

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */
extern int __unix_conforming;
#ifndef BUILDING_VARIANT
__private_extern__ int usenew_mtximpl = 1;
static void __pthread_mutex_set_signature(npthread_mutex_t *mutex);
int __mtx_markprepost(npthread_mutex_t *mutex, uint32_t oupdateval, int firstfit);
static int _pthread_mutex_destroy_locked(pthread_mutex_t *omutex);
#else /* BUILDING_VARIANT */
extern int usenew_mtximpl;
#endif /* BUILDING_VARIANT */
extern int _commpage_pthread_mutex_lock(uint32_t *lvalp, int flags, uint64_t mtid, uint32_t mask, uint64_t *tidp, int *sysret);

#include <machine/cpu_capabilities.h>

int _pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr, uint32_t static_type);
#if defined(__LP64__)
#define MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr) \
{ \
    if (mutex->mtxopts.options.misalign != 0) { \
        lseqaddr = &mutex->m_seq[0]; \
        useqaddr = &mutex->m_seq[1]; \
    } else { \
        lseqaddr = &mutex->m_seq[1]; \
        useqaddr = &mutex->m_seq[2]; \
    } \
}
#else /* __LP64__ */
#define MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr) \
{ \
    if (mutex->mtxopts.options.misalign != 0) { \
        lseqaddr = &mutex->m_seq[1]; \
        useqaddr = &mutex->m_seq[2]; \
    } else { \
        lseqaddr = &mutex->m_seq[0]; \
        useqaddr = &mutex->m_seq[1]; \
    } \
}
#endif /* __LP64__ */
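
/*
 * Illustrative sketch (not part of the original source): MUTEX_GETSEQ_ADDR
 * selects the 64-bit-aligned pair of sequence words so the lock (L) and
 * unlock (U) sequences can be updated together with a single 64-bit CAS,
 * the way the lock/unlock paths below do:
 *
 *     volatile uint32_t *lseqaddr, *useqaddr;
 *     uint64_t oldval64, newval64;
 *
 *     MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
 *     oldval64 = (((uint64_t)*useqaddr) << 32) | *lseqaddr;
 *     // ... compute newval64 from the desired L/U values ...
 *     OSAtomicCompareAndSwap64Barrier(oldval64, newval64,
 *         (volatile int64_t *)lseqaddr);
 */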
#define _KSYN_TRACE_ 0

#if _KSYN_TRACE_
/* The Function qualifiers */
#define DBG_FUNC_START 1
#define DBG_FUNC_END   2
#define DBG_FUNC_NONE  0

int __kdebug_trace(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);

#define _KSYN_TRACE_UM_LOCK   0x9000060
#define _KSYN_TRACE_UM_UNLOCK 0x9000064
#define _KSYN_TRACE_UM_MHOLD  0x9000068
#define _KSYN_TRACE_UM_MDROP  0x900006c
#define _KSYN_TRACE_UM_MUBITS 0x900007c
#define _KSYN_TRACE_UM_MARKPP 0x90000a8
#endif /* _KSYN_TRACE_ */

#ifndef BUILDING_VARIANT /* [ */

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

#ifdef PR_5243343
/* 5243343 - temporary hack to detect if we are running the conformance test */
extern int PR_5243343_flag;
#endif /* PR_5243343 */
/* This function is never called and exists to provide never-fired dtrace
 * probes so that user d scripts don't get errors.
 */
__private_extern__ __attribute__((used)) void
_plockstat_never_fired(void)
    PLOCKSTAT_MUTEX_SPIN(NULL);
    PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
int
pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
    /* conformance tests depend on not having this behavior */
    /* The test for this behavior is optional */
    if (mutex->sig == _PTHREAD_MUTEX_SIG)
    LOCK_INIT(mutex->lock);
    return (_pthread_mutex_init(mutex, attr, 0x7));
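
/*
 * Usage sketch (illustrative only; standard POSIX calls, not part of the
 * library source): initialize a default mutex, use it, and destroy it.
 *
 *     #include <pthread.h>
 *
 *     static pthread_mutex_t m;
 *
 *     void example(void)
 *     {
 *         pthread_mutex_init(&m, NULL);    // NULL attr selects the defaults
 *         pthread_mutex_lock(&m);
 *         // ... critical section ...
 *         pthread_mutex_unlock(&m);
 *         pthread_mutex_destroy(&m);
 *     }
 */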
/*
 * Fetch the priority ceiling value from a mutex variable.
 * Note: written as a 'helper' function to hide implementation details.
 */
int
pthread_mutex_getprioceiling(const pthread_mutex_t *mutex,
                             int *prioceiling)
    if (mutex->sig == _PTHREAD_MUTEX_SIG)
        *prioceiling = mutex->prioceiling;
    res = EINVAL; /* Not an initialized 'attribute' structure */
/*
 * Set the priority ceiling for a mutex.
 * Note: written as a 'helper' function to hide implementation details.
 */
int
pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
                             int prioceiling,
                             int *old_prioceiling)
    if (mutex->sig == _PTHREAD_MUTEX_SIG)
        if ((prioceiling >= -999) &&
            (prioceiling <= 999))
            *old_prioceiling = mutex->prioceiling;
            mutex->prioceiling = prioceiling;
        res = EINVAL; /* Invalid parameter */
    res = EINVAL; /* Not an initialized 'attribute' structure */
/*
 * Get the priority ceiling value from a mutex attribute structure.
 * Note: written as a 'helper' function to hide implementation details.
 */
int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr,
                                 int *prioceiling)
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
        *prioceiling = attr->prioceiling;
    return (EINVAL); /* Not an initialized 'attribute' structure */
/*
 * Get the mutex 'protocol' value from a mutex attribute structure.
 * Note: written as a 'helper' function to hide implementation details.
 */
int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr,
                              int *protocol)
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
        *protocol = attr->protocol;
    return (EINVAL); /* Not an initialized 'attribute' structure */
/*
 * Get the mutex 'type' value from a mutex attribute structure.
 * Note: written as a 'helper' function to hide implementation details.
 */
int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr,
                          int *type)
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
    return (EINVAL); /* Not an initialized 'attribute' structure */
int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
        *pshared = (int)attr->pshared;
    return (EINVAL); /* Not an initialized 'attribute' structure */
/*
 * Initialize a mutex attribute structure to system defaults.
 */
int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
    attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
    attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
    attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
    attr->type = PTHREAD_MUTEX_DEFAULT;
    attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
    attr->pshared = _PTHREAD_DEFAULT_PSHARED;
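
/*
 * Usage sketch (illustrative only): override one of the defaults set above,
 * e.g. to obtain a recursive mutex, then initialize with the attribute.
 *
 *     pthread_mutexattr_t attr;
 *     pthread_mutex_t m;
 *
 *     pthread_mutexattr_init(&attr);                              // defaults
 *     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);  // override
 *     pthread_mutex_init(&m, &attr);
 *     pthread_mutexattr_destroy(&attr);   // attr may be destroyed once used
 */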
/*
 * Set the priority ceiling value in a mutex attribute structure.
 * Note: written as a 'helper' function to hide implementation details.
 */
int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr,
                                 int prioceiling)
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
        if ((prioceiling >= -999) &&
            (prioceiling <= 999))
            attr->prioceiling = prioceiling;
    return (EINVAL); /* Invalid parameter */
    return (EINVAL); /* Not an initialized 'attribute' structure */
/*
 * Set the mutex 'protocol' value in a mutex attribute structure.
 * Note: written as a 'helper' function to hide implementation details.
 */
int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr,
                              int protocol)
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
        if ((protocol == PTHREAD_PRIO_NONE) ||
            (protocol == PTHREAD_PRIO_INHERIT) ||
            (protocol == PTHREAD_PRIO_PROTECT))
            attr->protocol = protocol;
    return (EINVAL); /* Invalid parameter */
    return (EINVAL); /* Not an initialized 'attribute' structure */
int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr,
                               int policy)
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
        (policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) ||
        (policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT)
        (policy == _PTHREAD_MUTEX_POLICY_REALTIME) ||
        (policy == _PTHREAD_MUTEX_POLICY_ADAPTIVE) ||
        (policy == _PTHREAD_MUTEX_POLICY_PRIPROTECT) ||
        (policy == _PTHREAD_MUTEX_POLICY_PRIINHERIT)
            attr->policy = policy;
    return (EINVAL); /* Invalid parameter */
    return (EINVAL); /* Not an initialized 'attribute' structure */
/*
 * Set the mutex 'type' value in a mutex attribute structure.
 * Note: written as a 'helper' function to hide implementation details.
 */
int
pthread_mutexattr_settype(pthread_mutexattr_t *attr,
                          int type)
    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
        if ((type == PTHREAD_MUTEX_NORMAL) ||
            (type == PTHREAD_MUTEX_ERRORCHECK) ||
            (type == PTHREAD_MUTEX_RECURSIVE) ||
            (type == PTHREAD_MUTEX_DEFAULT))
    return (EINVAL); /* Invalid parameter */
    return (EINVAL); /* Not an initialized 'attribute' structure */
int mutex_try_lock(int *x) {
    return _spin_lock_try((pthread_lock_t *)x);
}

void mutex_wait_lock(int *x) {
    if (_spin_lock_try((pthread_lock_t *)x)) {

void
pthread_yield_np (void)
/*
 * Temp: till pshared is fixed correctly
 */
int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
#if __DARWIN_UNIX03
    if (__unix_conforming == 0)
        __unix_conforming = 1;
#endif /* __DARWIN_UNIX03 */

    if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG)
#if __DARWIN_UNIX03
        if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
        if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
            attr->pshared = pshared;
    return (EINVAL); /* Invalid parameter */
    return (EINVAL); /* Not an initialized 'attribute' structure */
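
/*
 * Usage sketch (illustrative only): a process-shared mutex must live in
 * memory visible to every participating process, e.g. an inherited
 * MAP_SHARED mapping.
 *
 *     #include <sys/mman.h>
 *     #include <pthread.h>
 *
 *     pthread_mutex_t *m = mmap(NULL, sizeof(*m), PROT_READ | PROT_WRITE,
 *                               MAP_ANON | MAP_SHARED, -1, 0);
 *     pthread_mutexattr_t attr;
 *
 *     pthread_mutexattr_init(&attr);
 *     pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
 *     pthread_mutex_init(m, &attr);
 */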
/*
 * Drop the mutex unlock references (from cond wait or mutex_unlock()).
 */
__private_extern__ int
__mtx_droplock(npthread_mutex_t *mutex, uint32_t diffgen, uint32_t *flagsp, uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
    uint64_t selfid, resettid;
    int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
    uint32_t lgenval, ugenval, nlval, ulval, morewaiters = 0, flags;
    volatile uint32_t *lseqaddr, *useqaddr;
    uint64_t oldval64, newval64;
    int numwaiters = 0, clearprepost = 0;

    (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_START, (uint32_t)mutex, diffgen, 0, 0, 0);

    MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);

    flags = mutex->mtxopts.value;
    flags &= ~_PTHREAD_MTX_OPT_NOTIFY; /* no notification by default */
    if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL)
        self = pthread_self();
        (void)pthread_threadid_np(self, &selfid);
        if (mutex->m_tid != selfid)
            //LIBC_ABORT("dropping recur or error mutex not owned by the thread\n");
            PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
        } else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
                   --mutex->mtxopts.options.lock_count)
            PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);

    numwaiters = diff_genseq((lgenval & PTHRW_COUNT_MASK), (ugenval & PTHRW_COUNT_MASK)); /* pending waiters */

    if (numwaiters == 0) {
        /* spurious unlocks, do not touch tid */
        oldval64 = (((uint64_t)ugenval) << 32);
        if ((firstfit != 0) && ((lgenval & PTH_RWL_PBIT) != 0)) {
            lgenval &= ~PTH_RWL_PBIT;
            newval64 = (((uint64_t)ugenval) << 32);
        if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE)
        /* validated L & U to be same, this is spurious unlock */
        flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
        if (clearprepost == 1)
            __psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
    if (numwaiters < diffgen) {
        (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, numwaiters, lgenval, ugenval, 0);
        /* cannot drop more than existing number of waiters */
        diffgen = numwaiters;

    oldval64 = (((uint64_t)ugenval) << 32);
    ulval = ugenval + diffgen;

    if ((lgenval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK)) {
        /* do not reset Ibit, just K&E */
        nlval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
        flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
        if ((firstfit != 0) && ((lgenval & PTH_RWL_PBIT) != 0)) {
            nlval &= ~PTH_RWL_PBIT;
        /* need to signal others waiting for mutex */
        flags |= _PTHREAD_MTX_OPT_NOTIFY;

    if (((nlval & PTH_RWL_EBIT) != 0) && (firstfit != 0)) {
        nlval &= ~PTH_RWL_EBIT; /* reset Ebit so another can acquire meanwhile */

    newval64 = (((uint64_t)ulval) << 32);

    resettid = mutex->m_tid;
    if ((lgenval & PTHRW_COUNT_MASK) == (ulval & PTHRW_COUNT_MASK))
    else if (firstfit == 0)
        mutex->m_tid = PTHREAD_MTX_TID_SWITCHING;
    if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) != TRUE) {
        mutex->m_tid = resettid;
        (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 2, lgenval, ugenval, 0);
        (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_NONE, (uint32_t)mutex, 2, nlval, ulval, 0);

    if (clearprepost != 0) {
        __psynch_cvclrprepost(mutex, nlval, ulval, 0, 0, nlval, (flags | _PTHREAD_MTX_OPT_MUTEX));

    *pmtxp = (uint32_t *)mutex;

    (void)__kdebug_trace(_KSYN_TRACE_UM_MDROP | DBG_FUNC_END, (uint32_t)mutex, flags, 0, 0, 0);
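
/*
 * Arithmetic sketch (assumption for illustration; not the shipped macro):
 * the pending-waiter count used above is the distance between the lock and
 * unlock sequence counters, with the low status bits masked off and
 * unsigned wraparound handling rollover. Conceptually:
 *
 *     uint32_t lcount  = lgenval & PTHRW_COUNT_MASK;
 *     uint32_t ucount  = ugenval & PTHRW_COUNT_MASK;
 *     uint32_t pending = (lcount - ucount) & PTHRW_COUNT_MASK;
 */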
__mtx_updatebits(npthread_mutex_t *mutex, uint32_t oupdateval, int firstfit, int fromcond, uint64_t selfid)
    uint32_t updateval = oupdateval;
    pthread_mutex_t *omutex = (pthread_mutex_t *)mutex;
    uint32_t lgenval, ugenval, nval, uval, bits;
    volatile uint32_t *lseqaddr, *useqaddr;
    uint64_t oldval64, newval64;

    MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);

    (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_START, (uint32_t)mutex, oupdateval, firstfit, fromcond, 0);

    bits = updateval & PTHRW_BIT_MASK;

    (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 1, lgenval, ugenval, 0);

    if ((updateval & PTH_RWL_MTX_WAIT) != 0) {
        lgenval = (updateval & PTHRW_COUNT_MASK) | (lgenval & PTHRW_BIT_MASK);
        /* if from mutex_lock(), it will handle the rewind */
        /* go block in the kernel with same lgenval as returned */
        /* firstfit might not have EBIT */
        if ((lgenval & PTH_RWL_EBIT) != 0)
    } else if ((lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) == (PTH_RWL_KBIT|PTH_RWL_EBIT)) {
        /* fairshare mutex and the bits are already set, just update tid */

    /* either firstfit or no E bit set */
    /* update the bits */
    oldval64 = (((uint64_t)ugenval) << 32);
    nval = lgenval | (PTH_RWL_KBIT|PTH_RWL_EBIT);
    newval64 = (((uint64_t)uval) << 32);

    /* set s and b bit */
    if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) == TRUE) {
        (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 2, nval, uval, 0);
        if ((firstfit != 0) && (isebit != 0))
        (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_NONE, (uint32_t)mutex, 4, nval, uval, 0);

    /* successful bits update */
    mutex->m_tid = selfid;

    (void)__kdebug_trace(_KSYN_TRACE_UM_MUBITS | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);

    /* firstfit failure */
    if ((lgenval & PTH_RWL_EBIT) == 0)

    /* called from condition variable code block again */
#if USE_COMPAGE /* [ */
    updateval = __psynch_mutexwait((pthread_mutex_t *)lseqaddr, lgenval | PTH_RWL_RETRYBIT, ugenval, mutex->m_tid,
                                   mutex->mtxopts.value);
#else /* USECOMPAGE ][ */
    updateval = __psynch_mutexwait(omutex, lgenval | PTH_RWL_RETRYBIT, ugenval, mutex->m_tid,
                                   mutex->mtxopts.value);
#endif /* USE_COMPAGE ] */
    if (updateval == (uint32_t)-1) {

    /* now update the bits */
int
__mtx_markprepost(npthread_mutex_t *mutex, uint32_t oupdateval, int firstfit)
    uint32_t updateval = oupdateval;
    int clearprepost = 0;
    uint32_t lgenval, ugenval, flags;
    volatile uint32_t *lseqaddr, *useqaddr;
    uint64_t oldval64, newval64;

    MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);

    (void)__kdebug_trace(_KSYN_TRACE_UM_MARKPP | DBG_FUNC_START, (uint32_t)mutex, oupdateval, firstfit, 0, 0);

    if ((firstfit != 0) && ((updateval & PTH_RWL_PBIT) != 0)) {
        flags = mutex->mtxopts.value;

        (void)__kdebug_trace(_KSYN_TRACE_UM_MARKPP | DBG_FUNC_NONE, (uint32_t)mutex, 1, lgenval, ugenval, 0);

        /* update the bits */
        oldval64 = (((uint64_t)ugenval) << 32);

        if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
            lgenval &= ~PTH_RWL_PBIT;
            lgenval |= PTH_RWL_PBIT;

        newval64 = (((uint64_t)ugenval) << 32);

        if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) == TRUE) {
            (void)__kdebug_trace(_KSYN_TRACE_UM_MARKPP | DBG_FUNC_NONE, (uint32_t)mutex, 2, lgenval, ugenval, 0);

            if (clearprepost != 0)
                __psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));

    (void)__kdebug_trace(_KSYN_TRACE_UM_MARKPP | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
/*
 * For the new style mutex, interlocks are not held all the time.
 * We need the signature to be set at the end, and we need to protect
 * against the compiler reordering the code.
 */
static void
__pthread_mutex_set_signature(npthread_mutex_t *mutex)
    mutex->sig = _PTHREAD_MUTEX_SIG;
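
/*
 * Sketch of the intent (assumption, not the shipped mechanism): routing the
 * final store through a separate, non-inlined helper keeps the compiler from
 * hoisting the signature write above the field initialization done in
 * _pthread_mutex_init(). An explicit compiler barrier would express the same
 * ordering constraint:
 *
 *     // ... initialize m_seq[], prioceiling, options ...
 *     __asm__ __volatile__("" ::: "memory");   // compiler barrier (illustrative)
 *     mutex->sig = _PTHREAD_MUTEX_SIG;
 */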
int
pthread_mutex_lock(pthread_mutex_t *omutex)
    npthread_mutex_t *mutex = (npthread_mutex_t *)omutex;
    int sig = mutex->sig;
#if NEVERINCOMPAGE || !USE_COMPAGE
    //uint32_t oldval, newval;
    int gotlock = 0, firstfit = 0;
    uint32_t updateval, lgenval, ugenval, nval, uval;
    volatile uint32_t *lseqaddr, *useqaddr;
    uint64_t oldval64, newval64;

    /* To provide backwards compat for apps using mutex incorrectly */
    if ((sig != _PTHREAD_MUTEX_SIG) && ((sig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP)) {
        PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
    if (mutex->sig != _PTHREAD_MUTEX_SIG) {
        if ((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) == _PTHREAD_MUTEX_SIG_CMP) {
            /* static initializer, init the mutex */
            if ((retval = _pthread_mutex_init(omutex, NULL, (mutex->sig & 0xf))) != 0) {
                PLOCKSTAT_MUTEX_ERROR(omutex, retval);
        } else if (mutex->sig != _PTHREAD_MUTEX_SIG) {
            PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);

    (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_START, (uint32_t)mutex, 0, 0, 0, 0);

    MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);

    self = pthread_self();
    (void)pthread_threadid_np(self, &selfid);

    if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
        if (mutex->m_tid == selfid) {
            if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
                if (mutex->mtxopts.options.lock_count < USHRT_MAX)
                    mutex->mtxopts.options.lock_count++;
                    PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
                PLOCKSTAT_MUTEX_ERROR(omutex, retval);
            } else { /* PTHREAD_MUTEX_ERRORCHECK */
                PLOCKSTAT_MUTEX_ERROR(omutex, retval);

    (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 1, 0, 0, 0);

#if USE_COMPAGE /* [ */
    retval = _commpage_pthread_mutex_lock(lseqaddr, mutex->mtxopts.value, selfid, mask, &mutex->m_tid, &sysret);
    } else if (retval == 1) {
        /* returns 0 on successful update */
        if (__mtx_updatebits(mutex, updateval, firstfit, 0, selfid) == 1) {
            /* could not acquire, may be locked in ffit case */
            LIBC_ABORT("commpage implementation looping in libc\n");
    else if (retval == 3) {
        cthread_set_errno_self(sysret);
        newval = oldval + PTHRW_INC;
        /* to block in the kernel again */
        LIBC_ABORT("commpage implementation bombed\n");
#else /* USECOMPAGE ][ */

    (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, lgenval, ugenval, 0);
#endif /* _KSYN_TRACE_ */

    if ((lgenval & PTH_RWL_EBIT) == 0) {
        oldval64 = (((uint64_t)ugenval) << 32);
        nval = (lgenval + PTHRW_INC) | (PTH_RWL_EBIT|PTH_RWL_KBIT);
        newval64 = (((uint64_t)uval) << 32);
        if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) == TRUE) {
            (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, nval, uval, 0);
            mutex->m_tid = selfid;

    (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 3, nval, uval, 0);

    firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
    updateval = __psynch_mutexwait(omutex, nval | retrybit, uval, mutex->m_tid,
                                   mutex->mtxopts.value);

    (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 4, updateval, 0, 0);

    if (updateval == (uint32_t)-1) {
    /* returns 0 on successful update; in firstfit it may fail with 1 */
    if (__mtx_updatebits(mutex, PTHRW_INC | (PTH_RWL_KBIT | PTH_RWL_EBIT), firstfit, 0, selfid) == 1) {
        /* could not acquire, may be locked in ffit case */
        retrybit = PTH_RWL_RETRYBIT;
        LIBC_ABORT("commpage implementation looping in libc\n");
#endif /* USE_COMPAGE ] */

    if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
        mutex->mtxopts.options.lock_count = 1;

    (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
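
/*
 * Behavior sketch (standard POSIX semantics, illustrative only): the
 * non-NORMAL paths above give recursive and error-checking mutexes their
 * expected relock behavior.
 *
 *     pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *     pthread_mutex_init(&m, &attr);
 *     pthread_mutex_lock(&m);
 *     r = pthread_mutex_lock(&m);     // r == EDEADLK instead of deadlocking
 *     pthread_mutex_unlock(&m);
 */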
/*
 * Attempt to lock a mutex, but don't block if this isn't possible.
 */
int
pthread_mutex_trylock(pthread_mutex_t *omutex)
    npthread_mutex_t *mutex = (npthread_mutex_t *)omutex;
    int sig = mutex->sig;
    uint32_t lgenval, ugenval, nval, uval;
    volatile uint32_t *lseqaddr, *useqaddr;
    uint64_t oldval64, newval64;

    /* To provide backwards compat for apps using mutex incorrectly */
    if ((sig != _PTHREAD_MUTEX_SIG) && ((sig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP)) {
        PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
    if (mutex->sig != _PTHREAD_MUTEX_SIG) {
        if ((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) == _PTHREAD_MUTEX_SIG_CMP) {
            /* static initializer, init the mutex */
            if ((error = _pthread_mutex_init(omutex, NULL, (mutex->sig & 0xf))) != 0) {
                UNLOCK(mutex->lock);
                PLOCKSTAT_MUTEX_ERROR(omutex, error);
        } else if (mutex->sig != _PTHREAD_MUTEX_SIG) {
            UNLOCK(mutex->lock);
            PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
        UNLOCK(mutex->lock);

    MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);

    self = pthread_self();
    (void)pthread_threadid_np(self, &selfid);

    if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
        if (mutex->m_tid == selfid) {
            if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
                if (mutex->mtxopts.options.lock_count < USHRT_MAX)
                    mutex->mtxopts.options.lock_count++;
                    PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
                PLOCKSTAT_MUTEX_ERROR(omutex, error);
            } else { /* PTHREAD_MUTEX_ERRORCHECK */
                PLOCKSTAT_MUTEX_ERROR(omutex, error);

    lgenval = *lseqaddr;
    ugenval = *useqaddr;

    (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, lgenval, ugenval, 0);
#endif /* _KSYN_TRACE_ */

    oldval64 = (((uint64_t)ugenval) << 32);
    oldval64 |= lgenval;

    /* if we can acquire go ahead otherwise ensure it is still busy */
    if ((lgenval & PTH_RWL_EBIT) == 0) {
        nval = (lgenval + PTHRW_INC) | (PTH_RWL_EBIT|PTH_RWL_KBIT);
        nval = (lgenval | PTH_RWL_TRYLKBIT);
    newval64 = (((uint64_t)uval) << 32);

    /* set s and b bit */
    if (OSAtomicCompareAndSwap64Barrier(oldval64, newval64, (volatile int64_t *)lseqaddr) == TRUE) {
        (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_NONE, (uint32_t)mutex, 2, nval, uval, 0);
        mutex->m_tid = selfid;
        if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE)
            mutex->mtxopts.options.lock_count = 1;
        PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
        PLOCKSTAT_MUTEX_ERROR(omutex, error);

    (void)__kdebug_trace(_KSYN_TRACE_UM_LOCK | DBG_FUNC_END, (uint32_t)mutex, 0xfafafafa, 0, error, 0);
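
/*
 * Usage sketch (illustrative only): poll for the lock without blocking.
 *
 *     if (pthread_mutex_trylock(&m) == 0) {
 *         // ... critical section ...
 *         pthread_mutex_unlock(&m);
 *     } else {
 *         // EBUSY: another thread holds the lock; do other work
 *     }
 */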
/*
 * TODO: Priority inheritance stuff
 */
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
    npthread_mutex_t *mutex = (npthread_mutex_t *)omutex;
    uint32_t mtxgen, mtxugen, flags, notify, updateval;
    int sig = mutex->sig;
    volatile uint32_t *lseqaddr, *useqaddr;

    /* To provide backwards compat for apps using mutex incorrectly */

    (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_START, (uint32_t)mutex, 0, 0, 0, 0);

    if ((sig != _PTHREAD_MUTEX_SIG) && ((sig & _PTHREAD_MUTEX_SIG_init_MASK) != _PTHREAD_MUTEX_SIG_CMP)) {
        PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
    if (mutex->sig != _PTHREAD_MUTEX_SIG) {
        if ((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) == _PTHREAD_MUTEX_SIG_CMP) {
            /* static initializer, init the mutex */
            if ((retval = _pthread_mutex_init(omutex, NULL, (mutex->sig & 0xf))) != 0) {
                UNLOCK(mutex->lock);
                PLOCKSTAT_MUTEX_ERROR(omutex, retval);
        } else if (mutex->sig != _PTHREAD_MUTEX_SIG) {
            UNLOCK(mutex->lock);
            PLOCKSTAT_MUTEX_ERROR(omutex, EINVAL);
        UNLOCK(mutex->lock);

    MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);

    retval = __mtx_droplock(mutex, PTHRW_INC, &flags, NULL, &mtxgen, &mtxugen);

    if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
        firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
        self = pthread_self();
        (void)pthread_threadid_np(self, &selfid);

        (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_NONE, (uint32_t)mutex, 1, mtxgen, mtxugen, 0);

#if USE_COMPAGE /* [ */
        if ((updateval = __psynch_mutexdrop((pthread_mutex_t *)lseqaddr, mtxgen, mtxugen, mutex->m_tid, flags)) == (uint32_t)-1)
#else /* USECOMPAGE ][ */
        if ((updateval = __psynch_mutexdrop(omutex, mtxgen, mtxugen, mutex->m_tid, flags)) == (uint32_t)-1)
#endif /* USE_COMPAGE ] */
            (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_END, (uint32_t)mutex, retval, 0, 0, 0);
            else if (errno == EINTR)
                LIBC_ABORT("__p_mutexdrop failed with error %d\n", retval);
    } else if (firstfit == 1) {
        if ((updateval & PTH_RWL_PBIT) != 0) {
            __mtx_markprepost(mutex, updateval, firstfit);

    (void)__kdebug_trace(_KSYN_TRACE_UM_UNLOCK | DBG_FUNC_END, (uint32_t)mutex, 0, 0, 0, 0);
/*
 * Initialize a mutex variable, possibly with additional attributes.
 */
int
_pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr, uint32_t static_type)
    npthread_mutex_t *mutex = (npthread_mutex_t *)omutex;

    if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG)
    mutex->prioceiling = attr->prioceiling;
    mutex->mtxopts.options.protocol = attr->protocol;
    mutex->mtxopts.options.policy = attr->policy;
    mutex->mtxopts.options.type = attr->type;
    mutex->mtxopts.options.pshared = attr->pshared;

    switch (static_type) {
        mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
        mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
        /* firstfit fall thru */
        mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;

    mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
    mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
    if (static_type != 3)
        mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
        mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
    mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;

    mutex->mtxopts.options.notify = 0;
    mutex->mtxopts.options.rfu = 0;
    mutex->mtxopts.options.hold = 0;
    mutex->mtxopts.options.mutex = 1;
    mutex->mtxopts.options.lock_count = 0;
    /* address 8byte aligned? */
    if (((uintptr_t)mutex & 0x07) != 0) {
        mutex->mtxopts.options.misalign = 1;
#if defined(__LP64__)
        mutex->m_lseqaddr = &mutex->m_seq[0];
        mutex->m_useqaddr = &mutex->m_seq[1];
#else /* __LP64__ */
        mutex->m_lseqaddr = &mutex->m_seq[1];
        mutex->m_useqaddr = &mutex->m_seq[2];
#endif /* __LP64__ */
        mutex->mtxopts.options.misalign = 0;
#if defined(__LP64__)
        mutex->m_lseqaddr = &mutex->m_seq[1];
        mutex->m_useqaddr = &mutex->m_seq[2];
#else /* __LP64__ */
        mutex->m_lseqaddr = &mutex->m_seq[0];
        mutex->m_useqaddr = &mutex->m_seq[1];
#endif /* __LP64__ */

    mutex->m_seq[0] = 0;
    mutex->m_seq[1] = 0;
    mutex->m_seq[2] = 0;
    mutex->prioceiling = 0;
    mutex->priority = 0;
    /*
     * For the new style mutex, interlocks are not held all the time.
     * We need the signature to be set at the end, and we need to protect
     * against the compiler reordering the code.
     * mutex->sig = _PTHREAD_MUTEX_SIG;
     */
    __pthread_mutex_set_signature(mutex);
/*
 * Destroy a mutex variable.
 */
int
pthread_mutex_destroy(pthread_mutex_t *omutex)
    npthread_mutex_t *mutex = (npthread_mutex_t *)omutex;

    res = _pthread_mutex_destroy_locked(omutex);
    UNLOCK(mutex->lock);

static int
_pthread_mutex_destroy_locked(pthread_mutex_t *omutex)
    npthread_mutex_t *mutex = (npthread_mutex_t *)omutex;
    uint32_t lgenval, ugenval;
    volatile uint32_t *lseqaddr, *useqaddr;

    if (mutex->sig == _PTHREAD_MUTEX_SIG)
        MUTEX_GETSEQ_ADDR(mutex, lseqaddr, useqaddr);
        lgenval = *(lseqaddr);
        ugenval = *(useqaddr);
        if ((mutex->m_tid == (uint64_t)0) &&
            ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)))
            mutex->sig = _PTHREAD_NO_SIG;
    } else if ((mutex->sig & _PTHREAD_MUTEX_SIG_init_MASK) == _PTHREAD_MUTEX_SIG_CMP) {
        mutex->sig = _PTHREAD_NO_SIG;
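
/*
 * Usage sketch (illustrative only): destroy succeeds only for an unlocked
 * mutex with no pending waiters; otherwise EBUSY is reported.
 *
 *     pthread_mutex_lock(&m);
 *     r = pthread_mutex_destroy(&m);   // r == EBUSY, mutex still usable
 *     pthread_mutex_unlock(&m);
 *     r = pthread_mutex_destroy(&m);   // r == 0
 */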
#endif /* !BUILDING_VARIANT ] */

/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
#if __DARWIN_UNIX03
    if (__unix_conforming == 0)
        __unix_conforming = 1;
    if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG)
#endif /* __DARWIN_UNIX03 */

    attr->sig = _PTHREAD_NO_SIG; /* Uninitialized */