/*
 * Copyright (c) 2000-2003, 2007, 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright 1996 1995 by Open Software Foundation, Inc. 1997 1996 1995 1994 1993 1992 1991
 *              All Rights Reserved
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby granted,
 * provided that the above copyright notice appears in all copies and
 * that both the copyright notice and this permission notice appear in
 * supporting documentation.
 *
 * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE.
 *
 * IN NO EVENT SHALL OSF BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
 * NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
 * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/*
 * POSIX Pthread Library
 * -- Mutex variable support
 */
#include "resolver.h"
#include "internal.h"
#include "kern/kern_trace.h"
#include <sys/syscall.h>

#ifdef PLOCKSTAT
#include "plockstat.h"
#else /* !PLOCKSTAT */
#define PLOCKSTAT_MUTEX_SPIN(x)
#define PLOCKSTAT_MUTEX_SPUN(x, y, z)
#define PLOCKSTAT_MUTEX_ERROR(x, y)
#define PLOCKSTAT_MUTEX_BLOCK(x)
#define PLOCKSTAT_MUTEX_BLOCKED(x, y)
#define PLOCKSTAT_MUTEX_ACQUIRE(x, y, z)
#define PLOCKSTAT_MUTEX_RELEASE(x, y)
#endif /* PLOCKSTAT */
#define PTHREAD_MUTEX_INIT_UNUSED 1

extern int __unix_conforming;
#ifndef BUILDING_VARIANT

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int
_pthread_mutex_unlock_slow(pthread_mutex_t *omutex);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int
_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock);

PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into _pthread_mutex_lock
int
_pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t oldtid);

#endif /* BUILDING_VARIANT */
#define DEBUG_TRACE_POINTS 0

#if DEBUG_TRACE_POINTS
extern int __syscall(int number, ...);
#define DEBUG_TRACE(x, a, b, c, d) __syscall(SYS_kdebug_trace, TRACE_##x, a, b, c, d)
#else
#define DEBUG_TRACE(x, a, b, c, d) do { } while (0)
#endif

#include <machine/cpu_capabilities.h>
static inline int _pthread_mutex_init(_pthread_mutex *mutex,
		const pthread_mutexattr_t *attr, uint32_t static_type);

#if !__LITTLE_ENDIAN__
#error MUTEX_GETSEQ_ADDR assumes little endian layout of 2 32-bit sequence words
#endif
PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETSEQ_ADDR(_pthread_mutex *mutex,
		volatile uint64_t **seqaddr)
{
	// 64-bit aligned address inside m_seq array (&m_seq[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*seqaddr = (volatile uint64_t*)(((uintptr_t)mutex->m_seq + 0x7ul) & ~0x7ul);
}
PTHREAD_ALWAYS_INLINE
static inline void
MUTEX_GETTID_ADDR(_pthread_mutex *mutex,
		volatile uint64_t **tidaddr)
{
	// 64-bit aligned address inside m_tid array (&m_tid[0] for aligned mutex)
	// We don't require more than byte alignment on OS X. rdar://22278325
	*tidaddr = (volatile uint64_t*)(((uintptr_t)mutex->m_tid + 0x7ul) & ~0x7ul);
}
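
/*
 * Illustrative note (not part of the original source): the round-up-and-mask
 * arithmetic above picks out the 64-bit aligned word that lies inside the
 * sequence/tid arrays. For a hypothetical mutex whose m_seq array starts at
 * address 0x100c:
 *
 *	(0x100c + 0x7ul) & ~0x7ul  ==  0x1013 & ~0x7ul  ==  0x1010
 *
 * 0x1010 is the first 8-byte aligned address at or after 0x100c, so both
 * 32-bit sequence words can be loaded and CAS'd as a single aligned uint64_t
 * even though the mutex itself is only byte-aligned (rdar://22278325).
 */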
#ifndef BUILDING_VARIANT /* [ */
#ifndef OS_UP_VARIANT_ONLY

#define BLOCK_FAIL_PLOCKSTAT    0
#define BLOCK_SUCCESS_PLOCKSTAT 1

/* This function is never called and exists to provide never-fired dtrace
 * probes so that user d scripts don't get errors.
 */
PTHREAD_NOEXPORT PTHREAD_USED
void
_plockstat_never_fired(void)
{
	PLOCKSTAT_MUTEX_SPIN(NULL);
	PLOCKSTAT_MUTEX_SPUN(NULL, 0, 0);
}
/*
 * Initialize a mutex variable, possibly with additional attributes.
 * Public interface - so don't trust the lock - initialize it first.
 */
int
pthread_mutex_init(pthread_mutex_t *omutex, const pthread_mutexattr_t *attr)
{
#if 0
	/* conformance tests depend on not having this behavior */
	/* The test for this behavior is optional */
	if (_pthread_mutex_check_signature(mutex))
		return EBUSY;
#endif
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	_PTHREAD_LOCK_INIT(mutex->lock);
	return (_pthread_mutex_init(mutex, attr, 0x7));
}
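
/*
 * Illustrative usage (not part of the original source): callers typically
 * pair this entry point with the attribute API defined below, e.g.
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
 *	pthread_mutex_init(&m, &attr);
 *	pthread_mutexattr_destroy(&attr);
 *
 * Passing NULL for attr uses the defaults established in
 * pthread_mutexattr_init().
 */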
int
pthread_mutex_getprioceiling(const pthread_mutex_t *omutex, int *prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		*prioceiling = mutex->prioceiling;
		res = 0;
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}
int
pthread_mutex_setprioceiling(pthread_mutex_t *omutex, int prioceiling,
		int *old_prioceiling)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (_pthread_mutex_check_signature(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (prioceiling >= -999 && prioceiling <= 999) {
			*old_prioceiling = mutex->prioceiling;
			mutex->prioceiling = prioceiling;
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	}
	return res;
}
int
pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *attr, int *prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*prioceiling = attr->prioceiling;
		res = 0;
	}
	return res;
}
int
pthread_mutexattr_getprotocol(const pthread_mutexattr_t *attr, int *protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*protocol = attr->protocol;
		res = 0;
	}
	return res;
}
int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*type = attr->type;
		res = 0;
	}
	return res;
}
int
pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		*pshared = (int)attr->pshared;
		res = 0;
	}
	return res;
}
int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	attr->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
	attr->protocol = _PTHREAD_DEFAULT_PROTOCOL;
	attr->policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
	attr->type = PTHREAD_MUTEX_DEFAULT;
	attr->sig = _PTHREAD_MUTEX_ATTR_SIG;
	attr->pshared = _PTHREAD_DEFAULT_PSHARED;
	return 0;
}
int
pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr, int prioceiling)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		if (prioceiling >= -999 && prioceiling <= 999) {
			attr->prioceiling = prioceiling;
			res = 0;
		}
	}
	return res;
}
int
pthread_mutexattr_setprotocol(pthread_mutexattr_t *attr, int protocol)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (protocol) {
			case PTHREAD_PRIO_NONE:
			case PTHREAD_PRIO_INHERIT:
			case PTHREAD_PRIO_PROTECT:
				attr->protocol = protocol;
				res = 0;
				break;
		}
	}
	return res;
}
int
pthread_mutexattr_setpolicy_np(pthread_mutexattr_t *attr, int policy)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (policy) {
			case _PTHREAD_MUTEX_POLICY_FAIRSHARE:
			case _PTHREAD_MUTEX_POLICY_FIRSTFIT:
				attr->policy = policy;
				res = 0;
				break;
		}
	}
	return res;
}
int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	int res = EINVAL;
	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
		switch (type) {
			case PTHREAD_MUTEX_NORMAL:
			case PTHREAD_MUTEX_ERRORCHECK:
			case PTHREAD_MUTEX_RECURSIVE:
			//case PTHREAD_MUTEX_DEFAULT:
				attr->type = type;
				res = 0;
				break;
		}
	}
	return res;
}
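
/*
 * Illustrative note (not part of the original source): the commented-out
 * PTHREAD_MUTEX_DEFAULT case is redundant because PTHREAD_MUTEX_DEFAULT is
 * defined as PTHREAD_MUTEX_NORMAL on this platform, so the first case label
 * already accepts it. A caller requesting a recursive mutex would do:
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 */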
int
pthread_yield_np(void)
{
	sched_yield();
	return 0;
}

/*
 * Temp: till pshared is fixed correctly
 */
int
pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
	int res = EINVAL;
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
#endif /* __DARWIN_UNIX03 */

	if (attr->sig == _PTHREAD_MUTEX_ATTR_SIG) {
#if __DARWIN_UNIX03
		if ((pshared == PTHREAD_PROCESS_PRIVATE) || (pshared == PTHREAD_PROCESS_SHARED))
#else /* __DARWIN_UNIX03 */
		if (pshared == PTHREAD_PROCESS_PRIVATE)
#endif /* __DARWIN_UNIX03 */
		{
			attr->pshared = pshared;
			res = 0;
		}
	}
	return res;
}
PTHREAD_NOEXPORT PTHREAD_WEAK // prevent inlining of return value into callers
int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex);

int
_pthread_mutex_corruption_abort(_pthread_mutex *mutex)
{
	PTHREAD_ABORT("pthread_mutex corruption: mutex %p owner changed in the middle of lock/unlock", mutex);
	return EINVAL; // NOTREACHED
}
/*
 * Sequence numbers and TID:
 *
 * In steady (and uncontended) state, an unlocked mutex will
 * look like A=[L4 U4 TID0]. When it is being locked, it transitions
 * to B=[L5+KE U4 TID0] and then C=[L5+KE U4 TID940]. For an uncontended mutex,
 * the unlock path will then transition to D=[L5 U4 TID0] and then finally
 * E=[L5 U5 TID0].
 *
 * If a contender comes in after B, the mutex will instead transition to E=[L6+KE U4 TID0]
 * and then F=[L6+KE U4 TID940]. If a contender comes in after C, it will transition to
 * F=[L6+KE U4 TID940] directly. In both cases, the contender will enter the kernel with either
 * mutexwait(U4, TID0) or mutexwait(U4, TID940). The first owner will unlock the mutex
 * by first updating the owner to G=[L6+KE U4 TID-1] and then doing the actual unlock to
 * H=[L6+KE U5 TID=-1] before entering the kernel with mutexdrop(U5, -1) to signal the next waiter
 * (potentially as a prepost). When the waiter comes out of the kernel, it will update the owner to
 * I=[L6+KE U5 TID941]. An unlock at this point is simply J=[L6 U5 TID0] and then K=[L6 U6 TID0].
 *
 * At various points along these timelines, since the sequence words and TID are written independently,
 * a thread may get preempted and another thread might see inconsistent data. In the worst case, another
 * thread may see the TID in the SWITCHING (-1) state or unlocked (0) state for longer because the
 * owning thread was preempted.
 */
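
/*
 * Illustrative note (not part of the original source): in the [Ln Um]
 * notation above, each lock advances the L (lock) sequence word by PTHRW_INC
 * and each unlock advances the U (unlock) sequence word by PTHRW_INC, so
 * diff_genseq(lgenval, ugenval) measures the distance between the two
 * counters. In state F=[L6+KE U4 TID940] that distance covers two
 * increments: one for the current owner and one for the contender parked in
 * mutexwait(). A distance of zero means there is nothing to release, which
 * is why the unlock paths below treat that case as a spurious unlock.
 */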
/*
 * Drop the mutex unlock references from cond_wait or mutex_unlock.
 */
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_unlock_updatebits(_pthread_mutex *mutex, uint32_t *flagsp,
		uint32_t **pmtxp, uint32_t *mgenp, uint32_t *ugenp)
{
	bool firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	uint32_t lgenval, ugenval, flags;
	uint64_t oldtid, newtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	flags = mutex->mtxopts.value;
	flags &= ~_PTHREAD_MTX_OPT_NOTIFY; // no notification by default

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		uint64_t selfid = _pthread_selfid_direct();

		if (*tidaddr != selfid) {
			//PTHREAD_ABORT("dropping recur or error mutex not owned by the thread");
			PLOCKSTAT_MUTEX_ERROR((pthread_mutex_t *)mutex, EPERM);
			return EPERM;
		} else if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE &&
			   --mutex->mtxopts.options.lock_count) {
			PLOCKSTAT_MUTEX_RELEASE((pthread_mutex_t *)mutex, 1);
			if (flagsp != NULL) {
				*flagsp = flags;
			}
			return 0;
		}
	}

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	bool clearprepost, clearnotify, spurious;
	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		clearprepost = false;
		clearnotify = false;
		spurious = false;

		int numwaiters = diff_genseq(lgenval, ugenval); // pending waiters

		if (numwaiters == 0) {
			// spurious unlock; do not touch tid
			spurious = true;
		} else {
			ugenval += PTHRW_INC;

			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches to lock sequence, so if the CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
				clearnotify = true;
				newtid = 0; // clear owner
			} else {
				if (firstfit) {
					lgenval &= ~PTH_RWL_EBIT; // reset E bit so another can acquire meanwhile
					newtid = 0;
				} else {
					newtid = PTHREAD_MTX_TID_SWITCHING;
				}
				// need to signal others waiting for mutex
				flags |= _PTHREAD_MTX_OPT_NOTIFY;
			}

			if (newtid != oldtid) {
				// We're giving up the mutex one way or the other, so go ahead and update the owner to SWITCHING
				// or 0 so that once the CAS below succeeds, there is no stale ownership information.
				// If the CAS of the seqaddr fails, we may loop, but it's still valid for the owner
				// to be SWITCHING/0.
				if (!os_atomic_cmpxchg(tidaddr, oldtid, newtid, relaxed)) {
					// we own this mutex, nobody should be updating it except us
					return _pthread_mutex_corruption_abort(mutex);
				}
			}
		}

		if (clearnotify || spurious) {
			flags &= ~_PTHREAD_MTX_OPT_NOTIFY;
			if (firstfit && ((lgenval & PTH_RWL_PBIT) != 0)) {
				clearprepost = true;
				lgenval &= ~PTH_RWL_PBIT;
			}
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

	if (clearprepost) {
		__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
	}

	if (mgenp != NULL) {
		*mgenp = lgenval;
	}
	if (ugenp != NULL) {
		*ugenp = ugenval;
	}
	if (pmtxp != NULL) {
		*pmtxp = (uint32_t *)mutex;
	}
	if (flagsp != NULL) {
		*flagsp = flags;
	}

	return 0;
}

int
__mtx_droplock(_pthread_mutex *mutex, uint32_t *flagsp, uint32_t **pmtxp,
		uint32_t *mgenp, uint32_t *ugenp)
{
	return _pthread_mutex_unlock_updatebits(mutex, flagsp, pmtxp, mgenp, ugenp);
}
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock_updatebits(_pthread_mutex *mutex, uint64_t selfid)
{
	int res = 0;
	int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	int isebit = 0;

	uint32_t lgenval, ugenval;
	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t oldtid;

	do {
		do {
			oldval64 = *seqaddr;
			oldtid = *tidaddr;
			lgenval = (uint32_t)oldval64;
			ugenval = (uint32_t)(oldval64 >> 32);

			// E bit was set on first pass through the loop but is no longer
			// set. Apparently we spin until it arrives.
			// XXX: verify this is desired behavior.
		} while (isebit && (lgenval & PTH_RWL_EBIT) == 0);

		if (isebit) {
			// first fit mutex now has the E bit set. Return 1.
			res = 1;
			break;
		}

		if (firstfit) {
			isebit = (lgenval & PTH_RWL_EBIT) != 0;
		} else if ((lgenval & (PTH_RWL_KBIT|PTH_RWL_EBIT)) == (PTH_RWL_KBIT|PTH_RWL_EBIT)) {
			// fairshare mutex and the bits are already set, just update tid
			break;
		}

		// either first fit or no E bit set
		// update the bits
		lgenval |= PTH_RWL_KBIT | PTH_RWL_EBIT;

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

		// Retry if CAS fails, or if it succeeds with firstfit and E bit already set
	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire) || (firstfit && isebit));

	if (res == 0) {
		if (!os_atomic_cmpxchg(tidaddr, oldtid, selfid, relaxed)) {
			// we own this mutex, nobody should be updating it except us
			return _pthread_mutex_corruption_abort(mutex);
		}
	}

	return res;
}
int
__mtx_markprepost(_pthread_mutex *mutex, uint32_t updateval, int firstfit)
{
	uint32_t flags;
	uint32_t lgenval, ugenval;
	uint64_t oldval64, newval64;

	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	if (firstfit != 0 && (updateval & PTH_RWL_PBIT) != 0) {
		int clearprepost;
		do {
			clearprepost = 0;

			flags = mutex->mtxopts.value;

			oldval64 = *seqaddr;
			lgenval = (uint32_t)oldval64;
			ugenval = (uint32_t)(oldval64 >> 32);

			/* update the bits */
			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				clearprepost = 1;
				lgenval &= ~PTH_RWL_PBIT;
			} else {
				lgenval |= PTH_RWL_PBIT;
			}
			newval64 = (((uint64_t)ugenval) << 32);
			newval64 |= lgenval;
		} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

		if (clearprepost != 0) {
			__psynch_cvclrprepost(mutex, lgenval, ugenval, 0, 0, lgenval, (flags | _PTHREAD_MTX_OPT_MUTEX));
		}
	}
	return 0;
}
static int
_pthread_mutex_check_init_slow(pthread_mutex_t *omutex)
{
	int res = EINVAL;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (_pthread_mutex_check_signature_init(mutex)) {
		_PTHREAD_LOCK(mutex->lock);
		if (_pthread_mutex_check_signature_init(mutex)) {
			// initialize a statically initialized mutex to provide
			// compatibility for misbehaving applications.
			// (unlock should not be the first operation on a mutex)
			res = _pthread_mutex_init(mutex, NULL, (mutex->sig & 0xf));
		} else if (_pthread_mutex_check_signature(mutex)) {
			res = 0;
		}
		_PTHREAD_UNLOCK(mutex->lock);
	} else if (_pthread_mutex_check_signature(mutex)) {
		res = 0;
	}
	if (res != 0) {
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	}
	return res;
}

PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_check_init(pthread_mutex_t *omutex)
{
	int res = 0;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	if (!_pthread_mutex_check_signature(mutex)) {
		return _pthread_mutex_check_init_slow(omutex);
	}
	return res;
}
int
_pthread_mutex_lock_wait(pthread_mutex_t *omutex, uint64_t newval64, uint64_t oldtid)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t lgenval = (uint32_t)newval64;
	uint32_t ugenval = (uint32_t)(newval64 >> 32);

	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	PLOCKSTAT_MUTEX_BLOCK(omutex);
	do {
		uint32_t updateval;
		do {
			updateval = __psynch_mutexwait(omutex, lgenval, ugenval, oldtid, mutex->mtxopts.value);
			oldtid = *tidaddr;
		} while (updateval == (uint32_t)-1);

		// returns 0 on successful update; in firstfit it may fail with 1
	} while (_pthread_mutex_lock_updatebits(mutex, selfid) == 1);
	PLOCKSTAT_MUTEX_BLOCKED(omutex, BLOCK_SUCCESS_PLOCKSTAT);

	return 0;
}
int
_pthread_mutex_lock_slow(pthread_mutex_t *omutex, bool trylock)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	res = _pthread_mutex_check_init(omutex);
	if (res != 0) {
		return res;
	}

	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	if (mutex->mtxopts.options.type != PTHREAD_MUTEX_NORMAL) {
		if (*tidaddr == selfid) {
			if (mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
				if (mutex->mtxopts.options.lock_count < USHRT_MAX) {
					mutex->mtxopts.options.lock_count++;
					PLOCKSTAT_MUTEX_ACQUIRE(omutex, 1, 0);
					res = 0;
				} else {
					res = EAGAIN;
					PLOCKSTAT_MUTEX_ERROR(omutex, res);
				}
			} else if (trylock) { /* PTHREAD_MUTEX_ERRORCHECK */
				// <rdar://problem/16261552> as per OpenGroup, trylock cannot
				// return EDEADLK on a deadlock, it should return EBUSY.
				res = EBUSY;
				PLOCKSTAT_MUTEX_ERROR(omutex, res);
			} else { /* PTHREAD_MUTEX_ERRORCHECK */
				res = EDEADLK;
				PLOCKSTAT_MUTEX_ERROR(omutex, res);
			}
			return res;
		}
	}

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;
	bool gotlock = false;

	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		gotlock = ((lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			lgenval += PTHRW_INC;
			lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire));

	if (gotlock) {
		os_atomic_store(tidaddr, selfid, relaxed);
		res = 0;
		DEBUG_TRACE(psynch_mutex_ulock, omutex, lgenval, ugenval, selfid);
		PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);
	} else if (trylock) {
		res = EBUSY;
		DEBUG_TRACE(psynch_mutex_utrylock_failed, omutex, lgenval, ugenval, oldtid);
		PLOCKSTAT_MUTEX_ERROR(omutex, res);
	} else {
		res = _pthread_mutex_lock_wait(omutex, newval64, oldtid);
	}

	if (res == 0 && mutex->mtxopts.options.type == PTHREAD_MUTEX_RECURSIVE) {
		mutex->mtxopts.options.lock_count = 1;
	}

	PLOCKSTAT_MUTEX_ACQUIRE(omutex, 0, 0);

	return res;
}

#endif // OS_UP_VARIANT_ONLY
PTHREAD_ALWAYS_INLINE
static inline int
_pthread_mutex_lock(pthread_mutex_t *omutex, bool trylock)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_ACQUIRE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}
#endif

	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (!_pthread_mutex_check_signature_fast(mutex)) {
		return _pthread_mutex_lock_slow(omutex, trylock);
	}

	uint64_t oldtid;
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
	uint64_t selfid = _pthread_selfid_direct();

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;
	bool gotlock = false;

	do {
		oldval64 = *seqaddr;
		oldtid = *tidaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		gotlock = ((lgenval & PTH_RWL_EBIT) == 0);

		if (trylock && !gotlock) {
			// A trylock on a held lock will fail immediately. But since
			// we did not load the sequence words atomically, perform a
			// no-op CAS64 to ensure that nobody has unlocked concurrently.
		} else {
			// Increment the lock sequence number and force the lock into E+K
			// mode, whether "gotlock" is true or not.
			lgenval += PTHRW_INC;
			lgenval |= PTH_RWL_EBIT | PTH_RWL_KBIT;
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, acquire));

	if (os_fastpath(gotlock)) {
		os_atomic_store(tidaddr, selfid, relaxed);
		return 0;
	} else if (trylock) {
		return EBUSY;
	} else {
		return _pthread_mutex_lock_wait(omutex, newval64, oldtid);
	}
}
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, false);
}

PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	return _pthread_mutex_lock(mutex, true);
}
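
/*
 * Illustrative usage (not part of the original source): the trylock variant
 * returns EBUSY instead of blocking, so callers commonly fall back to other
 * work or to a blocking lock:
 *
 *	if (pthread_mutex_trylock(&m) == EBUSY) {
 *		// lock is held elsewhere; do something else, then block
 *		pthread_mutex_lock(&m);
 *	}
 *	// ...critical section...
 *	pthread_mutex_unlock(&m);
 */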
#ifndef OS_UP_VARIANT_ONLY

/*
 * TODO: Priority inheritance stuff
 */

static int
_pthread_mutex_unlock_drop(pthread_mutex_t *omutex, uint64_t newval64, uint32_t flags)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t lgenval = (uint32_t)newval64;
	uint32_t ugenval = (uint32_t)(newval64 >> 32);

	uint32_t updateval;
	int firstfit = (mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FIRSTFIT);
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	updateval = __psynch_mutexdrop(omutex, lgenval, ugenval, *tidaddr, flags);

	if (updateval == (uint32_t)-1) {
		res = errno;
		if (res == EINTR) {
			res = 0;
		}
		if (res != 0) {
			PTHREAD_ABORT("__p_mutexdrop failed with error %d", res);
		}
		return res;
	} else if (firstfit == 1) {
		if ((updateval & PTH_RWL_PBIT) != 0) {
			__mtx_markprepost(mutex, updateval, firstfit);
		}
	}

	return 0;
}
int
_pthread_mutex_unlock_slow(pthread_mutex_t *omutex)
{
	int res;
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	uint32_t mtxgen, mtxugen, flags;

	// Initialize static mutexes for compatibility with misbehaving
	// applications (unlock should not be the first operation on a mutex).
	res = _pthread_mutex_check_init(omutex);
	if (res != 0) {
		return res;
	}

	res = _pthread_mutex_unlock_updatebits(mutex, &flags, NULL, &mtxgen, &mtxugen);
	if (res != 0) {
		return res;
	}

	if ((flags & _PTHREAD_MTX_OPT_NOTIFY) != 0) {
		uint64_t newval64;
		newval64 = (((uint64_t)mtxugen) << 32);
		newval64 |= mtxgen;
		return _pthread_mutex_unlock_drop(omutex, newval64, flags);
	} else {
		volatile uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);
		DEBUG_TRACE(psynch_mutex_uunlock, omutex, mtxgen, mtxugen, *tidaddr);
	}

	return 0;
}

#endif // OS_UP_VARIANT_ONLY
PTHREAD_NOEXPORT_VARIANT
int
pthread_mutex_unlock(pthread_mutex_t *omutex)
{
#if PLOCKSTAT || DEBUG_TRACE_POINTS
	if (PLOCKSTAT_MUTEX_RELEASE_ENABLED() || PLOCKSTAT_MUTEX_ERROR_ENABLED() ||
			DEBUG_TRACE_POINTS) {
		return _pthread_mutex_unlock_slow(omutex);
	}
#endif

	_pthread_mutex *mutex = (_pthread_mutex *)omutex;
	if (!_pthread_mutex_check_signature_fast(mutex)) {
		return _pthread_mutex_unlock_slow(omutex);
	}

	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);

	uint64_t oldval64, newval64;
	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);

	uint32_t lgenval, ugenval;

	do {
		oldval64 = *seqaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);

		int numwaiters = diff_genseq(lgenval, ugenval); // pending waiters

		if (numwaiters == 0) {
			// spurious unlock; do not touch tid
		} else {
			ugenval += PTHRW_INC;

			if ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK)) {
				// our unlock sequence matches to lock sequence, so if the CAS is successful, the mutex is unlocked

				/* do not reset Ibit, just K&E */
				lgenval &= ~(PTH_RWL_KBIT | PTH_RWL_EBIT);
			} else {
				return _pthread_mutex_unlock_slow(omutex);
			}

			// We're giving up the mutex one way or the other, so go ahead and update the owner
			// to 0 so that once the CAS below succeeds, there is no stale ownership information.
			// If the CAS of the seqaddr fails, we may loop, but it's still valid for the owner
			// to be 0.
			os_atomic_store(tidaddr, 0, relaxed);
		}

		newval64 = (((uint64_t)ugenval) << 32);
		newval64 |= lgenval;

	} while (!os_atomic_cmpxchg(seqaddr, oldval64, newval64, release));

	return 0;
}
#ifndef OS_UP_VARIANT_ONLY

static int
_pthread_mutex_init(_pthread_mutex *mutex, const pthread_mutexattr_t *attr,
		uint32_t static_type)
{
	mutex->mtxopts.value = 0;
	mutex->mtxopts.options.mutex = 1;
	if (attr) {
		if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
			return EINVAL;
		}
		mutex->prioceiling = attr->prioceiling;
		mutex->mtxopts.options.protocol = attr->protocol;
		mutex->mtxopts.options.policy = attr->policy;
		mutex->mtxopts.options.type = attr->type;
		mutex->mtxopts.options.pshared = attr->pshared;
	} else {
		switch (static_type) {
			case 1:
				mutex->mtxopts.options.type = PTHREAD_MUTEX_ERRORCHECK;
				break;
			case 2:
				mutex->mtxopts.options.type = PTHREAD_MUTEX_RECURSIVE;
				break;
			case 3:
				/* firstfit fall thru */
			case 7:
				mutex->mtxopts.options.type = PTHREAD_MUTEX_DEFAULT;
				break;
			default:
				return EINVAL;
		}

		mutex->prioceiling = _PTHREAD_DEFAULT_PRIOCEILING;
		mutex->mtxopts.options.protocol = _PTHREAD_DEFAULT_PROTOCOL;
		if (static_type != 3) {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FAIRSHARE;
		} else {
			mutex->mtxopts.options.policy = _PTHREAD_MUTEX_POLICY_FIRSTFIT;
		}
		mutex->mtxopts.options.pshared = _PTHREAD_DEFAULT_PSHARED;
	}
	mutex->priority = 0;

	volatile uint64_t *seqaddr;
	MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
	volatile uint64_t *tidaddr;
	MUTEX_GETTID_ADDR(mutex, &tidaddr);
#if PTHREAD_MUTEX_INIT_UNUSED
	if ((uint32_t*)tidaddr != mutex->m_tid) {
		mutex->mtxopts.options.misalign = 1;
		__builtin_memset(mutex->m_tid, 0xff, sizeof(mutex->m_tid));
	}
	__builtin_memset(mutex->m_mis, 0xff, sizeof(mutex->m_mis));
#endif // PTHREAD_MUTEX_INIT_UNUSED
	*tidaddr = 0;
	*seqaddr = 0;

	long sig = _PTHREAD_MUTEX_SIG;
	if (mutex->mtxopts.options.type == PTHREAD_MUTEX_NORMAL &&
			mutex->mtxopts.options.policy == _PTHREAD_MUTEX_POLICY_FAIRSHARE) {
		// rdar://18148854 _pthread_mutex_lock & pthread_mutex_unlock fastpath
		sig = _PTHREAD_MUTEX_SIG_fast;
	}

#if PTHREAD_MUTEX_INIT_UNUSED
	// For detecting copied mutexes and smashes during debugging
	uint32_t sig32 = (uint32_t)sig;
#if defined(__LP64__)
	uintptr_t guard = ~(uintptr_t)mutex; // use ~ to hide from leaks
	__builtin_memcpy(mutex->_reserved, &guard, sizeof(guard));
	mutex->_reserved[2] = sig32;
	mutex->_reserved[3] = sig32;
	mutex->_pad = sig32;
#else
	mutex->_reserved[0] = sig32;
#endif
#endif // PTHREAD_MUTEX_INIT_UNUSED

	// Ensure all contents are properly set before setting signature.
#if defined(__LP64__)
	// For binary compatibility reasons we cannot require natural alignment of
	// the 64bit 'sig' long value in the struct. rdar://problem/21610439
	uint32_t *sig32_ptr = (uint32_t*)&mutex->sig;
	uint32_t *sig32_val = (uint32_t*)&sig;
	*(sig32_ptr + 1) = *(sig32_val + 1);
	os_atomic_store(sig32_ptr, *sig32_val, release);
#else
	os_atomic_store2o(mutex, sig, sig, release);
#endif

	return 0;
}
int
pthread_mutex_destroy(pthread_mutex_t *omutex)
{
	_pthread_mutex *mutex = (_pthread_mutex *)omutex;

	int res = EINVAL;

	_PTHREAD_LOCK(mutex->lock);
	if (_pthread_mutex_check_signature(mutex)) {
		uint32_t lgenval, ugenval;
		uint64_t oldval64;
		volatile uint64_t *seqaddr;
		MUTEX_GETSEQ_ADDR(mutex, &seqaddr);
		volatile uint64_t *tidaddr;
		MUTEX_GETTID_ADDR(mutex, &tidaddr);

		oldval64 = *seqaddr;
		lgenval = (uint32_t)oldval64;
		ugenval = (uint32_t)(oldval64 >> 32);
		if ((*tidaddr == (uint64_t)0) &&
		    ((lgenval & PTHRW_COUNT_MASK) == (ugenval & PTHRW_COUNT_MASK))) {
			mutex->sig = _PTHREAD_NO_SIG;
			res = 0;
		} else {
			res = EBUSY;
		}
	} else if (_pthread_mutex_check_signature_init(mutex)) {
		mutex->sig = _PTHREAD_NO_SIG;
		res = 0;
	}
	_PTHREAD_UNLOCK(mutex->lock);

	return res;
}
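
/*
 * Illustrative note (not part of the original source): destroy only succeeds
 * when the mutex is at rest (owner TID is 0 and the lock/unlock sequence
 * counts match); otherwise it returns EBUSY and the signature is preserved:
 *
 *	pthread_mutex_lock(&m);
 *	int r = pthread_mutex_destroy(&m);	// r == EBUSY, mutex still usable
 *	pthread_mutex_unlock(&m);
 *	r = pthread_mutex_destroy(&m);		// r == 0, sig is now _PTHREAD_NO_SIG
 */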
#endif // OS_UP_VARIANT_ONLY

#endif /* !BUILDING_VARIANT ] */

#ifndef OS_UP_VARIANT_ONLY
/*
 * Destroy a mutex attribute structure.
 */
int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
#if __DARWIN_UNIX03
	if (__unix_conforming == 0) {
		__unix_conforming = 1;
	}
	if (attr->sig != _PTHREAD_MUTEX_ATTR_SIG) {
		return EINVAL;
	}
#endif /* __DARWIN_UNIX03 */

	attr->sig = _PTHREAD_NO_SIG;
	return 0;
}

#endif // OS_UP_VARIANT_ONLY