/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
#define OS_UNFAIR_LOCK_INLINE 1
#include "lock_internal.h"
#include "os/internal.h"

#include "libkern/OSAtomic.h"

#include "os/lock_private.h"
#include "os/once_private.h"

#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/thread_switch.h>
#include <mach/mach_time.h>
#pragma mark _os_lock_base_t

OS_NOINLINE OS_NORETURN OS_COLD
void _os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value);


OS_LOCK_STRUCT_DECL_INTERNAL(base);
OS_USED static OS_LOCK_TYPE_STRUCT_DECL(base);
void
os_lock_lock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_lock(l);
}

bool
os_lock_trylock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_trylock(l);
}

void
os_lock_unlock(os_lock_t l)
{
	return l._osl_base->osl_type->osl_unlock(l);
}
OS_NOINLINE OS_NORETURN OS_COLD
void
_os_lock_corruption_abort(void *lock_ptr OS_UNUSED, uintptr_t lock_value)
{
	__LIBPLATFORM_CLIENT_CRASH__(lock_value, "os_lock is corrupt");
}
#pragma mark OSSpinLock

OS_NOEXPORT OS_NOINLINE void _OSSpinLockLockSlow(volatile OSSpinLock *l);

OS_ATOMIC_EXPORT void OSSpinLockLock(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT bool OSSpinLockTry(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT int spin_lock_try(volatile OSSpinLock *l);
OS_ATOMIC_EXPORT void OSSpinLockUnlock(volatile OSSpinLock *l);

#if TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR
static const OSSpinLock _OSSpinLockLocked = 1;
#else
static const OSSpinLock _OSSpinLockLocked = -1;
#endif

#define OS_LOCK_SPIN_SPIN_TRIES 1000
#define OS_LOCK_SPIN_PAUSE() os_hardware_pause()
OS_ALWAYS_INLINE
static uint64_t
_os_lock_yield_deadline(mach_msg_timeout_t timeout)
{
	uint64_t abstime = timeout * NSEC_PER_MSEC;
#if !(defined(__i386__) || defined(__x86_64__))
	mach_timebase_info_data_t tbi;
	kern_return_t kr = mach_timebase_info(&tbi);
	if (kr) return UINT64_MAX;
	// Convert nanoseconds to mach absolute time units
	abstime *= tbi.denom;
	abstime /= tbi.numer;
#endif
	return mach_absolute_time() + abstime;
}

OS_ALWAYS_INLINE
static bool
_os_lock_yield_until(uint64_t deadline)
{
	return mach_absolute_time() < deadline;
}
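
/*
 * Editor's note: a standalone sketch of the millisecond-to-absolute-time
 * conversion above, useful for sanity-checking the math. On x86 the timebase
 * is 1:1; elsewhere nanoseconds are scaled by denom/numer. E.g. with a
 * (hypothetical, illustrative) tbi of { numer = 125, denom = 3 }:
 * 1ms = 1000000ns * 3 / 125 = 24000 absolute time units.
 *
 *	#include <mach/mach_time.h>
 *
 *	static uint64_t
 *	ms_to_abstime(uint64_t ms)
 *	{
 *		mach_timebase_info_data_t tbi;
 *		if (mach_timebase_info(&tbi)) return UINT64_MAX;
 *		return ms * 1000000ull /\* NSEC_PER_MSEC *\/ * tbi.denom / tbi.numer;
 *	}
 */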
OS_NOINLINE
static void
_OSSpinLockLockYield(volatile OSSpinLock *l)
{
	int option = SWITCH_OPTION_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint64_t deadline = _os_lock_yield_deadline(timeout);
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_yield:
		if (unlikely(lock != _OSSpinLockLocked)) {
			_os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		thread_switch(MACH_PORT_NULL, option, timeout);
		if (option == SWITCH_OPTION_WAIT) {
			timeout++;
		} else if (!_os_lock_yield_until(deadline)) {
			option = SWITCH_OPTION_WAIT;
		}
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _yield;
}

#if OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	return _OSSpinLockLockYield(l); // Don't spin on UP
}
#else // !OS_ATOMIC_UP
void
_OSSpinLockLockSlow(volatile OSSpinLock *l)
{
	uint32_t tries = OS_LOCK_SPIN_SPIN_TRIES;
	OSSpinLock lock;
	while (unlikely(lock = *l)) {
_spin:
		if (unlikely(lock != _OSSpinLockLocked)) {
			return _os_lock_corruption_abort((void *)l, (uintptr_t)lock);
		}
		if (unlikely(!tries--)) return _OSSpinLockLockYield(l);
		OS_LOCK_SPIN_PAUSE();
	}
	bool r = os_atomic_cmpxchgv(l, 0, _OSSpinLockLocked, &lock, acquire);
	if (likely(r)) return;
	goto _spin;
}
#endif // !OS_ATOMIC_UP
#if OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK && !TARGET_OS_SIMULATOR

typedef struct _os_nospin_lock_s *_os_nospin_lock_t;

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);

void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	return _os_nospin_lock_lock((_os_nospin_lock_t)l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

int
spin_lock_try(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return _os_nospin_lock_trylock((_os_nospin_lock_t)l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	return _os_nospin_lock_unlock((_os_nospin_lock_t)l);
}

#undef OS_ATOMIC_ALIAS
#define OS_ATOMIC_ALIAS(n, o)
static void _OSSpinLockLock(volatile OSSpinLock *l);
#undef OSSpinLockLock
#define OSSpinLockLock _OSSpinLockLock
static bool _OSSpinLockTry(volatile OSSpinLock *l);
#undef OSSpinLockTry
#define OSSpinLockTry _OSSpinLockTry
static __unused int __spin_lock_try(volatile OSSpinLock *l);
#undef spin_lock_try
#define spin_lock_try __spin_lock_try
static void _OSSpinLockUnlock(volatile OSSpinLock *l);
#undef OSSpinLockUnlock
#define OSSpinLockUnlock _OSSpinLockUnlock
#endif // OS_LOCK_OSSPINLOCK_IS_NOSPINLOCK
void
OSSpinLockLock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_lock, OSSpinLockLock);
	OS_ATOMIC_ALIAS(_spin_lock, OSSpinLockLock);
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	if (likely(r)) return;
	return _OSSpinLockLockSlow(l);
}

bool
OSSpinLockTry(volatile OSSpinLock *l)
{
	bool r = os_atomic_cmpxchg(l, 0, _OSSpinLockLocked, acquire);
	return r;
}

int
spin_lock_try(volatile OSSpinLock *l) // <rdar://problem/13316060>
{
	OS_ATOMIC_ALIAS(_spin_lock_try, spin_lock_try);
	return OSSpinLockTry(l);
}

void
OSSpinLockUnlock(volatile OSSpinLock *l)
{
	OS_ATOMIC_ALIAS(spin_unlock, OSSpinLockUnlock);
	OS_ATOMIC_ALIAS(_spin_unlock, OSSpinLockUnlock);
	os_atomic_store(l, 0, release);
}
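
/*
 * Usage sketch (editorial addition, not part of the original source): the
 * legacy OSSpinLock API implemented above. OSSpinLock is deprecated in favor
 * of os_unfair_lock; a spinning waiter can waste cycles against a
 * lower-priority owner, which is why the slow path above yields.
 *
 *	#include <libkern/OSAtomic.h>
 *
 *	static OSSpinLock g_lock = OS_SPINLOCK_INIT;
 *	static int g_counter;
 *
 *	void
 *	counter_increment(void)
 *	{
 *		OSSpinLockLock(&g_lock);
 *		g_counter++;
 *		OSSpinLockUnlock(&g_lock);
 *	}
 */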
#pragma mark os_lock_spin_t

OS_LOCK_STRUCT_DECL_INTERNAL(spin,
	OSSpinLock volatile osl_spinlock;
);
OS_LOCK_METHODS_DECL(spin);
OS_LOCK_TYPE_INSTANCE(spin);

void
_os_lock_spin_lock(_os_lock_spin_t l)
{
	return OSSpinLockLock(&l->osl_spinlock);
}

bool
_os_lock_spin_trylock(_os_lock_spin_t l)
{
	return OSSpinLockTry(&l->osl_spinlock);
}

void
_os_lock_spin_unlock(_os_lock_spin_t l)
{
	return OSSpinLockUnlock(&l->osl_spinlock);
}
#pragma mark os_lock_owner_t

#ifndef __TSD_MACH_THREAD_SELF
#define __TSD_MACH_THREAD_SELF 3
#endif

typedef mach_port_name_t os_lock_owner_t;
#define OS_LOCK_NO_OWNER MACH_PORT_NULL

OS_ALWAYS_INLINE OS_CONST
static inline os_lock_owner_t
_os_lock_owner_get_self(void)
{
	os_lock_owner_t self;
	self = (os_lock_owner_t)_os_tsd_get_direct(__TSD_MACH_THREAD_SELF);
	return self;
}

OS_NOINLINE OS_NORETURN OS_COLD
static void
_os_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_lock");
}
#pragma mark os_lock_handoff_t

OS_LOCK_STRUCT_DECL_INTERNAL(handoff,
	os_lock_owner_t volatile osl_owner;
);
OS_LOCK_METHODS_DECL(handoff);
OS_LOCK_TYPE_INSTANCE(handoff);

#define OS_LOCK_HANDOFF_YIELD_TRIES 100

OS_NOINLINE
static void
_os_lock_handoff_lock_slow(_os_lock_handoff_t l)
{
	int option = SWITCH_OPTION_OSLOCK_DEPRESS;
	mach_msg_timeout_t timeout = 1;
	uint32_t tries = OS_LOCK_HANDOFF_YIELD_TRIES;
	os_lock_owner_t self = _os_lock_owner_get_self(), owner;
	while (unlikely(owner = l->osl_owner)) {
_handoff:
		if (unlikely(owner == self)) return _os_lock_recursive_abort(self);
		// Yield until tries first hits zero, then permanently switch to wait
		if (unlikely(!tries--)) option = SWITCH_OPTION_OSLOCK_WAIT;
		thread_switch(owner, option, timeout);
		// Redrive the handoff every 1ms until switching to wait
		if (option == SWITCH_OPTION_OSLOCK_WAIT) timeout++;
	}
	bool r = os_atomic_cmpxchgv2o(l, osl_owner, MACH_PORT_NULL, self, &owner,
			acquire);
	if (likely(r)) return;
	goto _handoff;
}

void
_os_lock_handoff_lock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	if (likely(r)) return;
	return _os_lock_handoff_lock_slow(l);
}

bool
_os_lock_handoff_trylock(_os_lock_handoff_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, osl_owner, MACH_PORT_NULL, self, acquire);
	return r;
}

void
_os_lock_handoff_unlock(_os_lock_handoff_t l)
{
	os_atomic_store2o(l, osl_owner, MACH_PORT_NULL, release);
}
#pragma mark os_ulock_value_t

#include <sys/errno.h>
#include <sys/ulock.h>

typedef os_lock_owner_t os_ulock_value_t;

// This assumes that all thread mach port values always have the low bit set!
// Clearing this bit is used to communicate the existence of waiters to unlock.
#define OS_ULOCK_NOWAITERS_BIT ((os_ulock_value_t)1u)
#define OS_ULOCK_OWNER(value) ((value) | OS_ULOCK_NOWAITERS_BIT)

#define OS_ULOCK_ANONYMOUS_OWNER MACH_PORT_DEAD
#define OS_ULOCK_IS_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner == (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
#define OS_ULOCK_IS_NOT_OWNER(value, self, allow_anonymous_owner) ({ \
		os_lock_owner_t _owner = OS_ULOCK_OWNER(value); (_owner != (self)) && \
		(!(allow_anonymous_owner) || _owner != OS_ULOCK_ANONYMOUS_OWNER); })
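
/*
 * Worked example (editorial addition): suppose the owning thread's mach port
 * name is 0x103 (low bit set, per the assumption above). An uncontended lock
 * stores 0x103; the first waiter clears the low bit, storing 0x102. Then:
 *
 *	os_ulock_value_t v = 0x102;
 *	os_lock_owner_t owner = OS_ULOCK_OWNER(v);        // 0x103, owner recovered
 *	bool has_waiters = !(v & OS_ULOCK_NOWAITERS_BIT); // true, unlock must wake
 */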
#pragma mark os_unfair_lock

typedef struct _os_unfair_lock_s {
	os_ulock_value_t oul_value;
} *_os_unfair_lock_t;

_Static_assert(sizeof(struct os_unfair_lock_s) ==
		sizeof(struct _os_unfair_lock_s), "os_unfair_lock size mismatch");

OS_ATOMIC_EXPORT void os_unfair_lock_lock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT bool os_unfair_lock_trylock(os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock(os_unfair_lock_t lock);

OS_ATOMIC_EXPORT void os_unfair_lock_lock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_no_tsd_4libpthread(
		os_unfair_lock_t lock);
OS_ATOMIC_EXPORT void os_unfair_lock_lock_with_options_4Libc(
		os_unfair_lock_t lock, os_unfair_lock_options_t options);
OS_ATOMIC_EXPORT void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_unfair_lock_corruption_abort(os_ulock_value_t current);

_Static_assert(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION ==
		ULF_WAIT_WORKQ_DATA_CONTENTION,
		"check value for OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION");
_Static_assert(OS_UNFAIR_LOCK_ADAPTIVE_SPIN ==
		ULF_WAIT_ADAPTIVE_SPIN,
		"check value for OS_UNFAIR_LOCK_ADAPTIVE_SPIN");
#define OS_UNFAIR_LOCK_OPTIONS_MASK \
		(os_unfair_lock_options_t)(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION | \
				OS_UNFAIR_LOCK_ADAPTIVE_SPIN)
#define OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER 0x01000000u
OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_unfair_lock");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_unfair_lock not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_unfair_lock_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_unfair_lock is corrupt");
}
OS_NOINLINE
static void
_os_unfair_lock_lock_slow(_os_unfair_lock_t l, os_lock_owner_t self,
		os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(options & ~OS_UNFAIR_LOCK_OPTIONS_MASK)) {
		__LIBPLATFORM_CLIENT_CRASH__(options, "Invalid options");
	}
	os_ulock_value_t current, new, waiters_mask = 0;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		if (unlikely(OS_ULOCK_IS_OWNER(current, self, allow_anonymous_owner))) {
			return _os_unfair_lock_recursive_abort(self);
		}
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		if (current != new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO | options,
				l, current, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_unfair_lock_corruption_abort(current);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		if (ret > 0) {
			// If there are more waiters, unset nowaiters bit when acquiring lock
			waiters_mask = OS_ULOCK_NOWAITERS_BIT;
		}
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}
OS_NOINLINE
static void
_os_unfair_lock_unlock_slow(_os_unfair_lock_t l, os_ulock_value_t current,
		os_lock_owner_t self, os_unfair_lock_options_t options)
{
	os_unfair_lock_options_t allow_anonymous_owner =
			options & OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	options &= ~OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER;
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, allow_anonymous_owner))) {
		return _os_unfair_lock_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}
void
os_unfair_lock_lock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, OS_UNFAIR_LOCK_NONE);
}
void
os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self, options);
}
bool
os_unfair_lock_trylock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}
void
os_unfair_lock_unlock(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self, 0);
}
void
os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_unfair_lock_lock_slow(l, self,
			OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION |
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}
void
os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = OS_ULOCK_ANONYMOUS_OWNER;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self,
			OS_UNFAIR_LOCK_ALLOW_ANONYMOUS_OWNER);
}
void
os_unfair_lock_assert_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly not owned by current thread");
	}
}
void
os_unfair_lock_assert_not_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current = os_atomic_load2o(l, oul_value, relaxed);
	if (unlikely(OS_ULOCK_IS_OWNER(current, self, 0))) {
		__LIBPLATFORM_CLIENT_CRASH__(current, "Assertion failed: "
				"Lock unexpectedly owned by current thread");
	}
}
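
/*
 * Usage sketch (editorial addition): the public <os/lock.h> surface
 * implemented above.
 *
 *	#include <os/lock.h>
 *
 *	static os_unfair_lock g_lock = OS_UNFAIR_LOCK_INIT;
 *	static int g_shared;
 *
 *	void
 *	shared_update(int v)
 *	{
 *		os_unfair_lock_lock(&g_lock);
 *		os_unfair_lock_assert_owner(&g_lock); // owner tracked via mach TSD
 *		g_shared = v;
 *		os_unfair_lock_unlock(&g_lock);
 *	}
 */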
#pragma mark os_unfair_recursive_lock

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options);

OS_ATOMIC_EXPORT
bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);

OS_ATOMIC_EXPORT
void os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock);
static inline os_lock_owner_t
_os_unfair_lock_owner(os_unfair_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	return OS_ULOCK_OWNER(os_atomic_load(&l->oul_value, relaxed));
}

bool
os_unfair_recursive_lock_owned(os_unfair_recursive_lock_t lock)
{
	return _os_unfair_lock_owner(&lock->ourl_lock) ==
			_os_lock_owner_get_self();
}
void
os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options)
{
	os_lock_owner_t cur, self = _os_lock_owner_get_self();
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (likely(os_atomic_cmpxchgv2o(l, oul_value,
			OS_LOCK_NO_OWNER, self, &cur, acquire))) {
		return;
	}

	if (OS_ULOCK_OWNER(cur) == self) {
		lock->ourl_count++;
		return;
	}

	return _os_unfair_lock_lock_slow(l, self, options);
}
bool
os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t cur, self = _os_lock_owner_get_self();
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (likely(os_atomic_cmpxchgv2o(l, oul_value,
			OS_LOCK_NO_OWNER, self, &cur, acquire))) {
		return true;
	}

	if (likely(OS_ULOCK_OWNER(cur) == self)) {
		lock->ourl_count++;
		return true;
	}

	return false;
}
OS_ALWAYS_INLINE
static inline void
_os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock,
		os_lock_owner_t self)
{
	if (unlikely(lock->ourl_count)) {
		os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
		if (unlikely(cur != self)) {
			_os_unfair_lock_unowned_abort(cur);
		}
		lock->ourl_count--;
		return;
	}

	_os_unfair_lock_t l = (_os_unfair_lock_t)lock;
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_unfair_lock_unlock_slow(l, current, self, 0);
}
void
os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	_os_unfair_recursive_lock_unlock(lock, self);
}
bool
os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock)
{
	os_lock_owner_t cur = _os_unfair_lock_owner(&lock->ourl_lock);
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (likely(cur == self)) {
		_os_unfair_recursive_lock_unlock(lock, self);
		return true;
	}
	return false;
}
void
os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock)
{
	_os_unfair_lock_t l = (_os_unfair_lock_t)&lock->ourl_lock;

	if (os_atomic_load(&l->oul_value, relaxed) == OS_LOCK_NO_OWNER) {
		__LIBPLATFORM_CLIENT_CRASH__(0, "Lock was not held");
	}
	if (lock->ourl_count) {
		os_lock_owner_t self = _os_lock_owner_get_self();
		lock->ourl_count--;
		os_atomic_store(&l->oul_value, self, relaxed);
	} else {
		os_atomic_store(&l->oul_value, OS_LOCK_NO_OWNER, relaxed);
	}
}
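
/*
 * Illustrative sketch (editorial addition, assuming the private
 * <os/lock_private.h> interface): ourl_count tracks nested acquisitions by
 * the owner, so only the final unlock releases the underlying unfair lock.
 *
 *	static os_unfair_recursive_lock g_rlock = OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *	void
 *	nested(void)
 *	{
 *		os_unfair_recursive_lock_lock_with_options(&g_rlock, OS_UNFAIR_LOCK_NONE);
 *		os_unfair_recursive_lock_lock_with_options(&g_rlock, OS_UNFAIR_LOCK_NONE);
 *		os_unfair_recursive_lock_unlock(&g_rlock); // ourl_count 1 -> 0, still owned
 *		os_unfair_recursive_lock_unlock(&g_rlock); // releases the lock
 *	}
 */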
#pragma mark _os_lock_unfair_t

OS_LOCK_STRUCT_DECL_INTERNAL(unfair,
	os_unfair_lock osl_unfair_lock;
);
OS_LOCK_METHODS_DECL(unfair);
OS_LOCK_TYPE_INSTANCE(unfair);

void
_os_lock_unfair_lock(_os_lock_unfair_t l)
{
	return os_unfair_lock_lock(&l->osl_unfair_lock);
}

bool
_os_lock_unfair_trylock(_os_lock_unfair_t l)
{
	return os_unfair_lock_trylock(&l->osl_unfair_lock);
}

void
_os_lock_unfair_unlock(_os_lock_unfair_t l)
{
	return os_unfair_lock_unlock(&l->osl_unfair_lock);
}
#pragma mark _os_nospin_lock

typedef struct _os_nospin_lock_s {
	os_ulock_value_t oul_value;
} _os_nospin_lock, *_os_nospin_lock_t;

_Static_assert(sizeof(OSSpinLock) ==
		sizeof(struct _os_nospin_lock_s), "os_nospin_lock size mismatch");

OS_ATOMIC_EXPORT void _os_nospin_lock_lock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT bool _os_nospin_lock_trylock(_os_nospin_lock_t lock);
OS_ATOMIC_EXPORT void _os_nospin_lock_unlock(_os_nospin_lock_t lock);
OS_NOINLINE
static void
_os_nospin_lock_lock_slow(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current, new, waiters_mask = 0;
	uint32_t timeout = 1;
	while (unlikely((current = os_atomic_load2o(l, oul_value, relaxed)) !=
			OS_LOCK_NO_OWNER)) {
_retry:
		new = current & ~OS_ULOCK_NOWAITERS_BIT;
		// For safer compatibility with OSSpinLock where _OSSpinLockLocked may
		// be 1, check that new didn't become 0 (unlocked) by clearing this bit
		if (current != new && new) {
			// Clear nowaiters bit in lock value before waiting
			if (!os_atomic_cmpxchgv2o(l, oul_value, current, new, &current,
					relaxed)) {
				continue;
			}
			current = new;
		}
		int ret = __ulock_wait(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, current,
				timeout * 1000);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case ETIMEDOUT:
				timeout++;
				continue;
			case EINTR:
			case EFAULT:
				continue;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
		if (ret > 0) {
			// If there are more waiters, unset nowaiters bit when acquiring lock
			waiters_mask = OS_ULOCK_NOWAITERS_BIT;
		}
	}
	new = self & ~waiters_mask;
	bool r = os_atomic_cmpxchgv2o(l, oul_value, OS_LOCK_NO_OWNER, new,
			&current, acquire);
	if (unlikely(!r)) goto _retry;
}
OS_NOINLINE
static void
_os_nospin_lock_unlock_slow(_os_nospin_lock_t l, os_ulock_value_t current)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	if (unlikely(OS_ULOCK_OWNER(current) != self)) {
		return; // no unowned_abort for drop-in compatibility with OSSpinLock
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_COMPARE_AND_WAIT | ULF_NO_ERRNO, l, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}
void
_os_nospin_lock_lock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	if (likely(r)) return;
	return _os_nospin_lock_lock_slow(l);
}

bool
_os_nospin_lock_trylock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	bool r = os_atomic_cmpxchg2o(l, oul_value, OS_LOCK_NO_OWNER, self, acquire);
	return r;
}

void
_os_nospin_lock_unlock(_os_nospin_lock_t l)
{
	os_lock_owner_t self = _os_lock_owner_get_self();
	os_ulock_value_t current;
	current = os_atomic_xchg2o(l, oul_value, OS_LOCK_NO_OWNER, release);
	if (likely(current == self)) return;
	return _os_nospin_lock_unlock_slow(l, current);
}
#pragma mark _os_lock_nospin_t

OS_LOCK_STRUCT_DECL_INTERNAL(nospin,
	_os_nospin_lock osl_nospin_lock;
);
OS_LOCK_METHODS_DECL(nospin);
OS_LOCK_TYPE_INSTANCE(nospin);

void
_os_lock_nospin_lock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_lock(&l->osl_nospin_lock);
}

bool
_os_lock_nospin_trylock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_trylock(&l->osl_nospin_lock);
}

void
_os_lock_nospin_unlock(_os_lock_nospin_t l)
{
	return _os_nospin_lock_unlock(&l->osl_nospin_lock);
}
#pragma mark os_once_t

typedef struct os_once_gate_s {
	union {
		os_ulock_value_t ogo_lock;
		uintptr_t ogo_once;
	};
} os_once_gate_s, *os_once_gate_t;

#define OS_ONCE_INIT ((uintptr_t)0l)
#define OS_ONCE_DONE (~(uintptr_t)0l)

#if defined(__i386__) || defined(__x86_64__)
#define OS_ONCE_USE_QUIESCENT_COUNTER 0
#else
#define OS_ONCE_USE_QUIESCENT_COUNTER 1
#endif
OS_ATOMIC_EXPORT void _os_once(os_once_t *val, void *ctxt, os_function_t func);
OS_ATOMIC_EXPORT void __os_once_reset(os_once_t *val);

OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_recursive_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_unowned_abort(os_lock_owner_t owner);
OS_NOINLINE OS_NORETURN OS_COLD
void _os_once_gate_corruption_abort(os_ulock_value_t current);
OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_recursive_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Trying to recursively lock an "
			"os_once_t");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_unowned_abort(os_lock_owner_t owner)
{
	__LIBPLATFORM_CLIENT_CRASH__(owner, "Unlock of an os_once_t not "
			"owned by current thread");
}

OS_NOINLINE OS_NORETURN OS_COLD
void
_os_once_gate_corruption_abort(os_ulock_value_t current)
{
	__LIBPLATFORM_CLIENT_CRASH__(current, "os_once_t is corrupt");
}
#if OS_ONCE_USE_QUIESCENT_COUNTER
#define OS_ONCE_MAKE_GEN(gen)  (((gen) << 2) + OS_ULOCK_NOWAITERS_BIT)
#define OS_ONCE_IS_GEN(gen)    (((gen) & 3) == OS_ULOCK_NOWAITERS_BIT)

// the _COMM_PAGE_CPU_QUIESCENT_COUNTER value is incremented every time
// all CPUs have performed a context switch.
//
// To make sure all CPUs context switched at least once since `gen`,
// we need to observe 4 increments, see libdispatch/src/shims/lock.h
#define OS_ONCE_GEN_SAFE_DELTA  (4 << 2)
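
/*
 * Worked example (editorial addition): with a counter value of 7,
 * OS_ONCE_MAKE_GEN(7) == (7 << 2) + 1 == 29, and OS_ONCE_IS_GEN(29) holds
 * because 29 & 3 == 1 == OS_ULOCK_NOWAITERS_BIT. The gate is only promoted
 * to OS_ONCE_DONE once the counter has advanced by 4 increments, i.e. the
 * current generation exceeds the stored one by at least
 * OS_ONCE_GEN_SAFE_DELTA (4 << 2 == 16).
 */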
OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_generation(void)
{
	uintptr_t value = *(volatile uintptr_t *)_COMM_PAGE_CPU_QUIESCENT_COUNTER;
	return OS_ONCE_MAKE_GEN(value);
}

OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_mark_quiescing(os_once_gate_t og)
{
	return os_atomic_xchg(&og->ogo_once, _os_once_generation(), release);
}

OS_ALWAYS_INLINE
static void
_os_once_mark_done_if_quiesced(os_once_gate_t og, uintptr_t gen)
{
	if (_os_once_generation() - gen >= OS_ONCE_GEN_SAFE_DELTA) {
		os_atomic_store(&og->ogo_once, OS_ONCE_DONE, relaxed);
	}
}
#else
OS_ALWAYS_INLINE
static inline uintptr_t
_os_once_mark_done(os_once_gate_t og)
{
	return os_atomic_xchg(&og->ogo_once, OS_ONCE_DONE, release);
}
#endif
OS_NOINLINE
static void
_os_once_gate_broadcast(os_once_gate_t og, os_ulock_value_t current,
		os_lock_owner_t self)
{
	if (unlikely(OS_ULOCK_IS_NOT_OWNER(current, self, 0))) {
		return _os_once_gate_unowned_abort(OS_ULOCK_OWNER(current));
	}
	if (current & OS_ULOCK_NOWAITERS_BIT) {
		__LIBPLATFORM_INTERNAL_CRASH__(current, "unlock_slow with no waiters");
	}
	for (;;) {
		int ret = __ulock_wake(UL_UNFAIR_LOCK | ULF_NO_ERRNO | ULF_WAKE_ALL,
				&og->ogo_lock, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
				continue;
			case ENOENT:
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wake failure");
			}
		}
		break;
	}
}
OS_NOINLINE
static void
_os_once_callout(os_once_gate_t og, void *ctxt, os_function_t func,
		os_lock_owner_t self)
{
	uintptr_t v;

	func(ctxt);

#if OS_ONCE_USE_QUIESCENT_COUNTER
	v = _os_once_mark_quiescing(og);
#else
	v = _os_once_mark_done(og);
#endif
	if (likely((os_ulock_value_t)v == self)) return;
	_os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}
OS_NOINLINE
static void
_os_once_gate_wait(os_once_gate_t og, void *ctxt, os_function_t func,
		os_lock_owner_t self)
{
	uintptr_t old, new;

	for (;;) {
		os_atomic_rmw_loop(&og->ogo_once, old, new, relaxed, {
			if (old == OS_ONCE_DONE) {
				os_atomic_rmw_loop_give_up(return);
#if OS_ONCE_USE_QUIESCENT_COUNTER
			} else if (OS_ONCE_IS_GEN(old)) {
				os_atomic_rmw_loop_give_up({
					os_atomic_thread_fence(acquire);
					return _os_once_mark_done_if_quiesced(og, old);
				});
#endif
			} else if (old == OS_ONCE_INIT) {
				// __os_once_reset was used, try to become the new initializer
				new = (uintptr_t)self;
			} else {
				new = old & ~(uintptr_t)OS_ULOCK_NOWAITERS_BIT;
				if (new == old) os_atomic_rmw_loop_give_up(break);
			}
		});
		if (old == OS_ONCE_INIT) {
			// see comment in _os_once, pairs with the release barrier
			// in __os_once_reset()
			os_atomic_thread_fence(acquire);
			return _os_once_callout(og, ctxt, func, self);
		}
		if (unlikely(OS_ULOCK_IS_OWNER((os_lock_owner_t)old, self, 0))) {
			return _os_once_gate_recursive_abort(self);
		}
		int ret = __ulock_wait(UL_UNFAIR_LOCK | ULF_NO_ERRNO,
				&og->ogo_lock, (os_ulock_value_t)new, 0);
		if (unlikely(ret < 0)) {
			switch (-ret) {
			case EINTR:
			case EFAULT:
				continue;
			case EOWNERDEAD:
				_os_once_gate_corruption_abort((os_lock_owner_t)old);
				break;
			default:
				__LIBPLATFORM_INTERNAL_CRASH__(-ret, "ulock_wait failure");
			}
		}
	}
}
// Atomically resets the once value to zero and then signals all
// pending waiters to return from their __ulock_wait()
void
__os_once_reset(os_once_t *val)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self = _os_lock_owner_get_self();
	uintptr_t v;

	v = os_atomic_xchg(&og->ogo_once, OS_ONCE_INIT, release);
	if (likely((os_ulock_value_t)v == self)) return;
	return _os_once_gate_broadcast(og, (os_ulock_value_t)v, self);
}
void
_os_once(os_once_t *val, void *ctxt, os_function_t func)
{
	os_once_gate_t og = (os_once_gate_t)val;
	os_lock_owner_t self;
	uintptr_t v;

#if OS_ONCE_USE_QUIESCENT_COUNTER
	v = os_atomic_load(&og->ogo_once, acquire);
	if (likely(OS_ONCE_IS_GEN(v))) {
		return _os_once_mark_done_if_quiesced(og, v);
	}
#endif

	self = _os_lock_owner_get_self();
	v = (uintptr_t)self;

	// The acquire barrier pairs with the release in __os_once_reset()
	// for cases when a previous initializer failed.
	if (likely(os_atomic_cmpxchg(&og->ogo_once, OS_ONCE_INIT, v, acquire))) {
		return _os_once_callout(og, ctxt, func, self);
	}
	return _os_once_gate_wait(og, ctxt, func, self);
}
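
/*
 * Usage sketch (editorial addition, assuming the <os/once_private.h>
 * interface declared above): _os_once runs the function exactly once per
 * predicate; concurrent callers wait on the gate until it is marked done.
 *
 *	#include <os/once_private.h>
 *
 *	static os_once_t g_pred;
 *	static int g_config;
 *
 *	static void
 *	init_config(void *ctxt)
 *	{
 *		g_config = *(int *)ctxt;
 *	}
 *
 *	void
 *	ensure_config(void)
 *	{
 *		static int def = 42;
 *		_os_once(&g_pred, &def, init_config);
 *	}
 */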