/*
 * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
#ifndef __OS_LOCK_PRIVATE__
#define __OS_LOCK_PRIVATE__

#include <Availability.h>
#include <TargetConditionals.h>
#include <sys/cdefs.h>
#include <stdint.h>
#include <stdbool.h>
#include <os/lock.h>
#include <os/base_private.h>

OS_ASSUME_NONNULL_BEGIN
 
#define OS_LOCK_SPI_VERSION 20160406

/*!
 * @typedef os_lock_t
 *
 * @abstract
 * Pointer to one of the os_lock variants.
 */
#define OS_LOCK_TYPE_STRUCT(type) const struct _os_lock_type_##type##_s
#define OS_LOCK_TYPE_REF(type) _os_lock_type_##type
#define OS_LOCK_TYPE_DECL(type) OS_LOCK_TYPE_STRUCT(type) OS_LOCK_TYPE_REF(type)

#define OS_LOCK(type) os_lock_##type##_s
#define OS_LOCK_STRUCT(type) struct OS_LOCK(type)
#if defined(__cplusplus) && __cplusplus >= 201103L

#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
			private: \
			OS_LOCK_TYPE_STRUCT(type) * const osl_type OS_UNUSED; \
			uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
			public: \
			constexpr OS_LOCK(type)() : \
					osl_type(&OS_LOCK_TYPE_REF(type)), _osl_##type##_opaque() {} \
		} OS_LOCK(type)
#define OS_LOCK_INIT(type) {}

typedef OS_LOCK_STRUCT(base) {
	protected:
	constexpr OS_LOCK(base)() {}
} OS_LOCK(base);

#else
#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) { \
			OS_LOCK_TYPE_STRUCT(type) * const osl_type; \
			uintptr_t _osl_##type##_opaque[size-1]; \
		} OS_LOCK(type)
#define OS_LOCK_INIT(type) { .osl_type = &OS_LOCK_TYPE_REF(type), }

#endif
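/*
 * Example (illustrative sketch, not part of this SPI): in the plain C branch
 * above, OS_LOCK_DECL(unfair, 2) expands to roughly the following, i.e. a
 * type pointer plus one opaque word of lock state:
 *
 *	typedef struct os_lock_unfair_s {
 *		const struct _os_lock_type_unfair_s * const osl_type;
 *		uintptr_t _osl_unfair_opaque[1];
 *	} os_lock_unfair_s;
 *
 * and OS_LOCK_UNFAIR_INIT designated-initializes osl_type accordingly.
 */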
#ifndef OS_LOCK_T_MEMBER
#define OS_LOCK_T_MEMBER(type) OS_LOCK_STRUCT(type) *_osl_##type
#endif
typedef OS_TRANSPARENT_UNION union {
	OS_LOCK_T_MEMBER(base);
	OS_LOCK_T_MEMBER(unfair);
	OS_LOCK_T_MEMBER(nospin);
	OS_LOCK_T_MEMBER(spin);
	OS_LOCK_T_MEMBER(handoff);
	OS_LOCK_T_MEMBER(eliding);
	OS_LOCK_T_MEMBER(transactional);
} os_lock_t;
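/*
 * Example (illustrative sketch, not part of this SPI): because os_lock_t is a
 * transparent union, a pointer to any os_lock variant converts implicitly at
 * call sites of the generic os_lock_* functions declared below. The names are
 * hypothetical:
 *
 *	static os_lock_handoff_s frame_lock = OS_LOCK_HANDOFF_INIT;
 *
 *	void render_frame(void) {
 *		os_lock_lock(&frame_lock);    // &frame_lock converts to os_lock_t
 *		// ... critical section ...
 *		os_lock_unlock(&frame_lock);
 *	}
 */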
/*!
 * @typedef os_lock_unfair_s
 *
 * @abstract
 * os_lock variant equivalent to os_unfair_lock. Does not spin on contention
 * but waits in the kernel to be woken up by an unlock. The lock value contains
 * ownership information that the system may use to attempt to resolve priority
 * inversions.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock, so starvation is
 * possible.
 *
 * Must be initialized with OS_LOCK_UNFAIR_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(unfair);
OS_LOCK_DECL(unfair, 2);
#define OS_LOCK_UNFAIR_INIT OS_LOCK_INIT(unfair)
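/*
 * Example (illustrative sketch, not part of this SPI): replacing an
 * OSSpinLock with os_lock_unfair_s. The names below are hypothetical:
 *
 *	static os_lock_unfair_s stats_lock = OS_LOCK_UNFAIR_INIT;
 *	static uint64_t stats_count;
 *
 *	void stats_increment(void) {
 *		os_lock_lock(&stats_lock);
 *		stats_count++;
 *		os_lock_unlock(&stats_lock);
 *	}
 */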
/*!
 * @typedef os_lock_nospin_s
 *
 * @abstract
 * os_lock variant that does not spin on contention but waits in the kernel to
 * be woken up by an unlock. No attempt to resolve priority inversions is made,
 * so os_unfair_lock or os_lock_unfair_s should generally be preferred.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock, so starvation is
 * possible.
 *
 * Must be initialized with OS_LOCK_NOSPIN_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(nospin);
OS_LOCK_DECL(nospin, 2);
#define OS_LOCK_NOSPIN_INIT OS_LOCK_INIT(nospin)
/*!
 * @typedef os_lock_spin_s
 *
 * @abstract
 * Deprecated os_lock variant that on contention starts by spinning trying to
 * acquire the lock, then depresses the priority of the current thread and
 * finally blocks the thread waiting for the lock to become available.
 * Equivalent to OSSpinLock and equally not recommended; see the discussion in
 * the libkern/OSAtomic.h headerdoc.
 *
 * @discussion
 * Spinlocks are intended to be held only for very brief periods of time. The
 * critical section must not make syscalls and should avoid touching areas of
 * memory that may trigger a page fault, in particular if the critical section
 * may be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads.
 *
 * Must be initialized with OS_LOCK_SPIN_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(spin);
OS_LOCK_DECL(spin, 2);
#define OS_LOCK_SPIN_INIT OS_LOCK_INIT(spin)
/*!
 * @typedef os_lock_handoff_s
 *
 * @abstract
 * os_lock variant that on contention hands off the current kernel thread to
 * the lock-owning userspace thread (if it is not running), temporarily
 * overriding its priority and IO throttle if necessary.
 *
 * @discussion
 * Intended for use in limited circumstances where the critical section might
 * be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads where the ordinary os_lock_spin_s would
 * be likely to encounter a priority inversion.
 *
 * IMPORTANT: This lock variant is NOT intended as a general replacement for
 * all uses of os_lock_spin_s or OSSpinLock.
 *
 * Must be initialized with OS_LOCK_HANDOFF_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(handoff);
OS_LOCK_DECL(handoff, 2);
#define OS_LOCK_HANDOFF_INIT OS_LOCK_INIT(handoff)
#if !TARGET_OS_IPHONE

/*!
 * @typedef os_lock_eliding_s
 *
 * @abstract
 * os_lock variant that uses hardware lock elision support if available to
 * allow multiple processors to concurrently execute a critical section as
 * long as they don't perform conflicting operations on each other's data. In
 * case of conflict, the lock reverts to exclusive operation and os_lock_spin_s
 * behavior on contention (at potential extra cost for the aborted attempt at
 * lock-elided concurrent execution). If hardware HLE support is not present,
 * this lock variant behaves like os_lock_spin_s.
 *
 * @discussion
 * IMPORTANT: Use of this lock variant MUST be extensively tested on hardware
 * with HLE support to ensure the data access pattern and length of the
 * critical section allows lock-elided execution to succeed frequently enough
 * to offset the cost of any aborted concurrent execution.
 *
 * Must be initialized with OS_LOCK_ELIDING_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA)
OS_EXPORT OS_LOCK_TYPE_DECL(eliding);
OS_LOCK_DECL(eliding, 8) OS_ALIGNED(64);
#define OS_LOCK_ELIDING_INIT OS_LOCK_INIT(eliding)
/*!
 * @typedef os_lock_transactional_s
 *
 * @abstract
 * os_lock variant that uses hardware restricted transactional memory support
 * if available to allow multiple processors to concurrently execute the
 * critical section as a transactional region. If transactional execution
 * aborts, the lock reverts to exclusive operation and os_lock_spin_s behavior
 * on contention (at potential extra cost for the aborted attempt at
 * transactional concurrent execution). If hardware RTM support is not present,
 * this lock variant behaves like os_lock_eliding_s.
 *
 * @discussion
 * IMPORTANT: Use of this lock variant MUST be extensively tested on hardware
 * with RTM support to ensure the data access pattern and length of the
 * critical section allows transactional execution to succeed frequently
 * enough to offset the cost of any aborted transactions.
 *
 * Must be initialized with OS_LOCK_TRANSACTIONAL_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA)
OS_EXPORT OS_LOCK_TYPE_DECL(transactional);
OS_LOCK_DECL(transactional, 8) OS_ALIGNED(64);
#define OS_LOCK_TRANSACTIONAL_INIT OS_LOCK_INIT(transactional)

#endif // !TARGET_OS_IPHONE
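/*
 * Example (illustrative sketch, not part of this SPI): the eliding and
 * transactional variants are declared with size 8 and OS_ALIGNED(64) above,
 * so a hypothetical declaration such as
 *
 *	static os_lock_transactional_s table_lock = OS_LOCK_TRANSACTIONAL_INIT;
 *
 * occupies its own cache line, which plausibly helps avoid spurious
 * transaction aborts caused by unrelated stores to adjacent data.
 */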
/*!
 * @function os_lock_lock
 *
 * @abstract
 * Locks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_lock(os_lock_t lock);
/*!
 * @function os_lock_trylock
 *
 * @abstract
 * Locks an os_lock variant if it is not already locked.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_lock_trylock(os_lock_t lock);
/*!
 * @function os_lock_unlock
 *
 * @abstract
 * Unlocks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_unlock(os_lock_t lock);
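/*
 * Example (illustrative sketch, not part of this SPI): os_lock_trylock allows
 * opportunistic work when the lock is contended, rather than blocking. The
 * names below are hypothetical:
 *
 *	static os_lock_unfair_s cache_lock = OS_LOCK_UNFAIR_INIT;
 *
 *	bool cache_try_flush(void) {
 *		if (!os_lock_trylock(&cache_lock)) {
 *			return false;   // lock held elsewhere; flush later
 *		}
 *		// ... flush the cache ...
 *		os_lock_unlock(&cache_lock);
 *		return true;
 *	}
 */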
/*! @group os_unfair_lock SPI
 *
 * @abstract
 * Replacement for the deprecated OSSpinLock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The opaque lock value
 * contains thread ownership information that the system may use to attempt to
 * resolve priority inversions.
 *
 * This lock must be unlocked from the same thread that locked it; attempts to
 * unlock from a different thread will cause an assertion aborting the process.
 *
 * This lock must not be accessed from multiple processes or threads via shared
 * or multiply-mapped memory, as the lock implementation relies on the address
 * of the lock value and owning process.
 *
 * @discussion
 * As with OSSpinLock there is no attempt at fairness or lock ordering, e.g.
 * an unlocker can potentially immediately reacquire the lock before a
 * woken-up waiter gets an opportunity to attempt to acquire the lock. This
 * may be advantageous for performance reasons, but also makes starvation of
 * waiters a definite possibility.
 *
 * Must be initialized with OS_UNFAIR_LOCK_INIT
 */
/*!
 * @typedef os_unfair_lock_options_t
 *
 * @const OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
 * This flag informs the runtime that the specified lock is used for data
 * synchronization and that the lock owner is always able to make progress
 * toward releasing the lock without the help of another thread in the same
 * process. This hint will cause the workqueue subsystem to not create new
 * threads to offset for threads waiting for the lock.
 *
 * When this flag is used, the code running under the critical section should
 * be well known and under your control (generally it should not call into
 * the system).
 */
OS_ENUM(os_unfair_lock_options, uint32_t,
	OS_UNFAIR_LOCK_NONE
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00000000,
	OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00010000,
);
/*!
 * @function os_unfair_lock_lock_with_options
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
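/*
 * Example (illustrative sketch, not part of this SPI): taking an
 * os_unfair_lock with the data-synchronization hint. The names below are
 * hypothetical:
 *
 *	static os_unfair_lock queue_lock = OS_UNFAIR_LOCK_INIT;
 *
 *	void queue_push(item_t *item) {
 *		os_unfair_lock_lock_with_options(&queue_lock,
 *				OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
 *		// ... short, self-contained critical section; no syscalls ...
 *		os_unfair_lock_unlock(&queue_lock);
 *	}
 */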
/*!
 * @function os_unfair_lock_assert_owner
 *
 * @abstract
 * Asserts that the calling thread is the current owner of the specified lock.
 *
 * @discussion
 * If the lock is currently owned by the calling thread, this function returns.
 *
 * If the lock is unlocked or owned by a different thread, this function
 * asserts and terminates the process.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_assert_owner(os_unfair_lock_t lock);
/*!
 * @function os_unfair_lock_assert_not_owner
 *
 * @abstract
 * Asserts that the calling thread is not the current owner of the specified
 * lock.
 *
 * @discussion
 * If the lock is unlocked or owned by a different thread, this function
 * returns.
 *
 * If the lock is currently owned by the calling thread, this function asserts
 * and terminates the process.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_assert_not_owner(os_unfair_lock_t lock);
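/*
 * Example (illustrative sketch, not part of this SPI): using the ownership
 * asserts to document and enforce locking preconditions. The names are
 * hypothetical:
 *
 *	// Must be called with list_lock held.
 *	static void list_remove_locked(node_t *node) {
 *		os_unfair_lock_assert_owner(&list_lock);
 *		// ... unlink node ...
 *	}
 *
 *	// Must NOT be called with list_lock held (it takes the lock itself).
 *	void list_remove(node_t *node) {
 *		os_unfair_lock_assert_not_owner(&list_lock);
 *		os_unfair_lock_lock(&list_lock);
 *		list_remove_locked(node);
 *		os_unfair_lock_unlock(&list_lock);
 *	}
 */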
/*! @group os_unfair_lock no-TSD interfaces
 *
 * Like the above, but do not require being on a thread with valid TSD, so
 * they can be called from injected mach-threads. The normal routines use the
 * TSD value for mach_thread_self(); these routines use MACH_PORT_DEAD for the
 * locked value instead. As a result, they will be unable to resolve priority
 * inversions.
 *
 * This should only be used by libpthread.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock);

OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock);
OS_ASSUME_NONNULL_END
 
/*! @group Inline os_unfair_lock interfaces
 *
 * Inline versions of the os_unfair_lock fastpath.
 *
 * Intended exclusively for special highly performance-sensitive cases where
 * the function calls to the os_unfair_lock API entrypoints add measurable
 * overhead.
 *
 * Do not use in frameworks to implement synchronization API primitives that
 * are exposed to developers; that would lead to false positives for that API
 * from tools such as ThreadSanitizer.
 *
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 * DO NOT USE IN CODE THAT IS NOT PART OF THE OPERATING SYSTEM OR THAT IS NOT
 *          REBUILT AS PART OF AN OS WORLDBUILD. YOU HAVE BEEN WARNED!
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 *
 * Define OS_UNFAIR_LOCK_INLINE=1 to indicate that you have read the warning
 * above and still wish to use these interfaces.
 */
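/*
 * Example (illustrative sketch, not part of this SPI): opting in to the
 * inline interfaces from an OS component that has read the warning above. The
 * names are hypothetical:
 *
 *	#define OS_UNFAIR_LOCK_INLINE 1
 *	#include <os/lock_private.h>
 *
 *	static os_unfair_lock fast_lock = OS_UNFAIR_LOCK_INIT;
 *
 *	void fast_path(void) {
 *		os_unfair_lock_lock_inline(&fast_lock);
 *		// ... very short critical section ...
 *		os_unfair_lock_unlock_inline(&fast_lock);
 *	}
 */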
#if defined(OS_UNFAIR_LOCK_INLINE) && OS_UNFAIR_LOCK_INLINE

#include <pthread/tsd_private.h>

#ifdef __cplusplus
extern "C++" {
#if !(__has_include(<atomic>) && __has_feature(cxx_atomic))
#error Cannot use inline os_unfair_lock without <atomic> and C++11 atomics
#endif
#include <atomic>
typedef std::atomic<os_unfair_lock> _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) std::_a
#else
#if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
#error Cannot use inline os_unfair_lock without <stdatomic.h> and C11 atomics
#endif
#include <stdatomic.h>
typedef _Atomic(os_unfair_lock) _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) _a
#endif
OS_ASSUME_NONNULL_BEGIN
 
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_LOCK_UNLOCKED ((os_unfair_lock){0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock{})
#elif defined(__cplusplus)
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock())
#else
#define OS_UNFAIR_LOCK_UNLOCKED {0}
#endif
/*!
 * @function os_unfair_lock_lock_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline(os_unfair_lock_t lock)
{
	// Fall back to the function call if this thread has no direct TSD.
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
	uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	// Uncontended fastpath: CAS from unlocked to owned-by-self.
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		// Contended: take the library slowpath.
		return os_unfair_lock_lock(lock);
	}
}
/*!
 * @function os_unfair_lock_lock_with_options_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_with_options_inline(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	if (!_pthread_has_direct_tsd()) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
	uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
}
/*!
 * @function os_unfair_lock_trylock_inline
 *
 * @abstract
 * Locks an os_unfair_lock if it is not already locked.
 *
 * @discussion
 * It is invalid to surround this function with a retry loop; if this function
 * returns false, the program must be able to proceed without having acquired
 * the lock, or it must call os_unfair_lock_lock_inline() instead.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_WARN_RESULT OS_NONNULL_ALL
bool
os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
	uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire), OSLOCK_STD(memory_order_relaxed));
}
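/*
 * Example (illustrative sketch, not part of this SPI): per the discussion
 * above, a failed trylock must not be retried in a loop; either proceed
 * without the lock or fall back to the blocking acquire. The names are
 * hypothetical:
 *
 *	if (os_unfair_lock_trylock_inline(&fast_lock)) {
 *		// ... critical section ...
 *		os_unfair_lock_unlock_inline(&fast_lock);
 *	} else {
 *		defer_work();   // proceed without the lock; never spin on trylock
 *	}
 */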
/*!
 * @function os_unfair_lock_unlock_inline
 *
 * @abstract
 * Unlocks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
	uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	// Fastpath: CAS from owned-by-self back to unlocked; the slowpath
	// handles waking any waiters.
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock(lock);
	}
}
/*!
 * @function os_unfair_lock_lock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Locks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	// MACH_PORT_DEAD stands in for the owner since no TSD is available.
	uintptr_t mts = (uintptr_t)MACH_PORT_DEAD;
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_no_tsd_4libpthread(lock);
	}
}
/*!
 * @function os_unfair_lock_unlock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Unlocks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	uintptr_t mts = (uintptr_t)MACH_PORT_DEAD;
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock_no_tsd_4libpthread(lock);
	}
}
OS_ASSUME_NONNULL_END

#undef OSLOCK_STD

#ifdef __cplusplus
} // extern "C++"
#endif

#endif // OS_UNFAIR_LOCK_INLINE

#endif // __OS_LOCK_PRIVATE__