/*
 * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
#ifndef __OS_LOCK_PRIVATE__
#define __OS_LOCK_PRIVATE__

#include <Availability.h>
#include <TargetConditionals.h>
#include <sys/cdefs.h>
#include <stdint.h>
#include <stdbool.h>
#include <os/base_private.h>
#include <os/lock.h>

OS_ASSUME_NONNULL_BEGIN
#define OS_LOCK_SPI_VERSION 20171006

/*!
 * @typedef os_lock_t
 *
 * @abstract
 * Pointer to one of the os_lock variants.
 */
#define OS_LOCK_TYPE_STRUCT(type) const struct _os_lock_type_##type##_s
#define OS_LOCK_TYPE_REF(type) _os_lock_type_##type
#define OS_LOCK_TYPE_DECL(type) OS_LOCK_TYPE_STRUCT(type) OS_LOCK_TYPE_REF(type)

#define OS_LOCK(type) os_lock_##type##_s
#define OS_LOCK_STRUCT(type) struct OS_LOCK(type)
#if defined(__cplusplus) && __cplusplus >= 201103L

#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
		private: \
			OS_LOCK_TYPE_STRUCT(type) * osl_type OS_UNUSED; \
			uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
		public: \
			constexpr OS_LOCK(type)() : \
					osl_type(&OS_LOCK_TYPE_REF(type)), _osl_##type##_opaque() {} \
		} OS_LOCK(type)
#define OS_LOCK_INIT(type) {}

typedef OS_LOCK_STRUCT(base) {
protected:
	constexpr OS_LOCK(base)() {}
} OS_LOCK(base);

#else
#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) { \
			OS_LOCK_TYPE_STRUCT(type) * osl_type; \
			uintptr_t _osl_##type##_opaque[size-1]; \
		} OS_LOCK(type)

#define OS_LOCK_INIT(type) { .osl_type = &OS_LOCK_TYPE_REF(type), }

#endif
#ifndef OS_LOCK_T_MEMBER
#define OS_LOCK_T_MEMBER(type) OS_LOCK_STRUCT(type) *_osl_##type
#endif
typedef OS_TRANSPARENT_UNION union {
	OS_LOCK_T_MEMBER(base);
	OS_LOCK_T_MEMBER(unfair);
	OS_LOCK_T_MEMBER(nospin);
	OS_LOCK_T_MEMBER(spin);
	OS_LOCK_T_MEMBER(handoff);
} os_lock_t;
/*!
 * @typedef os_lock_unfair_s
 *
 * @abstract
 * os_lock variant equivalent to os_unfair_lock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The lock value contains
 * ownership information that the system may use to attempt to resolve priority
 * inversions.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock, there is no attempt at fairness or lock ordering: an unlocker
 * can potentially immediately reacquire the lock before a woken-up waiter gets
 * an opportunity to attempt to acquire the lock, so starvation is possible.
 *
 * Must be initialized with OS_LOCK_UNFAIR_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(unfair);
OS_LOCK_DECL(unfair, 2);
#define OS_LOCK_UNFAIR_INIT OS_LOCK_INIT(unfair)
/*!
 * @typedef os_lock_nospin_s
 *
 * @abstract
 * os_lock variant that does not spin on contention but waits in the kernel to
 * be woken up by an unlock. No attempt to resolve priority inversions is made,
 * so os_unfair_lock or os_lock_unfair_s should generally be preferred.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock, there is no attempt at fairness or lock ordering: an unlocker
 * can potentially immediately reacquire the lock before a woken-up waiter gets
 * an opportunity to attempt to acquire the lock, so starvation is possible.
 *
 * Must be initialized with OS_LOCK_NOSPIN_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(nospin);
OS_LOCK_DECL(nospin, 2);
#define OS_LOCK_NOSPIN_INIT OS_LOCK_INIT(nospin)
/*!
 * @typedef os_lock_spin_s
 *
 * @abstract
 * Deprecated os_lock variant that, on contention, first spins trying to
 * acquire the lock, then depresses the priority of the current thread, and
 * finally blocks the thread waiting for the lock to become available.
 * Equivalent to OSSpinLock and equally not recommended; see the discussion in
 * the libkern/OSAtomic.h headerdoc.
 *
 * @discussion
 * Spinlocks are intended to be held only for very brief periods of time. The
 * critical section must not make syscalls and should avoid touching areas of
 * memory that may trigger a page fault, in particular if the critical section
 * may be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads.
 *
 * Must be initialized with OS_LOCK_SPIN_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(spin);
OS_LOCK_DECL(spin, 2);
#define OS_LOCK_SPIN_INIT OS_LOCK_INIT(spin)
/*!
 * @typedef os_lock_handoff_s
 *
 * @abstract
 * os_lock variant that on contention hands off the current kernel thread to the
 * lock-owning userspace thread (if it is not running), temporarily overriding
 * its priority and IO throttle if necessary.
 *
 * @discussion
 * Intended for use in limited circumstances where the critical section might
 * be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads, where the ordinary os_lock_spin_s would
 * be likely to encounter a priority inversion.
 *
 * IMPORTANT: This lock variant is NOT intended as a general replacement for all
 * uses of os_lock_spin_s or OSSpinLock.
 *
 * Must be initialized with OS_LOCK_HANDOFF_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(handoff);
OS_LOCK_DECL(handoff, 2);
#define OS_LOCK_HANDOFF_INIT OS_LOCK_INIT(handoff)
/*!
 * @function os_lock_lock
 *
 * @abstract
 * Locks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_lock(os_lock_t lock);
/*!
 * @function os_lock_trylock
 *
 * @abstract
 * Locks an os_lock variant if it is not already locked.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_lock_trylock(os_lock_t lock);
/*!
 * @function os_lock_unlock
 *
 * @abstract
 * Unlocks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_unlock(os_lock_t lock);
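/*
 * Example: guarding a shared counter with the unfair os_lock variant. This is
 * a minimal sketch; `counter_lock`, `counter`, and `counter_increment` are
 * illustrative names, not part of this SPI. Any of the os_lock variants can
 * be passed to os_lock_lock()/os_lock_unlock() via the os_lock_t transparent
 * union.
 *
 *	static os_lock_unfair_s counter_lock = OS_LOCK_UNFAIR_INIT;
 *	static uint64_t counter;
 *
 *	static uint64_t
 *	counter_increment(void)
 *	{
 *		os_lock_lock(&counter_lock);
 *		uint64_t value = ++counter;
 *		os_lock_unlock(&counter_lock);
 *		return value;
 *	}
 */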
/*! @group os_unfair_lock SPI
 *
 * @abstract
 * Replacement for the deprecated OSSpinLock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The opaque lock value
 * contains thread ownership information that the system may use to attempt to
 * resolve priority inversions.
 *
 * This lock must be unlocked from the same thread that locked it; attempts to
 * unlock from a different thread will cause an assertion aborting the process.
 *
 * This lock must not be accessed from multiple processes or threads via shared
 * or multiply-mapped memory; the lock implementation relies on the address of
 * the lock value and owning process.
 *
 * @discussion
 * As with OSSpinLock, there is no attempt at fairness or lock ordering: an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock. This may be
 * advantageous for performance reasons, but also makes starvation of waiters a
 * possibility.
 *
 * Must be initialized with OS_UNFAIR_LOCK_INIT
 */
/*!
 * @typedef os_unfair_lock_options_t
 *
 * @const OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
 * This flag informs the runtime that the specified lock is used for data
 * synchronization and that the lock owner is always able to make progress
 * toward releasing the lock without the help of another thread in the same
 * process. This hint will cause the workqueue subsystem to not create new
 * threads to offset for threads waiting for the lock.
 *
 * When this flag is used, the code running under the critical section should
 * be well known and under your control (generally it should not call into
 * any other code).
 *
 * @const OS_UNFAIR_LOCK_ADAPTIVE_SPIN
 * This flag allows the kernel to use adaptive spinning when the holder
 * of the lock is currently on core. This should only be used for locks
 * where the protected critical section is always extremely short.
 */
OS_OPTIONS(os_unfair_lock_options, uint32_t,
	OS_UNFAIR_LOCK_NONE OS_SWIFT_NAME(None)
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00000000,
	OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION OS_SWIFT_NAME(DataSynchronization)
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00010000,
	OS_UNFAIR_LOCK_ADAPTIVE_SPIN OS_SWIFT_NAME(AdaptiveSpin)
			__API_AVAILABLE(macos(10.15), ios(13.0),
			tvos(13.0), watchos(6.0), bridgeos(4.0)) = 0x00040000,
);
#define OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(name) \
		static const os_unfair_lock_options_t \
		name##_FOR_SWIFT OS_SWIFT_NAME(name) = name
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_NONE);
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_ADAPTIVE_SPIN);
#undef OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT
/*!
 * @function os_unfair_lock_lock_with_options
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
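/*
 * Example: taking an os_unfair_lock with the data-synchronization hint. This
 * is a minimal sketch; `cache_lock`, `cache_entry_t`, and `cache_insert` are
 * illustrative names. The hint is only appropriate because the critical
 * section below is short, self-contained, and never waits on another thread.
 *
 *	static os_unfair_lock cache_lock = OS_UNFAIR_LOCK_INIT;
 *
 *	void
 *	cache_insert(cache_entry_t *entry)
 *	{
 *		os_unfair_lock_lock_with_options(&cache_lock,
 *				OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
 *		// update the cache data structure only; no syscalls,
 *		// no blocking on other threads
 *		os_unfair_lock_unlock(&cache_lock);
 *	}
 */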
/*! @group os_unfair_lock no-TSD interfaces
 *
 * Like the above, but these do not require being on a thread with valid TSD,
 * so they can be called from injected mach threads. The normal routines use
 * the TSD value for mach_thread_self(); these routines use MACH_PORT_DEAD for
 * the locked value instead. As a result, they will be unable to resolve
 * priority inversions.
 *
 * This should only be used by libpthread.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock);

OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock);
/*! @group os_unfair_recursive_lock SPI
 *
 * @abstract
 * Similar to os_unfair_lock, but recursive.
 *
 * Must be initialized with OS_UNFAIR_RECURSIVE_LOCK_INIT
 */

#define OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY \
		__OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) \
		__TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		((os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		(os_unfair_recursive_lock{OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus)
#define OS_UNFAIR_RECURSIVE_LOCK_INIT (os_unfair_recursive_lock(\
		(os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0}))
#else
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		{OS_UNFAIR_LOCK_INIT, 0}
#endif // OS_UNFAIR_RECURSIVE_LOCK_INIT
/*!
 * @typedef os_unfair_recursive_lock
 *
 * @abstract
 * Low-level lock that allows waiters to block efficiently on contention.
 *
 * @discussion
 * See os_unfair_lock.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
typedef struct os_unfair_recursive_lock_s {
	os_unfair_lock ourl_lock;
	uint32_t ourl_count;
} os_unfair_recursive_lock, *os_unfair_recursive_lock_t;
/*!
 * @function os_unfair_recursive_lock_lock_with_options
 *
 * @abstract
 * See os_unfair_lock_lock_with_options
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options);
/*!
 * @function os_unfair_recursive_lock_lock
 *
 * @abstract
 * See os_unfair_lock_lock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_lock(os_unfair_recursive_lock_t lock)
{
	os_unfair_recursive_lock_lock_with_options(lock, OS_UNFAIR_LOCK_NONE);
}
/*!
 * @function os_unfair_recursive_lock_trylock
 *
 * @abstract
 * See os_unfair_lock_trylock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_NONNULL_ALL
bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);
/*!
 * @function os_unfair_recursive_lock_unlock
 *
 * @abstract
 * See os_unfair_lock_unlock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
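/*
 * Example: a re-entrant operation where one locked entry point may call
 * another on the same thread. This is a minimal sketch; the `registry_*`
 * names and `item_t` are illustrative only.
 *
 *	static os_unfair_recursive_lock registry_lock =
 *			OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *	void
 *	registry_add(item_t *item)
 *	{
 *		os_unfair_recursive_lock_lock(&registry_lock);
 *		if (item->dependency) {
 *			registry_add(item->dependency); // relocks recursively
 *		}
 *		// ... insert item ...
 *		os_unfair_recursive_lock_unlock(&registry_lock);
 *	}
 */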
/*!
 * @function os_unfair_recursive_lock_tryunlock4objc
 *
 * @abstract
 * See os_unfair_lock_unlock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);
/*!
 * @function os_unfair_recursive_lock_assert_owner
 *
 * @abstract
 * See os_unfair_lock_assert_owner
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_owner(os_unfair_recursive_lock_t lock)
{
	os_unfair_lock_assert_owner(&lock->ourl_lock);
}
/*!
 * @function os_unfair_recursive_lock_assert_not_owner
 *
 * @abstract
 * See os_unfair_lock_assert_not_owner
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_not_owner(os_unfair_recursive_lock_t lock)
{
	os_unfair_lock_assert_not_owner(&lock->ourl_lock);
}
/*!
 * @function os_unfair_recursive_lock_owned
 *
 * @abstract
 * This function is reserved for the use of people who want to soft-fault
 * when locking models have been violated.
 *
 * @discussion
 * This is meant for SQLite use to detect existing misuse of the API surface,
 * and is not meant for anything other than calling os_log_fault() when such
 * contracts are violated.
 *
 * There is little point in using this value for logic, as the
 * os_unfair_recursive_lock is already recursive anyway.
 */
__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool
os_unfair_recursive_lock_owned(os_unfair_recursive_lock_t lock);
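/*
 * Example: soft-faulting on a violated locking contract, per the discussion
 * above. This is a minimal sketch; `registry_lock` is illustrative and
 * <os/log.h> is assumed to have been included.
 *
 *	if (!os_unfair_recursive_lock_owned(&registry_lock)) {
 *		os_log_fault(OS_LOG_DEFAULT,
 *				"registry accessed without holding registry_lock");
 *	}
 */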
/*!
 * @function os_unfair_recursive_lock_unlock_forked_child
 *
 * @abstract
 * Function to be used in an atfork child handler to unlock a recursive unfair
 * lock.
 *
 * @discussion
 * This function helps with handling recursive locks in the presence of fork.
 *
 * It is typical to set up atfork handlers that will:
 * - take the lock in the pre-fork handler,
 * - drop the lock in the parent handler,
 * - reset the lock in the forked child.
 *
 * However, because a recursive lock may have been held by the current thread
 * already, resetting needs to act like an unlock. This function serves that
 * purpose. Unlike os_unfair_recursive_lock_unlock(), this function will fix up
 * the lock ownership to match the new identity of the thread after fork().
 */
__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock);
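/*
 * Example: the atfork pattern described above. This is a minimal sketch;
 * `g_lock` and the handler names are illustrative only.
 *
 *	static os_unfair_recursive_lock g_lock = OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *	static void _prepare(void) { os_unfair_recursive_lock_lock(&g_lock); }
 *	static void _parent(void) { os_unfair_recursive_lock_unlock(&g_lock); }
 *	static void _child(void)
 *	{
 *		os_unfair_recursive_lock_unlock_forked_child(&g_lock);
 *	}
 *
 *	// during process initialization:
 *	pthread_atfork(_prepare, _parent, _child);
 */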
#if __has_attribute(cleanup)

/*!
 * @function os_unfair_lock_scoped_guard_unlock
 *
 * @abstract
 * Used by os_unfair_lock_lock_scoped_guard
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_lock_scoped_guard_unlock(os_unfair_lock_t _Nonnull * _Nonnull lock)
{
	os_unfair_lock_unlock(*lock);
}
/*!
 * @function os_unfair_lock_lock_scoped_guard
 *
 * @abstract
 * Same as os_unfair_lock_lock() except that os_unfair_lock_unlock() is
 * automatically called when the enclosing C scope ends.
 *
 * @param guard_name
 * Name for the variable holding the guard.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @see os_unfair_lock_lock
 * @see os_unfair_lock_unlock
 */
#define os_unfair_lock_lock_scoped_guard(guard_name, lock) \
	os_unfair_lock_t \
			__attribute__((cleanup(os_unfair_lock_scoped_guard_unlock))) \
			guard_name = lock; \
	os_unfair_lock_lock(guard_name)

#endif // __has_attribute(cleanup)
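/*
 * Example: a critical section that unlocks automatically when the scope ends,
 * including on early return. This is a minimal sketch; `stats_lock`,
 * `stats_count`, and `stats_bump` are illustrative only.
 *
 *	static os_unfair_lock stats_lock = OS_UNFAIR_LOCK_INIT;
 *	static uint64_t stats_count;
 *
 *	void
 *	stats_bump(void)
 *	{
 *		os_unfair_lock_lock_scoped_guard(stats_guard, &stats_lock);
 *		stats_count++;
 *	} // os_unfair_lock_unlock(&stats_lock) runs here
 */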
OS_ASSUME_NONNULL_END

/*! @group Inline os_unfair_lock interfaces
 *
 * Inline versions of the os_unfair_lock fastpath.
 *
 * Intended exclusively for special, highly performance-sensitive cases where
 * the function calls to the os_unfair_lock API entrypoints add measurable
 * overhead.
 *
 * Do not use in frameworks to implement synchronization API primitives that
 * are exposed to developers; that would lead to false positives for that API
 * from tools such as ThreadSanitizer.
 *
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 * DO NOT USE IN CODE THAT IS NOT PART OF THE OPERATING SYSTEM OR THAT IS NOT
 * REBUILT AS PART OF AN OS WORLDBUILD. YOU HAVE BEEN WARNED!
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 *
 * Define OS_UNFAIR_LOCK_INLINE=1 to indicate that you have read the warning
 * above and still wish to use these interfaces.
 */
#if defined(OS_UNFAIR_LOCK_INLINE) && OS_UNFAIR_LOCK_INLINE

#include <pthread/tsd_private.h>

#ifdef __cplusplus
#if !(__has_include(<atomic>) && __has_extension(cxx_atomic))
#error Cannot use inline os_unfair_lock without <atomic> and C++11 atomics
#endif
#include <atomic>
typedef std::atomic<os_unfair_lock> _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) std::_a
#else
#if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
#error Cannot use inline os_unfair_lock without <stdatomic.h> and C11 atomics
#endif
#include <stdatomic.h>
typedef _Atomic(os_unfair_lock) _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) _a
#endif

OS_ASSUME_NONNULL_BEGIN
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_LOCK_UNLOCKED ((os_unfair_lock){0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock{})
#elif defined(__cplusplus)
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock())
#else
#define OS_UNFAIR_LOCK_UNLOCKED {0}
#endif
/*!
 * @function os_unfair_lock_lock_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock(lock);
	}
}
/*!
 * @function os_unfair_lock_lock_with_options_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_with_options_inline(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	if (!_pthread_has_direct_tsd()) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
}
/*!
 * @function os_unfair_lock_trylock_inline
 *
 * @abstract
 * Locks an os_unfair_lock if it is not already locked.
 *
 * @discussion
 * It is invalid to surround this function with a retry loop; if this function
 * returns false, the program must be able to proceed without having acquired
 * the lock, or it must call os_unfair_lock_lock_inline() instead.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_WARN_RESULT OS_NONNULL_ALL
bool
os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed));
}
/*!
 * @function os_unfair_lock_unlock_inline
 *
 * @abstract
 * Unlocks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock(lock);
	}
}
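/*
 * Example: using the inline fastpath from code that has opted in by defining
 * OS_UNFAIR_LOCK_INLINE=1 before inclusion, as described above. This is a
 * minimal sketch; `hot_lock`, `hot_counter`, and `hot_path_bump` are
 * illustrative only.
 *
 *	#define OS_UNFAIR_LOCK_INLINE 1
 *	#include <os/lock_private.h>
 *
 *	static os_unfair_lock hot_lock = OS_UNFAIR_LOCK_INIT;
 *	static uint64_t hot_counter;
 *
 *	void
 *	hot_path_bump(void)
 *	{
 *		os_unfair_lock_lock_inline(&hot_lock);
 *		hot_counter++;
 *		os_unfair_lock_unlock_inline(&hot_lock);
 *	}
 */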
/*!
 * @function os_unfair_lock_lock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Locks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	uint32_t mts = MACH_PORT_DEAD;
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_no_tsd_4libpthread(lock);
	}
}
/*!
 * @function os_unfair_lock_unlock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Unlocks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	uint32_t mts = MACH_PORT_DEAD;
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock_no_tsd_4libpthread(lock);
	}
}
OS_ASSUME_NONNULL_END

#undef OSLOCK_STD

#endif // OS_UNFAIR_LOCK_INLINE

#endif // __OS_LOCK_PRIVATE__