/*
 * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
#ifndef __OS_LOCK_PRIVATE__
#define __OS_LOCK_PRIVATE__

#include <Availability.h>
#include <TargetConditionals.h>
#include <sys/cdefs.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <mach/port.h>
#include <os/base_private.h>
#include <os/lock.h>
OS_ASSUME_NONNULL_BEGIN

/*! @header
 * Low-level lock SPI
 */

#define OS_LOCK_SPI_VERSION 20190424
/*!
 * @typedef os_lock_t
 *
 * @abstract
 * Pointer to one of the os_lock variants.
 */

#define OS_LOCK_TYPE_STRUCT(type) const struct _os_lock_type_##type##_s
#define OS_LOCK_TYPE_REF(type) _os_lock_type_##type
#define OS_LOCK_TYPE_DECL(type) OS_LOCK_TYPE_STRUCT(type) OS_LOCK_TYPE_REF(type)

#define OS_LOCK(type) os_lock_##type##_s
#define OS_LOCK_STRUCT(type) struct OS_LOCK(type)

#if defined(__cplusplus) && __cplusplus >= 201103L

#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
			private: \
			OS_LOCK_TYPE_STRUCT(type) * osl_type OS_UNUSED; \
			uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
			public: \
			constexpr OS_LOCK(type)() : \
				osl_type(&OS_LOCK_TYPE_REF(type)), _osl_##type##_opaque() {} \
		} OS_LOCK(type)

#define OS_LOCK_INIT(type) {}

typedef OS_LOCK_STRUCT(base) {
	protected:
	constexpr OS_LOCK(base)() {}
} OS_LOCK(base);

#else

#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) { \
			OS_LOCK_TYPE_STRUCT(type) * osl_type; \
			uintptr_t _osl_##type##_opaque[size-1]; \
		} OS_LOCK(type)

#define OS_LOCK_INIT(type) { .osl_type = &OS_LOCK_TYPE_REF(type), }

#endif

#ifndef OS_LOCK_T_MEMBER
#define OS_LOCK_T_MEMBER(type) OS_LOCK_STRUCT(type) *_osl_##type
#endif

typedef OS_TRANSPARENT_UNION union {
	OS_LOCK_T_MEMBER(base);
	OS_LOCK_T_MEMBER(unfair);
	OS_LOCK_T_MEMBER(nospin);
	OS_LOCK_T_MEMBER(spin);
	OS_LOCK_T_MEMBER(handoff);
} os_lock_t;
/*!
 * @typedef os_lock_unfair_s
 *
 * @abstract
 * os_lock variant equivalent to os_unfair_lock. Does not spin on contention
 * but waits in the kernel to be woken up by an unlock. The lock value contains
 * ownership information that the system may use to attempt to resolve
 * priority inversions.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock, so starvation is
 * possible.
 *
 * Must be initialized with OS_LOCK_UNFAIR_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(unfair);
OS_LOCK_DECL(unfair, 2);
#define OS_LOCK_UNFAIR_INIT OS_LOCK_INIT(unfair)
/*!
 * @typedef os_lock_nospin_s
 *
 * @abstract
 * os_lock variant that does not spin on contention but waits in the kernel to
 * be woken up by an unlock. No attempt to resolve priority inversions is made,
 * so os_unfair_lock or os_lock_unfair_s should generally be preferred.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock, so starvation is
 * possible.
 *
 * Must be initialized with OS_LOCK_NOSPIN_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(nospin);
OS_LOCK_DECL(nospin, 2);
#define OS_LOCK_NOSPIN_INIT OS_LOCK_INIT(nospin)
/*!
 * @typedef os_lock_spin_s
 *
 * @abstract
 * Deprecated os_lock variant that on contention first spins trying to acquire
 * the lock, then depresses the priority of the current thread, and finally
 * blocks waiting for the lock to become available. Equivalent to OSSpinLock
 * and equally not recommended; see the discussion in the libkern/OSAtomic.h
 * headerdoc.
 *
 * @discussion
 * Spinlocks are intended to be held only for very brief periods of time. The
 * critical section must not make syscalls and should avoid touching areas of
 * memory that may trigger a page fault, in particular if the critical section
 * may be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads.
 *
 * Must be initialized with OS_LOCK_SPIN_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(spin);
OS_LOCK_DECL(spin, 2);
#define OS_LOCK_SPIN_INIT OS_LOCK_INIT(spin)
/*!
 * @typedef os_lock_handoff_s
 *
 * @abstract
 * os_lock variant that on contention hands off the current kernel thread to
 * the lock-owning userspace thread (if it is not running), temporarily
 * overriding its priority and IO throttle if necessary.
 *
 * @discussion
 * Intended for use in limited circumstances where the critical section might
 * be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads where the ordinary os_lock_spin_s would
 * be likely to encounter a priority inversion.
 *
 * IMPORTANT: This lock variant is NOT intended as a general replacement for
 * all uses of os_lock_spin_s or OSSpinLock.
 *
 * Must be initialized with OS_LOCK_HANDOFF_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(handoff);
OS_LOCK_DECL(handoff, 2);
#define OS_LOCK_HANDOFF_INIT OS_LOCK_INIT(handoff)
/*!
 * @function os_lock_lock
 *
 * @abstract
 * Locks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_lock(os_lock_t lock);

/*!
 * @function os_lock_trylock
 *
 * @abstract
 * Locks an os_lock variant if it is not already locked.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_lock_trylock(os_lock_t lock);

/*!
 * @function os_lock_unlock
 *
 * @abstract
 * Unlocks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_unlock(os_lock_t lock);
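
/*
 * Usage sketch (illustrative only, not part of this SPI): a statically
 * initialized os_lock_unfair_s used through the generic entrypoints above.
 * The transparent union os_lock_t accepts a pointer to any of the variants;
 * the names example_lock and example_count are hypothetical.
 *
 *	static os_lock_unfair_s example_lock = OS_LOCK_UNFAIR_INIT;
 *	static uint64_t example_count;
 *
 *	void
 *	example_increment(void)
 *	{
 *		os_lock_lock(&example_lock);
 *		example_count++;
 *		os_lock_unlock(&example_lock);
 *	}
 *
 *	bool
 *	example_increment_if_uncontended(void)
 *	{
 *		if (!os_lock_trylock(&example_lock)) return false;
 *		example_count++;
 *		os_lock_unlock(&example_lock);
 *		return true;
 *	}
 */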
/*! @group os_unfair_lock SPI
 *
 * @abstract
 * Replacement for the deprecated OSSpinLock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The opaque lock value
 * contains thread ownership information that the system may use to attempt to
 * resolve priority inversions.
 *
 * This lock must be unlocked from the same thread that locked it; attempts to
 * unlock from a different thread will cause an assertion aborting the process.
 *
 * This lock must not be accessed from multiple processes or threads via shared
 * or multiply-mapped memory; the lock implementation relies on the address of
 * the lock value and owning process.
 *
 * @discussion
 * As with OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock. This may be
 * advantageous for performance reasons, but also makes starvation of waiters a
 * possibility.
 *
 * Must be initialized with OS_UNFAIR_LOCK_INIT
 */
/*!
 * @typedef os_unfair_lock_options_t
 *
 * @const OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
 * This flag informs the runtime that the specified lock is used for data
 * synchronization and that the lock owner is always able to make progress
 * toward releasing the lock without the help of another thread in the same
 * process. This hint will cause the workqueue subsystem to not create new
 * threads to compensate for threads waiting for the lock.
 *
 * When this flag is used, the code running under the critical section should
 * be well known and under your control (generally it should not call into
 * the framework).
 *
 * @const OS_UNFAIR_LOCK_ADAPTIVE_SPIN
 * This flag allows the kernel to use adaptive spinning when the holder
 * of the lock is currently on core. This should only be used for locks
 * where the protected critical section is always extremely short.
 */
OS_OPTIONS(os_unfair_lock_options, uint32_t,
	OS_UNFAIR_LOCK_NONE OS_SWIFT_NAME(None)
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00000000,
	OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION OS_SWIFT_NAME(DataSynchronization)
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00010000,
	OS_UNFAIR_LOCK_ADAPTIVE_SPIN OS_SWIFT_NAME(AdaptiveSpin)
			__API_AVAILABLE(macos(10.15), ios(13.0),
			tvos(13.0), watchos(6.0), bridgeos(4.0)) = 0x00040000,
);
#if __swift__
#define OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(name) \
		static const os_unfair_lock_options_t \
		name##_FOR_SWIFT OS_SWIFT_NAME(name) = name
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_NONE);
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_ADAPTIVE_SPIN);
#undef OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT
#endif // __swift__
/*!
 * @function os_unfair_lock_lock_with_options
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
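
/*
 * Usage sketch (illustrative only, not part of this SPI): taking the lock
 * with the data-synchronization hint for a short, well-known critical
 * section. The names g_state_lock and g_state are hypothetical.
 *
 *	static os_unfair_lock g_state_lock = OS_UNFAIR_LOCK_INIT;
 *	static int g_state;
 *
 *	void
 *	set_state(int value)
 *	{
 *		os_unfair_lock_lock_with_options(&g_state_lock,
 *				OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
 *		g_state = value;
 *		os_unfair_lock_unlock(&g_state_lock);
 *	}
 */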
/*!
 * @group os_unfair_lock no-TSD interfaces
 *
 * Like the above, but don't require being on a thread with valid TSD, so they
 * can be called from injected mach-threads.
 *
 * The normal routines use the TSD value for mach_thread_self(), but mach
 * threads do not have TSDs. Instead these functions require the value for
 * mach_thread_self() to be passed explicitly.
 *
 * This should only be used directly by libpthread.
 */
__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_no_tsd(os_unfair_lock_t lock,
		os_unfair_lock_options_t options, mach_port_t mts);

__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_unlock_no_tsd(os_unfair_lock_t lock, mach_port_t mts);
/*! @group os_unfair_recursive_lock SPI
 *
 * @abstract
 * Similar to os_unfair_lock, but recursive.
 *
 * Must be initialized with OS_UNFAIR_RECURSIVE_LOCK_INIT
 */

#define OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY \
		__OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) \
		__TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)

#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		((os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		(os_unfair_recursive_lock{OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus)
#define OS_UNFAIR_RECURSIVE_LOCK_INIT (os_unfair_recursive_lock(\
		(os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0}))
#else
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		{OS_UNFAIR_LOCK_INIT, 0}
#endif // OS_UNFAIR_RECURSIVE_LOCK_INIT
/*!
 * @typedef os_unfair_recursive_lock
 *
 * @abstract
 * Low-level lock that allows waiters to block efficiently on contention.
 *
 * @discussion
 * See os_unfair_lock.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
typedef struct os_unfair_recursive_lock_s {
	os_unfair_lock ourl_lock;
	uint32_t ourl_count;
} os_unfair_recursive_lock, *os_unfair_recursive_lock_t;
/*!
 * @function os_unfair_recursive_lock_lock_with_options
 *
 * @abstract
 * See os_unfair_lock_lock_with_options
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options);
/*!
 * @function os_unfair_recursive_lock_lock
 *
 * @abstract
 * See os_unfair_lock_lock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_lock(os_unfair_recursive_lock_t lock)
{
	os_unfair_recursive_lock_lock_with_options(lock, OS_UNFAIR_LOCK_NONE);
}
/*!
 * @function os_unfair_recursive_lock_trylock
 *
 * @abstract
 * See os_unfair_lock_trylock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_NONNULL_ALL
bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);
/*!
 * @function os_unfair_recursive_lock_unlock
 *
 * @abstract
 * See os_unfair_lock_unlock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
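
/*
 * Usage sketch (illustrative only, not part of this SPI): the owning thread
 * may re-enter the lock, and every lock must be balanced by an unlock. The
 * names g_rlock, inner and outer are hypothetical.
 *
 *	static os_unfair_recursive_lock g_rlock = OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *	static void
 *	inner(void)
 *	{
 *		os_unfair_recursive_lock_lock(&g_rlock); // re-entered by the owner
 *		// ... critical section ...
 *		os_unfair_recursive_lock_unlock(&g_rlock);
 *	}
 *
 *	static void
 *	outer(void)
 *	{
 *		os_unfair_recursive_lock_lock(&g_rlock);
 *		inner();
 *		os_unfair_recursive_lock_unlock(&g_rlock);
 *	}
 */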
/*!
 * @function os_unfair_recursive_lock_tryunlock4objc
 *
 * @abstract
 * See os_unfair_lock_unlock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);
/*!
 * @function os_unfair_recursive_lock_assert_owner
 *
 * @abstract
 * See os_unfair_lock_assert_owner
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_owner(os_unfair_recursive_lock_t lock)
{
	os_unfair_lock_assert_owner(&lock->ourl_lock);
}
/*!
 * @function os_unfair_recursive_lock_assert_not_owner
 *
 * @abstract
 * See os_unfair_lock_assert_not_owner
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_not_owner(os_unfair_recursive_lock_t lock)
{
	os_unfair_lock_assert_not_owner(&lock->ourl_lock);
}
/*!
 * @function os_unfair_recursive_lock_owned
 *
 * @abstract
 * This function is reserved for the use of people who want to soft-fault
 * when locking models have been violated.
 *
 * @discussion
 * This is meant for SQLite use to detect existing misuse of the API surface,
 * and is not meant for anything else than calling os_log_fault() when such
 * contracts are violated.
 *
 * There is little point in using this value for logic, as
 * os_unfair_recursive_lock is already recursive anyway.
 */
__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool
os_unfair_recursive_lock_owned(os_unfair_recursive_lock_t lock);
/*!
 * @function os_unfair_recursive_lock_unlock_forked_child
 *
 * @abstract
 * Function to be used in an atfork child handler to unlock a recursive unfair
 * lock.
 *
 * @discussion
 * This function helps with handling recursive locks in the presence of fork.
 *
 * It is typical to set up atfork handlers that will (see the sketch after
 * this declaration):
 * - take the lock in the pre-fork handler,
 * - drop the lock in the parent handler,
 * - reset the lock in the forked child.
 *
 * However, because a recursive lock may have been held by the current thread
 * already, resetting needs to act like an unlock. This function serves this
 * purpose. Unlike os_unfair_recursive_lock_unlock(), this function will fix up
 * the lock ownership to match the new identity of the thread after fork().
 */
__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock);
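
/*
 * Usage sketch (illustrative only, not part of this SPI) of the atfork
 * pattern described above. The names g_rlock, prepare, parent and child are
 * hypothetical.
 *
 *	static os_unfair_recursive_lock g_rlock = OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *	static void prepare(void) { os_unfair_recursive_lock_lock(&g_rlock); }
 *	static void parent(void) { os_unfair_recursive_lock_unlock(&g_rlock); }
 *	static void child(void)
 *	{
 *		// Acts like an unlock, and rebinds ownership to the thread's
 *		// post-fork identity.
 *		os_unfair_recursive_lock_unlock_forked_child(&g_rlock);
 *	}
 *
 *	// During initialization:
 *	//	pthread_atfork(prepare, parent, child);
 */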
#if __has_attribute(cleanup)

/*!
 * @function os_unfair_lock_scoped_guard_unlock
 *
 * @abstract
 * Used by os_unfair_lock_lock_scoped_guard
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_lock_scoped_guard_unlock(os_unfair_lock_t _Nonnull * _Nonnull lock)
{
	os_unfair_lock_unlock(*lock);
}

/*!
 * @function os_unfair_lock_lock_scoped_guard
 *
 * @abstract
 * Same as os_unfair_lock_lock() except that os_unfair_lock_unlock() is
 * automatically called when the enclosing C scope ends.
 *
 * @param guard_name
 * Name for the variable holding the guard.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @see os_unfair_lock_lock
 * @see os_unfair_lock_unlock
 */
#define os_unfair_lock_lock_scoped_guard(guard_name, lock) \
	os_unfair_lock_t \
		__attribute__((cleanup(os_unfair_lock_scoped_guard_unlock))) \
		guard_name = lock; \
	os_unfair_lock_lock(guard_name)
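
/*
 * Usage sketch (illustrative only, not part of this SPI): the guard releases
 * the lock when the enclosing scope ends, including on early return. The
 * names g_lock and guarded_store are hypothetical.
 *
 *	static os_unfair_lock g_lock = OS_UNFAIR_LOCK_INIT;
 *
 *	int
 *	guarded_store(int *ptr, int value)
 *	{
 *		os_unfair_lock_lock_scoped_guard(guard, &g_lock);
 *		if (!ptr) return -1; // g_lock unlocked by the cleanup handler
 *		*ptr = value;
 *		return 0; // g_lock unlocked here as well
 *	}
 */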
#endif // __has_attribute(cleanup)
OS_ASSUME_NONNULL_END

/*! @group Inline os_unfair_lock interfaces
 *
 * Inline versions of the os_unfair_lock fastpath.
 *
 * Intended exclusively for special highly performance-sensitive cases where
 * the function calls to the os_unfair_lock API entrypoints add measurable
 * overhead.
 *
 * Do not use in frameworks to implement synchronization API primitives that
 * are exposed to developers; that would lead to false positives for that API
 * from tools such as ThreadSanitizer.
 *
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 * DO NOT USE IN CODE THAT IS NOT PART OF THE OPERATING SYSTEM OR THAT IS NOT
 * REBUILT AS PART OF AN OS WORLDBUILD. YOU HAVE BEEN WARNED!
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 *
 * Define OS_UNFAIR_LOCK_INLINE=1 to indicate that you have read the warning
 * above and still wish to use these interfaces.
 */
#if defined(OS_UNFAIR_LOCK_INLINE) && OS_UNFAIR_LOCK_INLINE

#include <pthread/tsd_private.h>

#ifdef __cplusplus
extern "C++" {
#if !(__has_include(<atomic>) && __has_extension(cxx_atomic))
#error Cannot use inline os_unfair_lock without <atomic> and C++11 atomics
#endif
#include <atomic>
typedef std::atomic<os_unfair_lock> _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) std::_a
extern "C" {
#else
#if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
#error Cannot use inline os_unfair_lock without <stdatomic.h> and C11 atomics
#endif
#include <stdatomic.h>
typedef _Atomic(os_unfair_lock) _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) _a
#endif

OS_ASSUME_NONNULL_BEGIN

#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_LOCK_UNLOCKED ((os_unfair_lock){0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock{})
#elif defined(__cplusplus)
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock())
#else
#define OS_UNFAIR_LOCK_UNLOCKED {0}
#endif
/*!
 * @function os_unfair_lock_lock_no_tsd_inline
 *
 * @abstract
 * Locks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used directly by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_no_tsd_inline(os_unfair_lock_t lock,
		os_unfair_lock_options_t options, mach_port_t mts)
{
	// Fastpath: atomically swing the lock from unlocked to owned by the
	// passed-in thread port; fall back to the outlined slowpath on contention.
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_no_tsd(lock, options, mts);
	}
}
/*!
 * @function os_unfair_lock_unlock_no_tsd_inline
 *
 * @abstract
 * Unlocks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used directly by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_no_tsd_inline(os_unfair_lock_t lock, mach_port_t mts)
{
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock_no_tsd(lock, mts);
	}
}
/*!
 * @function os_unfair_lock_lock_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock(lock);
	}
}
/*!
 * @function os_unfair_lock_lock_with_options_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_with_options_inline(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	if (!_pthread_has_direct_tsd()) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
}
/*!
 * @function os_unfair_lock_trylock_inline
 *
 * @abstract
 * Locks an os_unfair_lock if it is not already locked.
 *
 * @discussion
 * It is invalid to surround this function with a retry loop; if this function
 * returns false, the program must be able to proceed without having acquired
 * the lock, or it must call os_unfair_lock_lock_inline() instead.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_WARN_RESULT OS_NONNULL_ALL
bool
os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	// Single CAS attempt: success means the lock is now owned by this thread.
	return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed));
}
/*!
 * @function os_unfair_lock_unlock_inline
 *
 * @abstract
 * Unlocks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock *)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock(lock);
	}
}
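
/*
 * Usage sketch (illustrative only, not part of this SPI): opting into the
 * inline fastpath, which is only valid in code built as part of the OS (see
 * the warning above). The names g_lock and fast_path are hypothetical.
 *
 *	// Before including this header:
 *	//	#define OS_UNFAIR_LOCK_INLINE 1
 *
 *	static os_unfair_lock g_lock = OS_UNFAIR_LOCK_INIT;
 *
 *	static void
 *	fast_path(void)
 *	{
 *		os_unfair_lock_lock_inline(&g_lock);
 *		// ... extremely short critical section ...
 *		os_unfair_lock_unlock_inline(&g_lock);
 *	}
 */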
OS_ASSUME_NONNULL_END

#undef OSLOCK_STD
#ifdef __cplusplus
} // extern "C"
} // extern "C++"
#endif

#endif // OS_UNFAIR_LOCK_INLINE

#endif // __OS_LOCK_PRIVATE__