/*
 * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
#ifndef __OS_LOCK_PRIVATE__
#define __OS_LOCK_PRIVATE__

#include <Availability.h>
#include <TargetConditionals.h>
#include <sys/cdefs.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <os/base_private.h>
#include <os/lock.h>

OS_ASSUME_NONNULL_BEGIN

#define OS_LOCK_SPI_VERSION 20160406
/*!
 * @typedef os_lock_t
 *
 * @abstract
 * Pointer to one of the os_lock variants.
 */
#define OS_LOCK_TYPE_STRUCT(type) const struct _os_lock_type_##type##_s
#define OS_LOCK_TYPE_REF(type) _os_lock_type_##type
#define OS_LOCK_TYPE_DECL(type) OS_LOCK_TYPE_STRUCT(type) OS_LOCK_TYPE_REF(type)

#define OS_LOCK(type) os_lock_##type##_s
#define OS_LOCK_STRUCT(type) struct OS_LOCK(type)
#if defined(__cplusplus) && __cplusplus >= 201103L

#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
			private: \
			OS_LOCK_TYPE_STRUCT(type) * const osl_type OS_UNUSED; \
			uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
			public: \
			constexpr OS_LOCK(type)() : \
				osl_type(&OS_LOCK_TYPE_REF(type)), _osl_##type##_opaque() {} \
		} OS_LOCK(type)
#define OS_LOCK_INIT(type) {}

typedef OS_LOCK_STRUCT(base) {
	protected:
	constexpr OS_LOCK(base)() {}
} OS_LOCK(base);

#else

#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) { \
			OS_LOCK_TYPE_STRUCT(type) * const osl_type; \
			uintptr_t _osl_##type##_opaque[size-1]; \
		} OS_LOCK(type)

#define OS_LOCK_INIT(type) { .osl_type = &OS_LOCK_TYPE_REF(type), }

#endif
#ifndef OS_LOCK_T_MEMBER
#define OS_LOCK_T_MEMBER(type) OS_LOCK_STRUCT(type) *_osl_##type
#endif

typedef OS_TRANSPARENT_UNION union {
	OS_LOCK_T_MEMBER(base);
	OS_LOCK_T_MEMBER(unfair);
	OS_LOCK_T_MEMBER(nospin);
	OS_LOCK_T_MEMBER(spin);
	OS_LOCK_T_MEMBER(handoff);
	OS_LOCK_T_MEMBER(eliding);
	OS_LOCK_T_MEMBER(transactional);
} os_lock_t;
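/*
 * Illustrative sketch (not part of the SPI): in a C translation unit,
 * OS_LOCK_DECL(unfair, 2) above expands to roughly
 *
 *	typedef struct os_lock_unfair_s {
 *		const struct _os_lock_type_unfair_s * const osl_type;
 *		uintptr_t _osl_unfair_opaque[1];
 *	} os_lock_unfair_s;
 *
 * i.e. a pointer to the variant's type descriptor followed by opaque storage.
 * The transparent union os_lock_t then lets a pointer to any declared variant
 * be passed directly to the generic os_lock_lock/os_lock_trylock/
 * os_lock_unlock entrypoints declared below.
 */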
/*!
 * @typedef os_lock_unfair_s
 *
 * @abstract
 * os_lock variant equivalent to os_unfair_lock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The lock value contains
 * ownership information that the system may use to attempt to resolve priority
 * inversions.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
 * can potentially immediately reacquire the lock before a woken up waiter gets
 * an opportunity to attempt to acquire the lock, so starvation is possible.
 *
 * Must be initialized with OS_LOCK_UNFAIR_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(unfair);
OS_LOCK_DECL(unfair, 2);
#define OS_LOCK_UNFAIR_INIT OS_LOCK_INIT(unfair)
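/*
 * Example (illustrative only; names are hypothetical): declaring and using an
 * os_lock_unfair_s with the generic entrypoints declared later in this header.
 *
 *	static os_lock_unfair_s example_lock = OS_LOCK_UNFAIR_INIT;
 *	static uint64_t example_counter;
 *
 *	static void
 *	example_increment(void)
 *	{
 *		os_lock_lock(&example_lock);   // waits in the kernel on contention
 *		example_counter++;
 *		os_lock_unlock(&example_lock); // wakes one waiter, if any
 *	}
 */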
/*!
 * @typedef os_lock_nospin_s
 *
 * @abstract
 * os_lock variant that does not spin on contention but waits in the kernel to
 * be woken up by an unlock. No attempt to resolve priority inversions is made
 * so os_unfair_lock or os_lock_unfair_s should generally be preferred.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
 * can potentially immediately reacquire the lock before a woken up waiter gets
 * an opportunity to attempt to acquire the lock, so starvation is possible.
 *
 * Must be initialized with OS_LOCK_NOSPIN_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(nospin);
OS_LOCK_DECL(nospin, 2);
#define OS_LOCK_NOSPIN_INIT OS_LOCK_INIT(nospin)
/*!
 * @typedef os_lock_spin_s
 *
 * @abstract
 * Deprecated os_lock variant that on contention first spins trying to acquire
 * the lock, then depresses the priority of the current thread, and finally
 * blocks waiting for the lock to become available. Equivalent to OSSpinLock
 * and equally not recommended; see discussion in libkern/OSAtomic.h headerdoc.
 *
 * @discussion
 * Spinlocks are intended to be held only for very brief periods of time. The
 * critical section must not make syscalls and should avoid touching areas of
 * memory that may trigger a page fault, in particular if the critical section
 * may be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads.
 *
 * Must be initialized with OS_LOCK_SPIN_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(spin);
OS_LOCK_DECL(spin, 2);
#define OS_LOCK_SPIN_INIT OS_LOCK_INIT(spin)
/*!
 * @typedef os_lock_handoff_s
 *
 * @abstract
 * os_lock variant that on contention hands off the current kernel thread to the
 * lock-owning userspace thread (if it is not running), temporarily overriding
 * its priority and IO throttle if necessary.
 *
 * @discussion
 * Intended for use in limited circumstances where the critical section might
 * be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads where the ordinary os_lock_spin_s would
 * be likely to encounter a priority inversion.
 *
 * IMPORTANT: This lock variant is NOT intended as a general replacement for all
 * uses of os_lock_spin_s or OSSpinLock.
 *
 * Must be initialized with OS_LOCK_HANDOFF_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(handoff);
OS_LOCK_DECL(handoff, 2);
#define OS_LOCK_HANDOFF_INIT OS_LOCK_INIT(handoff)
#if !TARGET_OS_IPHONE
/*!
 * @typedef os_lock_eliding_s
 *
 * @abstract
 * os_lock variant that uses hardware lock elision support if available to allow
 * multiple processors to concurrently execute a critical section as long as
 * they don't perform conflicting operations on each other's data. In case of
 * conflict, the lock reverts to exclusive operation and os_lock_spin_s behavior
 * on contention (at potential extra cost for the aborted attempt at lock-elided
 * concurrent execution). If hardware HLE support is not present, this lock
 * variant behaves like os_lock_spin_s.
 *
 * @discussion
 * IMPORTANT: Use of this lock variant MUST be extensively tested on hardware
 * with HLE support to ensure the data access pattern and length of the critical
 * section allow lock-elided execution to succeed frequently enough to offset
 * the cost of any aborted concurrent execution.
 *
 * Must be initialized with OS_LOCK_ELIDING_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA)
OS_EXPORT OS_LOCK_TYPE_DECL(eliding);
OS_LOCK_DECL(eliding, 8) OS_ALIGNED(64);
#define OS_LOCK_ELIDING_INIT OS_LOCK_INIT(eliding)
/*!
 * @typedef os_lock_transactional_s
 *
 * @abstract
 * os_lock variant that uses hardware restricted transactional memory support if
 * available to allow multiple processors to concurrently execute the critical
 * section as a transactional region. If transactional execution aborts, the
 * lock reverts to exclusive operation and os_lock_spin_s behavior on contention
 * (at potential extra cost for the aborted attempt at transactional concurrent
 * execution). If hardware RTM support is not present, this lock variant behaves
 * like os_lock_eliding_s.
 *
 * @discussion
 * IMPORTANT: Use of this lock variant MUST be extensively tested on hardware
 * with RTM support to ensure the data access pattern and length of the critical
 * section allow transactional execution to succeed frequently enough to offset
 * the cost of any aborted transactions.
 *
 * Must be initialized with OS_LOCK_TRANSACTIONAL_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_NA)
OS_EXPORT OS_LOCK_TYPE_DECL(transactional);
OS_LOCK_DECL(transactional, 8) OS_ALIGNED(64);
#define OS_LOCK_TRANSACTIONAL_INIT OS_LOCK_INIT(transactional)

#endif // !TARGET_OS_IPHONE
/*!
 * @function os_lock_lock
 *
 * @abstract
 * Locks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_lock(os_lock_t lock);
/*!
 * @function os_lock_trylock
 *
 * @abstract
 * Locks an os_lock variant if it is not already locked.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_lock_trylock(os_lock_t lock);
/*!
 * @function os_lock_unlock
 *
 * @abstract
 * Unlocks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9, __IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_unlock(os_lock_t lock);
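/*
 * Example (illustrative only; names are hypothetical): opportunistic work via
 * the trylock entrypoint. Do not spin retrying the trylock; either proceed
 * without the lock or fall back to the blocking os_lock_lock().
 *
 *	static os_lock_nospin_s stats_lock = OS_LOCK_NOSPIN_INIT;
 *
 *	static void
 *	flush_stats_if_idle(void)
 *	{
 *		if (!os_lock_trylock(&stats_lock)) return; // busy: skip this round
 *		// ... flush statistics ...
 *		os_lock_unlock(&stats_lock);
 *	}
 */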
/*! @group os_unfair_lock SPI
 *
 * @abstract
 * Replacement for the deprecated OSSpinLock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The opaque lock value
 * contains thread ownership information that the system may use to attempt to
 * resolve priority inversions.
 *
 * This lock must be unlocked from the same thread that locked it; attempts to
 * unlock from a different thread will cause an assertion aborting the process.
 *
 * This lock must not be accessed from multiple processes or threads via shared
 * or multiply-mapped memory; the lock implementation relies on the address of
 * the lock value and owning process.
 *
 * @discussion
 * As with OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken up
 * waiter gets an opportunity to attempt to acquire the lock. This may be
 * advantageous for performance reasons, but also makes starvation of waiters a
 * possibility.
 *
 * Must be initialized with OS_UNFAIR_LOCK_INIT
 */
/*!
 * @typedef os_unfair_lock_options_t
 *
 * @const OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
 * This flag informs the runtime that the specified lock is used for data
 * synchronization and that the lock owner is always able to make progress
 * toward releasing the lock without the help of another thread in the same
 * process. This hint will cause the workqueue subsystem to not create new
 * threads to offset for threads waiting for the lock.
 *
 * When this flag is used, the code running under the critical section should
 * be well known and under your control (generally it should not call into
 * the system).
 */
OS_ENUM(os_unfair_lock_options, uint32_t,
	OS_UNFAIR_LOCK_NONE
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00000000,
	OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00010000,
);
/*!
 * @function os_unfair_lock_lock_with_options
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
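/*
 * Example (illustrative only; names are hypothetical): taking an
 * os_unfair_lock with the data-synchronization hint for a short critical
 * section whose owner never needs help from another thread to finish.
 *
 *	static os_unfair_lock cache_lock = OS_UNFAIR_LOCK_INIT;
 *
 *	static void
 *	cache_store(uint64_t key, void *value)
 *	{
 *		os_unfair_lock_lock_with_options(&cache_lock,
 *				OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
 *		// ... short, self-contained update; no calls into the system ...
 *		os_unfair_lock_unlock(&cache_lock);
 *	}
 */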
/*!
 * @function os_unfair_lock_assert_owner
 *
 * @abstract
 * Asserts that the calling thread is the current owner of the specified lock.
 *
 * @discussion
 * If the lock is currently owned by the calling thread, this function returns.
 *
 * If the lock is unlocked or owned by a different thread, this function
 * asserts and terminates the process.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_assert_owner(os_unfair_lock_t lock);
/*!
 * @function os_unfair_lock_assert_not_owner
 *
 * @abstract
 * Asserts that the calling thread is not the current owner of the specified
 * lock.
 *
 * @discussion
 * If the lock is unlocked or owned by a different thread, this function
 * returns.
 *
 * If the lock is currently owned by the calling thread, this function asserts
 * and terminates the process.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_assert_not_owner(os_unfair_lock_t lock);
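/*
 * Example (illustrative only; names and the node_t type are hypothetical):
 * using the ownership assertions to enforce a locking contract.
 *
 *	static os_unfair_lock list_lock = OS_UNFAIR_LOCK_INIT;
 *
 *	// Caller must hold list_lock.
 *	static void
 *	list_remove_locked(node_t *node)
 *	{
 *		os_unfair_lock_assert_owner(&list_lock); // aborts if not held
 *		// ... unlink node ...
 *	}
 *
 *	static void
 *	list_clear(void)
 *	{
 *		os_unfair_lock_assert_not_owner(&list_lock); // catch self-deadlock
 *		os_unfair_lock_lock(&list_lock);
 *		// ... remove all nodes ...
 *		os_unfair_lock_unlock(&list_lock);
 *	}
 */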
/*! @group os_unfair_lock variant for consumption by Libc
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_with_options_4Libc(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);

OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_unlock_4Libc(os_unfair_lock_t lock);
OS_ASSUME_NONNULL_END
/*! @group Inline os_unfair_lock interfaces
 *
 * Inline versions of the os_unfair_lock fastpath.
 *
 * Intended exclusively for special highly performance-sensitive cases where the
 * function calls to the os_unfair_lock API entrypoints add measurable overhead.
 *
 * Do not use in frameworks to implement synchronization API primitives that are
 * exposed to developers; that would lead to false positives for that API from
 * tools such as ThreadSanitizer.
 *
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 * DO NOT USE IN CODE THAT IS NOT PART OF THE OPERATING SYSTEM OR THAT IS NOT
 * REBUILT AS PART OF AN OS WORLDBUILD. YOU HAVE BEEN WARNED!
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 *
 * Define OS_UNFAIR_LOCK_INLINE=1 to indicate that you have read the warning
 * above and still wish to use these interfaces.
 */

#if defined(OS_UNFAIR_LOCK_INLINE) && OS_UNFAIR_LOCK_INLINE

#include <pthread/tsd_private.h>
#ifdef __cplusplus
#if !(__has_include(<atomic>) && __has_feature(cxx_atomic))
#error Cannot use inline os_unfair_lock without <atomic> and C++11 atomics
#endif
#include <atomic>
typedef std::atomic<os_unfair_lock> _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) std::_a
#else
#if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
#error Cannot use inline os_unfair_lock without <stdatomic.h> and C11 atomics
#endif
#include <stdatomic.h>
typedef _Atomic(os_unfair_lock) _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) _a
#endif
OS_ASSUME_NONNULL_BEGIN
/*!
 * @function os_unfair_lock_lock_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
	uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
	// Fastpath: CAS the lock word from unlocked to owned-by-this-thread.
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		// Contended: take the slowpath through the library entrypoint.
		return os_unfair_lock_lock(lock);
	}
}
/*!
 * @function os_unfair_lock_lock_with_options_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_with_options_inline(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	if (!_pthread_has_direct_tsd()) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
	uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
}
/*!
 * @function os_unfair_lock_trylock_inline
 *
 * @abstract
 * Locks an os_unfair_lock if it is not already locked.
 *
 * @discussion
 * It is invalid to surround this function with a retry loop; if this function
 * returns false, the program must be able to proceed without having acquired
 * the lock, or it must call os_unfair_lock_lock_inline() instead.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_WARN_RESULT OS_NONNULL_ALL
bool
os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
	uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
	return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed));
}
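/*
 * Example (illustrative only; names are hypothetical): correct use of the
 * inline trylock. If it returns false, proceed without the lock or block via
 * os_unfair_lock_lock_inline(); never wrap it in a retry loop.
 *
 *	if (os_unfair_lock_trylock_inline(&lock)) {
 *		// ... critical section ...
 *		os_unfair_lock_unlock_inline(&lock); // defined below
 *	} else {
 *		// fall back to the blocking acquire
 *		os_unfair_lock_lock_inline(&lock);
 *		// ... critical section ...
 *		os_unfair_lock_unlock_inline(&lock);
 *	}
 */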
/*!
 * @function os_unfair_lock_unlock_inline
 *
 * @abstract
 * Unlocks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
	uintptr_t mts = (uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
	// Fastpath: CAS the lock word from owned-by-this-thread back to unlocked.
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		// Slowpath: the lock word was not exactly our thread value
		// (e.g. there are waiters to wake up).
		return os_unfair_lock_unlock(lock);
	}
}
/*! @group os_unfair_lock no-TSD interfaces
 *
 * Like the above, but don't require being on a thread with valid TSD, so they
 * can be called from injected mach-threads. The normal routines use the TSD
 * value for mach_thread_self(); these routines use MACH_PORT_DEAD for the
 * locked value instead. As a result, they will be unable to resolve priority
 * inversions.
 *
 * This should only be used by libpthread.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock);

OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock);
/*!
 * @function os_unfair_lock_lock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Locks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	// MACH_PORT_DEAD stands in for the owner's thread port, so priority
	// inversions cannot be resolved for locks taken through this path.
	uintptr_t mts = (uintptr_t)MACH_PORT_DEAD;
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_no_tsd_4libpthread(lock);
	}
}
/*!
 * @function os_unfair_lock_unlock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Unlocks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
	uintptr_t mts = (uintptr_t)MACH_PORT_DEAD;
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_INIT, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock_no_tsd_4libpthread(lock);
	}
}
OS_ASSUME_NONNULL_END

#endif // OS_UNFAIR_LOCK_INLINE

#endif // __OS_LOCK_PRIVATE__