/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __OS_LOCK__
#define __OS_LOCK__

#include <Availability.h>
#include <sys/cdefs.h>
#include <stdint.h>
#include <stdbool.h>
#include <os/base.h>

OS_ASSUME_NONNULL_BEGIN

#define OS_LOCK_API_VERSION 20160309

#define OS_UNFAIR_LOCK_AVAILABILITY \
        __API_AVAILABLE(macos(10.12), ios(10.0), tvos(10.0), watchos(3.0))

/*!
 * @typedef os_unfair_lock
 *
 * @abstract
 * Low-level lock that allows waiters to block efficiently on contention.
 *
 * In general, higher level synchronization primitives such as those provided by
 * the pthread or dispatch subsystems should be preferred.
 *
 * The values stored in the lock should be considered opaque and implementation
 * defined; they contain thread ownership information that the system may use
 * to attempt to resolve priority inversions.
 *
 * This lock must be unlocked from the same thread that locked it; attempts to
 * unlock from a different thread will cause an assertion aborting the process.
 *
 * This lock must not be accessed from multiple processes or threads via shared
 * or multiply-mapped memory; the lock implementation relies on the address of
 * the lock value and owning process.
 *
 * Must be initialized with OS_UNFAIR_LOCK_INIT.
 *
 * @discussion
 * Replacement for the deprecated OSSpinLock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock.
 *
 * As with OSSpinLock, there is no attempt at fairness or lock ordering; e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock. This may be
 * advantageous for performance reasons, but also makes starvation of waiters a
 * possibility.
 */
OS_UNFAIR_LOCK_AVAILABILITY
typedef struct os_unfair_lock_s {
    uint32_t _os_unfair_lock_opaque;
} os_unfair_lock, *os_unfair_lock_t;

#ifndef OS_UNFAIR_LOCK_INIT
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_LOCK_INIT ((os_unfair_lock){0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_LOCK_INIT (os_unfair_lock{})
#elif defined(__cplusplus)
#define OS_UNFAIR_LOCK_INIT (os_unfair_lock())
#else
#define OS_UNFAIR_LOCK_INIT {0}
#endif
#endif // OS_UNFAIR_LOCK_INIT
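
/*
 * Example (an illustrative sketch, not part of the original header): a lock
 * initialized with OS_UNFAIR_LOCK_INIT protecting a shared counter, acquired
 * and released around the critical section from the same thread. The names
 * `ledger_lock`, `ledger_balance` and `ledger_deposit` are hypothetical.
 *
 *    #include <os/lock.h>
 *
 *    static os_unfair_lock ledger_lock = OS_UNFAIR_LOCK_INIT;
 *    static uint64_t ledger_balance;
 *
 *    void ledger_deposit(uint64_t amount)
 *    {
 *        os_unfair_lock_lock(&ledger_lock);   // blocks in the kernel on contention
 *        ledger_balance += amount;
 *        os_unfair_lock_unlock(&ledger_lock); // must be called from the locking thread
 *    }
 */
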
/*!
 * @function os_unfair_lock_lock
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock(os_unfair_lock_t lock);

/*!
 * @function os_unfair_lock_trylock
 *
 * @abstract
 * Locks an os_unfair_lock if it is not already locked.
 *
 * @discussion
 * It is invalid to surround this function with a retry loop; if this function
 * returns false, the program must be able to proceed without having acquired
 * the lock, or it must call os_unfair_lock_lock() directly (a retry loop around
 * os_unfair_lock_trylock() amounts to an inefficient implementation of
 * os_unfair_lock_lock() that hides the lock waiter from the system and prevents
 * resolution of priority inversions).
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_NONNULL_ALL
bool os_unfair_lock_trylock(os_unfair_lock_t lock);
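
/*
 * Example (an illustrative sketch, not part of the original header): a correct
 * use of os_unfair_lock_trylock() for strictly optional work. When the lock
 * cannot be acquired, the caller proceeds without it instead of retrying; work
 * that must happen under the lock would call os_unfair_lock_lock() instead.
 * The names `cache_lock` and `cache_trim_if_uncontended` are hypothetical.
 *
 *    static os_unfair_lock cache_lock = OS_UNFAIR_LOCK_INIT;
 *
 *    void cache_trim_if_uncontended(void)
 *    {
 *        if (os_unfair_lock_trylock(&cache_lock)) {
 *            // ... trim the cache while holding the lock ...
 *            os_unfair_lock_unlock(&cache_lock);
 *        }
 *        // else: another thread owns the lock; skip the optional trim
 *        // rather than spinning in a retry loop.
 *    }
 */
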
/*!
 * @function os_unfair_lock_unlock
 *
 * @abstract
 * Unlocks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_unlock(os_unfair_lock_t lock);

/*!
 * @function os_unfair_lock_assert_owner
 *
 * @abstract
 * Asserts that the calling thread is the current owner of the specified lock.
 *
 * @discussion
 * If the lock is currently owned by the calling thread, this function returns.
 *
 * If the lock is unlocked or owned by a different thread, this function
 * asserts and terminates the process.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_assert_owner(os_unfair_lock_t lock);

/*!
 * @function os_unfair_lock_assert_not_owner
 *
 * @abstract
 * Asserts that the calling thread is not the current owner of the specified
 * lock.
 *
 * @discussion
 * If the lock is unlocked or owned by a different thread, this function
 * returns.
 *
 * If the lock is currently owned by the calling thread, this function asserts
 * and terminates the process.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_assert_not_owner(os_unfair_lock_t lock);
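
/*
 * Example (an illustrative sketch, not part of the original header): using the
 * ownership asserts to document and enforce locking preconditions. The names
 * `stats_lock`, `stats_count`, `stats_bump_locked` and `stats_bump` are
 * hypothetical.
 *
 *    static os_unfair_lock stats_lock = OS_UNFAIR_LOCK_INIT;
 *    static uint64_t stats_count;
 *
 *    // Caller must already hold stats_lock.
 *    static void stats_bump_locked(void)
 *    {
 *        os_unfair_lock_assert_owner(&stats_lock);
 *        stats_count++;
 *    }
 *
 *    void stats_bump(void)
 *    {
 *        // Guard against accidental recursive acquisition, which would deadlock.
 *        os_unfair_lock_assert_not_owner(&stats_lock);
 *        os_unfair_lock_lock(&stats_lock);
 *        stats_bump_locked();
 *        os_unfair_lock_unlock(&stats_lock);
 *    }
 */
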
OS_ASSUME_NONNULL_END

#endif // __OS_LOCK__