]> git.saurik.com Git - apple/libplatform.git/blame - private/os/lock_private.h
libplatform-254.40.4.tar.gz
[apple/libplatform.git] / private / os / lock_private.h
CommitLineData
ada7c492
A
1/*
2 * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21#ifndef __OS_LOCK_PRIVATE__
22#define __OS_LOCK_PRIVATE__
23
24#include <Availability.h>
25#include <TargetConditionals.h>
26#include <sys/cdefs.h>
27#include <stddef.h>
28#include <stdint.h>
29#include <stdbool.h>
442fbc9d 30#include <mach/port.h>
ada7c492
A
31#include <os/base_private.h>
32#include <os/lock.h>
33
34OS_ASSUME_NONNULL_BEGIN
35
36/*! @header
37 * Low-level lock SPI
38 */
39
442fbc9d 40#define OS_LOCK_SPI_VERSION 20190424
ada7c492
A
41
42/*!
43 * @typedef os_lock_t
44 *
45 * @abstract
46 * Pointer to one of the os_lock variants.
47 */
48
/* Helper macros naming the per-variant lock-type descriptor and lock struct. */
#define OS_LOCK_TYPE_STRUCT(type) const struct _os_lock_type_##type##_s
#define OS_LOCK_TYPE_REF(type) _os_lock_type_##type
#define OS_LOCK_TYPE_DECL(type) OS_LOCK_TYPE_STRUCT(type) OS_LOCK_TYPE_REF(type)

#define OS_LOCK(type) os_lock_##type##_s
#define OS_LOCK_STRUCT(type) struct OS_LOCK(type)
56#if defined(__cplusplus) && __cplusplus >= 201103L
57
58#define OS_LOCK_DECL(type, size) \
59 typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
60 private: \
438624e0 61 OS_LOCK_TYPE_STRUCT(type) * osl_type OS_UNUSED; \
ada7c492
A
62 uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
63 public: \
89154bfb 64 constexpr OS_LOCK(type)() : \
ada7c492
A
65 osl_type(&OS_LOCK_TYPE_REF(type)), _osl_##type##_opaque() {} \
66 } OS_LOCK(type)
67#define OS_LOCK_INIT(type) {}
68
69typedef OS_LOCK_STRUCT(base) {
70 protected:
71 constexpr OS_LOCK(base)() {}
72} *os_lock_t;
73
74#else
75
76#define OS_LOCK_DECL(type, size) \
77 typedef OS_LOCK_STRUCT(type) { \
438624e0 78 OS_LOCK_TYPE_STRUCT(type) * osl_type; \
ada7c492
A
79 uintptr_t _osl_##type##_opaque[size-1]; \
80 } OS_LOCK(type)
81
82#define OS_LOCK_INIT(type) { .osl_type = &OS_LOCK_TYPE_REF(type), }
83
84#ifndef OS_LOCK_T_MEMBER
85#define OS_LOCK_T_MEMBER(type) OS_LOCK_STRUCT(type) *_osl_##type
86#endif
87
88typedef OS_TRANSPARENT_UNION union {
89 OS_LOCK_T_MEMBER(base);
90 OS_LOCK_T_MEMBER(unfair);
91 OS_LOCK_T_MEMBER(nospin);
92 OS_LOCK_T_MEMBER(spin);
93 OS_LOCK_T_MEMBER(handoff);
ada7c492
A
94} os_lock_t;
95
96#endif
97
98/*!
99 * @typedef os_lock_unfair_s
100 *
101 * @abstract
102 * os_lock variant equivalent to os_unfair_lock. Does not spin on contention but
103 * waits in the kernel to be woken up by an unlock. The lock value contains
104 * ownership information that the system may use to attempt to resolve priority
105 * inversions.
106 *
107 * @discussion
108 * Intended as a replacement for os_lock_spin_s or OSSpinLock. Like with
109 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
110 * can potentially immediately reacquire the lock before a woken up waiter gets
 111 * an opportunity to attempt to acquire the lock, so starvation is possible.
112 *
113 * Must be initialized with OS_LOCK_UNFAIR_INIT
114 */
115__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
116__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
117OS_EXPORT OS_LOCK_TYPE_DECL(unfair);
118OS_LOCK_DECL(unfair, 2);
119#define OS_LOCK_UNFAIR_INIT OS_LOCK_INIT(unfair)
120
121/*!
122 * @typedef os_lock_nospin_s
123 *
124 * @abstract
125 * os_lock variant that does not spin on contention but waits in the kernel to
126 * be woken up by an unlock. No attempt to resolve priority inversions is made
127 * so os_unfair_lock or os_lock_unfair_s should generally be preferred.
128 *
129 * @discussion
130 * Intended as a replacement for os_lock_spin_s or OSSpinLock. Like with
131 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
132 * can potentially immediately reacquire the lock before a woken up waiter gets
 133 * an opportunity to attempt to acquire the lock, so starvation is possible.
134 *
135 * Must be initialized with OS_LOCK_NOSPIN_INIT
136 */
137__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
138__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
139OS_EXPORT OS_LOCK_TYPE_DECL(nospin);
140OS_LOCK_DECL(nospin, 2);
141#define OS_LOCK_NOSPIN_INIT OS_LOCK_INIT(nospin)
142
143/*!
144 * @typedef os_lock_spin_s
145 *
146 * @abstract
147 * Deprecated os_lock variant that on contention starts by spinning trying to
148 * acquire the lock, then depressing the priority of the current thread and
149 * finally blocking the thread waiting for the lock to become available.
150 * Equivalent to OSSpinLock and equally not recommended, see discussion in
151 * libkern/OSAtomic.h headerdoc.
152 *
153 * @discussion
154 * Spinlocks are intended to be held only for very brief periods of time. The
155 * critical section must not make syscalls and should avoid touching areas of
156 * memory that may trigger a page fault, in particular if the critical section
157 * may be executing on threads of widely differing priorities or on a mix of
158 * IO-throttled and unthrottled threads.
159 *
160 * Must be initialized with OS_LOCK_SPIN_INIT
161 */
162__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
163OS_EXPORT OS_LOCK_TYPE_DECL(spin);
164OS_LOCK_DECL(spin, 2);
165#define OS_LOCK_SPIN_INIT OS_LOCK_INIT(spin)
166
167/*!
168 * @typedef os_lock_handoff_s
169 *
170 * @abstract
171 * os_lock variant that on contention hands off the current kernel thread to the
172 * lock-owning userspace thread (if it is not running), temporarily overriding
173 * its priority and IO throttle if necessary.
174 *
175 * @discussion
176 * Intended for use in limited circumstances where the critical section might
177 * be executing on threads of widely differing priorities or on a mix of
178 * IO-throttled and unthrottled threads where the ordinary os_lock_spin_s would
179 * be likely to encounter a priority inversion.
180 *
181 * IMPORTANT: This lock variant is NOT intended as a general replacement for all
182 * uses of os_lock_spin_s or OSSpinLock.
183 *
184 * Must be initialized with OS_LOCK_HANDOFF_INIT
185 */
186__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
187OS_EXPORT OS_LOCK_TYPE_DECL(handoff);
188OS_LOCK_DECL(handoff, 2);
189#define OS_LOCK_HANDOFF_INIT OS_LOCK_INIT(handoff)
190
191
ada7c492
A
192__BEGIN_DECLS
193
194/*!
195 * @function os_lock_lock
196 *
197 * @abstract
198 * Locks an os_lock variant.
199 *
200 * @param lock
201 * Pointer to one of the os_lock variants.
202 */
203__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
204OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
205void os_lock_lock(os_lock_t lock);
206
207/*!
208 * @function os_lock_trylock
209 *
210 * @abstract
211 * Locks an os_lock variant if it is not already locked.
212 *
213 * @param lock
214 * Pointer to one of the os_lock variants.
215 *
216 * @result
 217 * Returns true if the lock was successfully locked and false if the lock was
218 * already locked.
219 */
220__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
221OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
222bool os_lock_trylock(os_lock_t lock);
223
224/*!
225 * @function os_lock_unlock
226 *
227 * @abstract
228 * Unlocks an os_lock variant.
229 *
230 * @param lock
231 * Pointer to one of the os_lock variants.
232 */
233__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
234OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
235void os_lock_unlock(os_lock_t lock);
236
237/*! @group os_unfair_lock SPI
238 *
239 * @abstract
240 * Replacement for the deprecated OSSpinLock. Does not spin on contention but
241 * waits in the kernel to be woken up by an unlock. The opaque lock value
242 * contains thread ownership information that the system may use to attempt to
243 * resolve priority inversions.
244 *
89154bfb 245 * This lock must be unlocked from the same thread that locked it, attempts to
ada7c492
A
246 * unlock from a different thread will cause an assertion aborting the process.
247 *
248 * This lock must not be accessed from multiple processes or threads via shared
249 * or multiply-mapped memory, the lock implementation relies on the address of
250 * the lock value and owning process.
251 *
252 * @discussion
253 * As with OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
254 * unlocker can potentially immediately reacquire the lock before a woken up
255 * waiter gets an opportunity to attempt to acquire the lock. This may be
256 * advantageous for performance reasons, but also makes starvation of waiters a
257 * possibility.
258 *
259 * Must be initialized with OS_UNFAIR_LOCK_INIT
260 */
261
262/*!
263 * @typedef os_unfair_lock_options_t
264 *
265 * @const OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
266 * This flag informs the runtime that the specified lock is used for data
267 * synchronization and that the lock owner is always able to make progress
268 * toward releasing the lock without the help of another thread in the same
269 * process. This hint will cause the workqueue subsystem to not create new
270 * threads to offset for threads waiting for the lock.
271 *
272 * When this flag is used, the code running under the critical section should
273 * be well known and under your control (Generally it should not call into
274 * framework code).
89154bfb
A
275 *
276 * @const OS_UNFAIR_LOCK_ADAPTIVE_SPIN
277 * This flag allows for the kernel to use adaptive spinning when the holder
278 * of the lock is currently on core. This should only be used for locks
279 * where the protected critical section is always extremely short.
ada7c492 280 */
89154bfb
A
281OS_OPTIONS(os_unfair_lock_options, uint32_t,
282 OS_UNFAIR_LOCK_NONE OS_SWIFT_NAME(None)
ada7c492 283 OS_UNFAIR_LOCK_AVAILABILITY = 0x00000000,
89154bfb 284 OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION OS_SWIFT_NAME(DataSynchronization)
ada7c492 285 OS_UNFAIR_LOCK_AVAILABILITY = 0x00010000,
89154bfb
A
286 OS_UNFAIR_LOCK_ADAPTIVE_SPIN OS_SWIFT_NAME(AdaptiveSpin)
287 __API_AVAILABLE(macos(10.15), ios(13.0),
288 tvos(13.0), watchos(6.0), bridgeos(4.0)) = 0x00040000,
ada7c492
A
289);
290
89154bfb
A
291#if __swift__
292#define OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(name) \
293 static const os_unfair_lock_options_t \
294 name##_FOR_SWIFT OS_SWIFT_NAME(name) = name
295OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_NONE);
296OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
297OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_ADAPTIVE_SPIN);
298#undef OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT
299#endif
300
ada7c492
A
301/*!
302 * @function os_unfair_lock_lock_with_options
303 *
304 * @abstract
305 * Locks an os_unfair_lock.
306 *
307 * @param lock
308 * Pointer to an os_unfair_lock.
309 *
310 * @param options
311 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
312 */
313OS_UNFAIR_LOCK_AVAILABILITY
314OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
315void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
316 os_unfair_lock_options_t options);
317
442fbc9d
A
318/*!
319 * @group os_unfair_lock no-TSD interfaces
438624e0
A
320 *
321 * Like the above, but don't require being on a thread with valid TSD, so they
442fbc9d 322 * can be called from injected mach-threads.
438624e0 323 *
442fbc9d
A
324 * The normal routines use the TSD value for mach_thread_self(), but mach
325 * threads do not have TSDs. Instead these functions require the value for
326 * mach_thread_self() to be passed explicitly.
438624e0 327 *
442fbc9d 328 * This should only be used directly by libpthread.
438624e0 329 */
442fbc9d 330__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
438624e0 331OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
442fbc9d
A
332void os_unfair_lock_lock_no_tsd(os_unfair_lock_t lock,
333 os_unfair_lock_options_t options, mach_port_t mts);
438624e0 334
442fbc9d 335__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
438624e0 336OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
442fbc9d
A
337void os_unfair_lock_unlock_no_tsd(os_unfair_lock_t lock, mach_port_t mts);
338
438624e0
A
339
340/*! @group os_unfair_recursive_lock SPI
341 *
342 * @abstract
343 * Similar to os_unfair_lock, but recursive.
344 *
345 * @discussion
346 * Must be initialized with OS_UNFAIR_RECURSIVE_LOCK_INIT
347 */
348
349#define OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY \
350 __OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) \
351 __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)
352
/* Static initializer, spelled per-dialect: C99 compound literal, C++11
 * brace-init, pre-C++11 functional cast, and a plain aggregate otherwise. */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		((os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		(os_unfair_recursive_lock{OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus)
#define OS_UNFAIR_RECURSIVE_LOCK_INIT (os_unfair_recursive_lock(\
		(os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0}))
#else
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		{OS_UNFAIR_LOCK_INIT, 0}
#endif // OS_UNFAIR_RECURSIVE_LOCK_INIT
366
ada7c492 367/*!
438624e0 368 * @typedef os_unfair_recursive_lock
ada7c492
A
369 *
370 * @abstract
438624e0 371 * Low-level lock that allows waiters to block efficiently on contention.
ada7c492
A
372 *
373 * @discussion
438624e0 374 * See os_unfair_lock.
ada7c492 375 *
438624e0
A
376 */
377OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
378typedef struct os_unfair_recursive_lock_s {
379 os_unfair_lock ourl_lock;
380 uint32_t ourl_count;
381} os_unfair_recursive_lock, *os_unfair_recursive_lock_t;
382
383/*!
384 * @function os_unfair_recursive_lock_lock_with_options
ada7c492 385 *
438624e0
A
386 * @abstract
387 * See os_unfair_lock_lock_with_options
ada7c492 388 */
438624e0 389OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
ada7c492 390OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
438624e0
A
391void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
392 os_unfair_lock_options_t options);
ada7c492
A
393
394/*!
438624e0 395 * @function os_unfair_recursive_lock_lock
ada7c492
A
396 *
397 * @abstract
438624e0
A
398 * See os_unfair_lock_lock
399 */
400OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
401OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
402void
403os_unfair_recursive_lock_lock(os_unfair_recursive_lock_t lock)
404{
405 os_unfair_recursive_lock_lock_with_options(lock, OS_UNFAIR_LOCK_NONE);
406}
407
408/*!
409 * @function os_unfair_recursive_lock_trylock
ada7c492 410 *
438624e0
A
411 * @abstract
412 * See os_unfair_lock_trylock
413 */
414OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
415OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_NONNULL_ALL
416bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);
417
418/*!
419 * @function os_unfair_recursive_lock_unlock
ada7c492 420 *
438624e0
A
421 * @abstract
422 * See os_unfair_lock_unlock
423 */
424OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
425OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
426void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
427
428/*!
429 * @function os_unfair_recursive_lock_tryunlock4objc
ada7c492 430 *
438624e0
A
431 * @abstract
432 * See os_unfair_lock_unlock
ada7c492 433 */
438624e0 434OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
ada7c492 435OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
438624e0 436bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);
ada7c492 437
438624e0
A
438/*!
439 * @function os_unfair_recursive_lock_assert_owner
e45b4692 440 *
438624e0
A
441 * @abstract
442 * See os_unfair_lock_assert_owner
443 */
444OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
445OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
446void
447os_unfair_recursive_lock_assert_owner(os_unfair_recursive_lock_t lock)
448{
449 os_unfair_lock_assert_owner(&lock->ourl_lock);
450}
451
452/*!
453 * @function os_unfair_recursive_lock_assert_not_owner
e45b4692 454 *
438624e0
A
455 * @abstract
456 * See os_unfair_lock_assert_not_owner
457 */
458OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
459OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
460void
461os_unfair_recursive_lock_assert_not_owner(os_unfair_recursive_lock_t lock)
462{
463 os_unfair_lock_assert_not_owner(&lock->ourl_lock);
464}
465
89154bfb
A
466/*!
467 * @function os_unfair_recursive_lock_owned
468 *
469 * @abstract
470 * This function is reserved for the use of people who want to soft-fault
471 * when locking models have been violated.
472 *
473 * @discussion
474 * This is meant for SQLite use to detect existing misuse of the API surface,
475 * and is not meant for anything else than calling os_log_fault() when such
476 * contracts are violated.
477 *
478 * There's little point to use this value for logic as the
479 * os_unfair_recursive_lock is already recursive anyway.
480 */
481__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
482__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
483OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
484bool
485os_unfair_recursive_lock_owned(os_unfair_recursive_lock_t lock);
486
487/*!
488 * @function os_unfair_recursive_lock_unlock_forked_child
489 *
490 * @abstract
491 * Function to be used in an atfork child handler to unlock a recursive unfair
492 * lock.
493 *
494 * @discussion
495 * This function helps with handling recursive locks in the presence of fork.
496 *
497 * It is typical to setup atfork handlers that will:
498 * - take the lock in the pre-fork handler,
499 * - drop the lock in the parent handler,
500 * - reset the lock in the forked child.
501 *
502 * However, because a recursive lock may have been held by the current thread
 503 * already, resetting needs to act like an unlock. This function serves for this
504 * purpose. Unlike os_unfair_recursive_lock_unlock(), this function will fixup
505 * the lock ownership to match the new identity of the thread after fork().
506 */
507__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
508__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
509OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
510void
511os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock);
512
438624e0
A
513#if __has_attribute(cleanup)
514
515/*!
516 * @function os_unfair_lock_scoped_guard_unlock
e45b4692 517 *
438624e0
A
518 * @abstract
519 * Used by os_unfair_lock_lock_scoped_guard
ada7c492
A
520 */
521OS_UNFAIR_LOCK_AVAILABILITY
438624e0
A
522OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
523void
524os_unfair_lock_scoped_guard_unlock(os_unfair_lock_t _Nonnull * _Nonnull lock)
525{
526 os_unfair_lock_unlock(*lock);
527}
ada7c492 528
438624e0
A
529/*!
530 * @function os_unfair_lock_lock_scoped_guard
531 *
532 * @abstract
533 * Same as os_unfair_lock_lock() except that os_unfair_lock_unlock() is
534 * automatically called when the enclosing C scope ends.
535 *
536 * @param name
537 * Name for the variable holding the guard.
538 *
539 * @param lock
540 * Pointer to an os_unfair_lock.
541 *
542 * @see os_unfair_lock_lock
543 * @see os_unfair_lock_unlock
544 */
/* Declares guard_name, arranges unlock at scope exit via __attribute__((cleanup)),
 * then takes the lock. */
#define os_unfair_lock_lock_scoped_guard(guard_name, lock) \
	os_unfair_lock_t \
			__attribute__((cleanup(os_unfair_lock_scoped_guard_unlock))) \
			guard_name = lock; \
	os_unfair_lock_lock(guard_name)
550
551#endif // __has_attribute(cleanup)
ada7c492
A
552
553__END_DECLS
554
555OS_ASSUME_NONNULL_END
556
557/*! @group Inline os_unfair_lock interfaces
558 *
559 * Inline versions of the os_unfair_lock fastpath.
560 *
561 * Intended exclusively for special highly performance-sensitive cases where the
562 * function calls to the os_unfair_lock API entrypoints add measurable overhead.
563 *
564 * Do not use in frameworks to implement synchronization API primitives that are
ea84da91 565 * exposed to developers, that would lead to false positives for that API from
ada7c492
A
566 * tools such as ThreadSanitizer.
567 *
568 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
569 * DO NOT USE IN CODE THAT IS NOT PART OF THE OPERATING SYSTEM OR THAT IS NOT
570 * REBUILT AS PART OF AN OS WORLDBUILD. YOU HAVE BEEN WARNED!
571 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
572 *
573 * Define OS_UNFAIR_LOCK_INLINE=1 to indicate that you have read the warning
574 * above and still wish to use these interfaces.
575 */
576
577#if defined(OS_UNFAIR_LOCK_INLINE) && OS_UNFAIR_LOCK_INLINE
578
579#include <pthread/tsd_private.h>
580
581#ifdef __cplusplus
582extern "C++" {
e39c3b4b 583#if !(__has_include(<atomic>) && __has_extension(cxx_atomic))
ada7c492
A
584#error Cannot use inline os_unfair_lock without <atomic> and C++11 atomics
585#endif
586#include <atomic>
587typedef std::atomic<os_unfair_lock> _os_atomic_unfair_lock;
588#define OSLOCK_STD(_a) std::_a
589__BEGIN_DECLS
590#else
591#if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
592#error Cannot use inline os_unfair_lock without <stdatomic.h> and C11 atomics
593#endif
594#include <stdatomic.h>
595typedef _Atomic(os_unfair_lock) _os_atomic_unfair_lock;
596#define OSLOCK_STD(_a) _a
597#endif
598
599OS_ASSUME_NONNULL_BEGIN
600
e45b4692
A
/* Unlocked lock value, spelled per-dialect (mirrors OS_UNFAIR_LOCK_INIT). */
#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_LOCK_UNLOCKED ((os_unfair_lock){0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock{})
#elif defined(__cplusplus)
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock())
#else
#define OS_UNFAIR_LOCK_UNLOCKED {0}
#endif
610
442fbc9d
A
611/*!
612 * @function os_unfair_lock_lock_no_tsd_inline
613 *
614 * @abstract
615 * Locks an os_unfair_lock, without requiring valid TSD.
616 *
617 * This should only be used directly by libpthread.
618 *
619 * @param lock
620 * Pointer to an os_unfair_lock.
621 */
622__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
623OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
624void
625os_unfair_lock_lock_no_tsd_inline(os_unfair_lock_t lock,
626 os_unfair_lock_options_t options, mach_port_t mts)
627{
628 os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
629 if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
630 (_os_atomic_unfair_lock*)lock, &unlocked, locked,
631 OSLOCK_STD(memory_order_acquire),
632 OSLOCK_STD(memory_order_relaxed))) {
633 return os_unfair_lock_lock_no_tsd(lock, options, mts);
634 }
635}
636
637/*!
638 * @function os_unfair_lock_unlock_no_tsd_inline
639 *
640 * @abstract
641 * Unlocks an os_unfair_lock, without requiring valid TSD.
642 *
643 * This should only be used directly by libpthread.
644 *
645 * @param lock
646 * Pointer to an os_unfair_lock.
647 */
648__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
649OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
650void
651os_unfair_lock_unlock_no_tsd_inline(os_unfair_lock_t lock, mach_port_t mts)
652{
653 os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
654 if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
655 (_os_atomic_unfair_lock*)lock, &locked, unlocked,
656 OSLOCK_STD(memory_order_release),
657 OSLOCK_STD(memory_order_relaxed))) {
658 return os_unfair_lock_unlock_no_tsd(lock, mts);
659 }
660}
661
ada7c492
A
662/*!
663 * @function os_unfair_lock_lock_inline
664 *
665 * @abstract
666 * Locks an os_unfair_lock.
667 *
668 * @param lock
669 * Pointer to an os_unfair_lock.
670 */
671OS_UNFAIR_LOCK_AVAILABILITY
672OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
673void
674os_unfair_lock_lock_inline(os_unfair_lock_t lock)
675{
676 if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
438624e0 677 uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
ada7c492 678 _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
e45b4692 679 os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
ada7c492
A
680 if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
681 (_os_atomic_unfair_lock*)lock, &unlocked, locked,
682 OSLOCK_STD(memory_order_acquire),
683 OSLOCK_STD(memory_order_relaxed))) {
684 return os_unfair_lock_lock(lock);
685 }
686}
687
688/*!
689 * @function os_unfair_lock_lock_with_options_inline
690 *
691 * @abstract
692 * Locks an os_unfair_lock.
693 *
694 * @param lock
695 * Pointer to an os_unfair_lock.
696 *
697 * @param options
698 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
699 */
700OS_UNFAIR_LOCK_AVAILABILITY
701OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
702void
703os_unfair_lock_lock_with_options_inline(os_unfair_lock_t lock,
704 os_unfair_lock_options_t options)
705{
706 if (!_pthread_has_direct_tsd()) {
707 return os_unfair_lock_lock_with_options(lock, options);
708 }
438624e0 709 uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
ada7c492 710 _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
e45b4692 711 os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
ada7c492
A
712 if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
713 (_os_atomic_unfair_lock*)lock, &unlocked, locked,
714 OSLOCK_STD(memory_order_acquire),
715 OSLOCK_STD(memory_order_relaxed))) {
716 return os_unfair_lock_lock_with_options(lock, options);
717 }
718}
719
720/*!
721 * @function os_unfair_lock_trylock_inline
722 *
723 * @abstract
724 * Locks an os_unfair_lock if it is not already locked.
725 *
726 * @discussion
727 * It is invalid to surround this function with a retry loop, if this function
728 * returns false, the program must be able to proceed without having acquired
729 * the lock, or it must call os_unfair_lock_lock_inline() instead.
730 *
731 * @param lock
732 * Pointer to an os_unfair_lock.
733 *
734 * @result
 735 * Returns true if the lock was successfully locked and false if the lock was
736 * already locked.
737 */
738OS_UNFAIR_LOCK_AVAILABILITY
739OS_INLINE OS_ALWAYS_INLINE OS_WARN_RESULT OS_NONNULL_ALL
740bool
741os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
742{
743 if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
438624e0 744 uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
ada7c492 745 _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
e45b4692 746 os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
ada7c492
A
747 return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
748 (_os_atomic_unfair_lock*)lock, &unlocked, locked,
749 OSLOCK_STD(memory_order_acquire), OSLOCK_STD(memory_order_relaxed));
750}
751
752/*!
753 * @function os_unfair_lock_unlock_inline
754 *
755 * @abstract
756 * Unlocks an os_unfair_lock.
757 *
758 * @param lock
759 * Pointer to an os_unfair_lock.
760 */
761OS_UNFAIR_LOCK_AVAILABILITY
762OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
763void
764os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
765{
766 if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
438624e0 767 uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
ada7c492 768 _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
e45b4692 769 os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
ada7c492
A
770 if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
771 (_os_atomic_unfair_lock*)lock, &locked, unlocked,
772 OSLOCK_STD(memory_order_release),
773 OSLOCK_STD(memory_order_relaxed))) {
774 return os_unfair_lock_unlock(lock);
775 }
776}
777
ada7c492
A
778OS_ASSUME_NONNULL_END
779
780#undef OSLOCK_STD
781#ifdef __cplusplus
782__END_DECLS
783} // extern "C++"
784#endif
785
786#endif // OS_UNFAIR_LOCK_INLINE
787
788#endif // __OS_LOCK_PRIVATE__