/*
 * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __OS_LOCK_PRIVATE__
#define __OS_LOCK_PRIVATE__

#include <Availability.h>
#include <TargetConditionals.h>
#include <sys/cdefs.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <mach/port.h>
#include <os/base_private.h>
#include <os/lock.h>

OS_ASSUME_NONNULL_BEGIN

/*! @header
 * Low-level lock SPI
 */

#define OS_LOCK_SPI_VERSION 20190424

/*!
 * @typedef os_lock_t
 *
 * @abstract
 * Pointer to one of the os_lock variants.
 */

#define OS_LOCK_TYPE_STRUCT(type) const struct _os_lock_type_##type##_s
#define OS_LOCK_TYPE_REF(type) _os_lock_type_##type
#define OS_LOCK_TYPE_DECL(type) OS_LOCK_TYPE_STRUCT(type) OS_LOCK_TYPE_REF(type)

#define OS_LOCK(type) os_lock_##type##_s
#define OS_LOCK_STRUCT(type) struct OS_LOCK(type)

#if defined(__cplusplus) && __cplusplus >= 201103L

#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
			private: \
				OS_LOCK_TYPE_STRUCT(type) * osl_type OS_UNUSED; \
				uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
			public: \
				constexpr OS_LOCK(type)() : \
						osl_type(&OS_LOCK_TYPE_REF(type)), _osl_##type##_opaque() {} \
		} OS_LOCK(type)
#define OS_LOCK_INIT(type) {}

typedef OS_LOCK_STRUCT(base) {
	protected:
	constexpr OS_LOCK(base)() {}
} *os_lock_t;

#else

#define OS_LOCK_DECL(type, size) \
		typedef OS_LOCK_STRUCT(type) { \
			OS_LOCK_TYPE_STRUCT(type) * osl_type; \
			uintptr_t _osl_##type##_opaque[size-1]; \
		} OS_LOCK(type)

#define OS_LOCK_INIT(type) { .osl_type = &OS_LOCK_TYPE_REF(type), }

#ifndef OS_LOCK_T_MEMBER
#define OS_LOCK_T_MEMBER(type) OS_LOCK_STRUCT(type) *_osl_##type
#endif

typedef OS_TRANSPARENT_UNION union {
	OS_LOCK_T_MEMBER(base);
	OS_LOCK_T_MEMBER(unfair);
	OS_LOCK_T_MEMBER(nospin);
	OS_LOCK_T_MEMBER(spin);
	OS_LOCK_T_MEMBER(handoff);
} os_lock_t;

#endif
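
/*
 * For illustration only (a sketch derived from the macros above, not part of
 * the SPI): in plain C, OS_LOCK_DECL(unfair, 2) expands to a two-word struct
 * whose first word points at the lock-type descriptor:
 *
 *	typedef struct os_lock_unfair_s {
 *		const struct _os_lock_type_unfair_s * osl_type;
 *		uintptr_t _osl_unfair_opaque[1];
 *	} os_lock_unfair_s;
 *
 * OS_LOCK_INIT(unfair) fills in osl_type, which is how the generic
 * os_lock_lock()/os_lock_unlock() entrypoints dispatch to the correct
 * lock implementation for each variant.
 */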

/*!
 * @typedef os_lock_unfair_s
 *
 * @abstract
 * os_lock variant equivalent to os_unfair_lock. Does not spin on contention
 * but waits in the kernel to be woken up by an unlock. The lock value contains
 * ownership information that the system may use to attempt to resolve priority
 * inversions.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock, there is no attempt at fairness or lock ordering, e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock, so starvation is
 * possible.
 *
 * Must be initialized with OS_LOCK_UNFAIR_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(unfair);
OS_LOCK_DECL(unfair, 2);
#define OS_LOCK_UNFAIR_INIT OS_LOCK_INIT(unfair)

/*!
 * @typedef os_lock_nospin_s
 *
 * @abstract
 * os_lock variant that does not spin on contention but waits in the kernel to
 * be woken up by an unlock. No attempt to resolve priority inversions is made,
 * so os_unfair_lock or os_lock_unfair_s should generally be preferred.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock, there is no attempt at fairness or lock ordering, e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock, so starvation is
 * possible.
 *
 * Must be initialized with OS_LOCK_NOSPIN_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(nospin);
OS_LOCK_DECL(nospin, 2);
#define OS_LOCK_NOSPIN_INIT OS_LOCK_INIT(nospin)

/*!
 * @typedef os_lock_spin_s
 *
 * @abstract
 * Deprecated os_lock variant that, on contention, first spins trying to
 * acquire the lock, then depresses the priority of the current thread, and
 * finally blocks the thread waiting for the lock to become available.
 * Equivalent to OSSpinLock and equally not recommended; see the discussion in
 * the libkern/OSAtomic.h headerdoc.
 *
 * @discussion
 * Spinlocks are intended to be held only for very brief periods of time. The
 * critical section must not make syscalls and should avoid touching areas of
 * memory that may trigger a page fault, in particular if the critical section
 * may be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads.
 *
 * Must be initialized with OS_LOCK_SPIN_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(spin);
OS_LOCK_DECL(spin, 2);
#define OS_LOCK_SPIN_INIT OS_LOCK_INIT(spin)

/*!
 * @typedef os_lock_handoff_s
 *
 * @abstract
 * os_lock variant that on contention hands off the current kernel thread to
 * the lock-owning userspace thread (if it is not running), temporarily
 * overriding its priority and IO throttle if necessary.
 *
 * @discussion
 * Intended for use in limited circumstances where the critical section might
 * be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads where the ordinary os_lock_spin_s would
 * be likely to encounter a priority inversion.
 *
 * IMPORTANT: This lock variant is NOT intended as a general replacement for
 * all uses of os_lock_spin_s or OSSpinLock.
 *
 * Must be initialized with OS_LOCK_HANDOFF_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(handoff);
OS_LOCK_DECL(handoff, 2);
#define OS_LOCK_HANDOFF_INIT OS_LOCK_INIT(handoff)


__BEGIN_DECLS

/*!
 * @function os_lock_lock
 *
 * @abstract
 * Locks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_lock(os_lock_t lock);

/*!
 * @function os_lock_trylock
 *
 * @abstract
 * Locks an os_lock variant if it is not already locked.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_lock_trylock(os_lock_t lock);

/*!
 * @function os_lock_unlock
 *
 * @abstract
 * Unlocks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_unlock(os_lock_t lock);
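
/*
 * Example (an illustrative sketch, not part of this header): a statically
 * initialized unfair os_lock variant protecting a counter, driven through
 * the generic entrypoints above. The names counter_lock, counter, and
 * counter_increment are hypothetical.
 *
 *	static os_lock_unfair_s counter_lock = OS_LOCK_UNFAIR_INIT;
 *	static uint64_t counter;
 *
 *	static void
 *	counter_increment(void)
 *	{
 *		os_lock_lock(&counter_lock);
 *		counter++;
 *		os_lock_unlock(&counter_lock);
 *	}
 */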

/*! @group os_unfair_lock SPI
 *
 * @abstract
 * Replacement for the deprecated OSSpinLock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The opaque lock value
 * contains thread ownership information that the system may use to attempt to
 * resolve priority inversions.
 *
 * This lock must be unlocked from the same thread that locked it; attempts to
 * unlock from a different thread will cause an assertion aborting the process.
 *
 * This lock must not be accessed from multiple processes or threads via shared
 * or multiply-mapped memory; the lock implementation relies on the address of
 * the lock value and on the owning process.
 *
 * @discussion
 * As with OSSpinLock, there is no attempt at fairness or lock ordering, e.g.
 * an unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock. This may be
 * advantageous for performance reasons, but also makes starvation of waiters a
 * possibility.
 *
 * Must be initialized with OS_UNFAIR_LOCK_INIT
 */

/*!
 * @typedef os_unfair_lock_options_t
 *
 * @const OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
 * This flag informs the runtime that the specified lock is used for data
 * synchronization and that the lock owner is always able to make progress
 * toward releasing the lock without the help of another thread in the same
 * process. This hint will cause the workqueue subsystem not to create new
 * threads to compensate for threads waiting for the lock.
 *
 * When this flag is used, the code running under the critical section should
 * be well known and under your control (generally, it should not call into
 * framework code).
 *
 * @const OS_UNFAIR_LOCK_ADAPTIVE_SPIN
 * This flag allows the kernel to use adaptive spinning when the holder
 * of the lock is currently on core. This should only be used for locks
 * where the protected critical section is always extremely short.
 */
OS_OPTIONS(os_unfair_lock_options, uint32_t,
	OS_UNFAIR_LOCK_NONE OS_SWIFT_NAME(None)
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00000000,
	OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION OS_SWIFT_NAME(DataSynchronization)
			OS_UNFAIR_LOCK_AVAILABILITY = 0x00010000,
	OS_UNFAIR_LOCK_ADAPTIVE_SPIN OS_SWIFT_NAME(AdaptiveSpin)
			__API_AVAILABLE(macos(10.15), ios(13.0),
			tvos(13.0), watchos(6.0), bridgeos(4.0)) = 0x00040000,
);

#if __swift__
#define OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(name) \
		static const os_unfair_lock_options_t \
		name##_FOR_SWIFT OS_SWIFT_NAME(name) = name
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_NONE);
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_ADAPTIVE_SPIN);
#undef OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT
#endif

/*!
 * @function os_unfair_lock_lock_with_options
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
		os_unfair_lock_options_t options);
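
/*
 * Example (an illustrative sketch only): taking an unfair lock around a
 * short, self-contained critical section with the data-synchronization hint
 * described above. The name cache_lock is hypothetical.
 *
 *	static os_unfair_lock cache_lock = OS_UNFAIR_LOCK_INIT;
 *
 *	os_unfair_lock_lock_with_options(&cache_lock,
 *			OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
 *	// ... update state that no other thread must help release ...
 *	os_unfair_lock_unlock(&cache_lock);
 */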

/*!
 * @group os_unfair_lock no-TSD interfaces
 *
 * Like the interfaces above, but these do not require being on a thread with
 * valid TSD, so they can be called from injected Mach threads.
 *
 * The normal routines use the TSD value for mach_thread_self(), but Mach
 * threads do not have TSDs. Instead, these functions require the value for
 * mach_thread_self() to be passed explicitly.
 *
 * This should only be used directly by libpthread.
 */
__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_no_tsd(os_unfair_lock_t lock,
		os_unfair_lock_options_t options, mach_port_t mts);

__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_unlock_no_tsd(os_unfair_lock_t lock, mach_port_t mts);


/*! @group os_unfair_recursive_lock SPI
 *
 * @abstract
 * Similar to os_unfair_lock, but recursive.
 *
 * @discussion
 * Must be initialized with OS_UNFAIR_RECURSIVE_LOCK_INIT
 */

#define OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY \
		__OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) \
		__TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)

#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		((os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		(os_unfair_recursive_lock{OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus)
#define OS_UNFAIR_RECURSIVE_LOCK_INIT (os_unfair_recursive_lock(\
		(os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0}))
#else
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
		{OS_UNFAIR_LOCK_INIT, 0}
#endif // OS_UNFAIR_RECURSIVE_LOCK_INIT

/*!
 * @typedef os_unfair_recursive_lock
 *
 * @abstract
 * Low-level lock that allows waiters to block efficiently on contention.
 *
 * @discussion
 * See os_unfair_lock.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
typedef struct os_unfair_recursive_lock_s {
	os_unfair_lock ourl_lock;
	uint32_t ourl_count;
} os_unfair_recursive_lock, *os_unfair_recursive_lock_t;

/*!
 * @function os_unfair_recursive_lock_lock_with_options
 *
 * @abstract
 * See os_unfair_lock_lock_with_options
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
		os_unfair_lock_options_t options);

/*!
 * @function os_unfair_recursive_lock_lock
 *
 * @abstract
 * See os_unfair_lock_lock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_lock(os_unfair_recursive_lock_t lock)
{
	os_unfair_recursive_lock_lock_with_options(lock, OS_UNFAIR_LOCK_NONE);
}

/*!
 * @function os_unfair_recursive_lock_trylock
 *
 * @abstract
 * See os_unfair_lock_trylock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_NONNULL_ALL
bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);

/*!
 * @function os_unfair_recursive_lock_unlock
 *
 * @abstract
 * See os_unfair_lock_unlock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
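
/*
 * Example (an illustrative sketch only): a recursive critical section. The
 * same thread may re-enter the lock; each lock must be balanced by an
 * unlock. The names rlock, outer, and inner are hypothetical.
 *
 *	static os_unfair_recursive_lock rlock = OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *	void
 *	outer(void)
 *	{
 *		os_unfair_recursive_lock_lock(&rlock);
 *		inner(); // also locks rlock; recursion by the owner is permitted
 *		os_unfair_recursive_lock_unlock(&rlock);
 *	}
 */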

/*!
 * @function os_unfair_recursive_lock_tryunlock4objc
 *
 * @abstract
 * See os_unfair_lock_unlock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);

/*!
 * @function os_unfair_recursive_lock_assert_owner
 *
 * @abstract
 * See os_unfair_lock_assert_owner
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_owner(os_unfair_recursive_lock_t lock)
{
	os_unfair_lock_assert_owner(&lock->ourl_lock);
}

/*!
 * @function os_unfair_recursive_lock_assert_not_owner
 *
 * @abstract
 * See os_unfair_lock_assert_not_owner
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_not_owner(os_unfair_recursive_lock_t lock)
{
	os_unfair_lock_assert_not_owner(&lock->ourl_lock);
}

/*!
 * @function os_unfair_recursive_lock_owned
 *
 * @abstract
 * This function is reserved for the use of clients that want to soft-fault
 * when locking models have been violated.
 *
 * @discussion
 * This is meant for SQLite use, to detect existing misuse of the API surface,
 * and is not meant for anything other than calling os_log_fault() when such
 * contracts are violated.
 *
 * There is little point in using this value for logic, as
 * os_unfair_recursive_lock is already recursive anyway.
 */
__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool
os_unfair_recursive_lock_owned(os_unfair_recursive_lock_t lock);
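
/*
 * Example (an illustrative sketch only, per the intended use above):
 * soft-fault instead of crashing when a caller violates the locking
 * contract. Assumes <os/log.h> for os_log_fault(); the lock name and
 * message are hypothetical.
 *
 *	if (!os_unfair_recursive_lock_owned(&rlock)) {
 *		os_log_fault(OS_LOG_DEFAULT,
 *				"API misuse: called without holding the lock");
 *	}
 */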

/*!
 * @function os_unfair_recursive_lock_unlock_forked_child
 *
 * @abstract
 * Function to be used in an atfork child handler to unlock a recursive unfair
 * lock.
 *
 * @discussion
 * This function helps with handling recursive locks in the presence of fork.
 *
 * It is typical to set up atfork handlers that will:
 * - take the lock in the pre-fork handler,
 * - drop the lock in the parent handler,
 * - reset the lock in the forked child.
 *
 * However, because a recursive lock may have been held by the current thread
 * already, resetting needs to act like an unlock; this function serves that
 * purpose. Unlike os_unfair_recursive_lock_unlock(), this function will fix up
 * the lock ownership to match the new identity of the thread after fork().
 */
__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock);
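
/*
 * Example (an illustrative sketch only): typical atfork wiring for a
 * recursive unfair lock, following the pattern described above. The lock
 * and handler names are hypothetical.
 *
 *	static os_unfair_recursive_lock g_lock = OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *	static void prepare(void) { os_unfair_recursive_lock_lock(&g_lock); }
 *	static void parent(void) { os_unfair_recursive_lock_unlock(&g_lock); }
 *	static void child(void)
 *	{
 *		os_unfair_recursive_lock_unlock_forked_child(&g_lock);
 *	}
 *
 *	// during initialization:
 *	pthread_atfork(prepare, parent, child);
 */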

#if __has_attribute(cleanup)

/*!
 * @function os_unfair_lock_scoped_guard_unlock
 *
 * @abstract
 * Used by os_unfair_lock_lock_scoped_guard
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_lock_scoped_guard_unlock(os_unfair_lock_t _Nonnull * _Nonnull lock)
{
	os_unfair_lock_unlock(*lock);
}

/*!
 * @function os_unfair_lock_lock_scoped_guard
 *
 * @abstract
 * Same as os_unfair_lock_lock() except that os_unfair_lock_unlock() is
 * automatically called when the enclosing C scope ends.
 *
 * @param name
 * Name for the variable holding the guard.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @see os_unfair_lock_lock
 * @see os_unfair_lock_unlock
 */
#define os_unfair_lock_lock_scoped_guard(guard_name, lock) \
	os_unfair_lock_t \
			__attribute__((cleanup(os_unfair_lock_scoped_guard_unlock))) \
			guard_name = lock; \
	os_unfair_lock_lock(guard_name)
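
/*
 * Example (an illustrative sketch only): the guard unlocks automatically
 * when the enclosing scope ends, including on early return. The names
 * table_lock and table_contains are hypothetical.
 *
 *	static os_unfair_lock table_lock = OS_UNFAIR_LOCK_INIT;
 *
 *	bool
 *	lookup(int key)
 *	{
 *		os_unfair_lock_lock_scoped_guard(guard, &table_lock);
 *		return table_contains(key); // table_lock released on scope exit
 *	}
 */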

#endif // __has_attribute(cleanup)

__END_DECLS

OS_ASSUME_NONNULL_END

/*! @group Inline os_unfair_lock interfaces
 *
 * Inline versions of the os_unfair_lock fastpath.
 *
 * Intended exclusively for special, highly performance-sensitive cases where
 * the function calls to the os_unfair_lock API entrypoints add measurable
 * overhead.
 *
 * Do not use in frameworks to implement synchronization API primitives that
 * are exposed to developers; doing so would lead to false positives for that
 * API from tools such as ThreadSanitizer.
 *
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 * DO NOT USE IN CODE THAT IS NOT PART OF THE OPERATING SYSTEM OR THAT IS NOT
 * REBUILT AS PART OF AN OS WORLDBUILD. YOU HAVE BEEN WARNED!
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 *
 * Define OS_UNFAIR_LOCK_INLINE=1 to indicate that you have read the warning
 * above and still wish to use these interfaces.
 */

#if defined(OS_UNFAIR_LOCK_INLINE) && OS_UNFAIR_LOCK_INLINE

#include <pthread/tsd_private.h>

#ifdef __cplusplus
extern "C++" {
#if !(__has_include(<atomic>) && __has_extension(cxx_atomic))
#error Cannot use inline os_unfair_lock without <atomic> and C++11 atomics
#endif
#include <atomic>
typedef std::atomic<os_unfair_lock> _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) std::_a
__BEGIN_DECLS
#else
#if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
#error Cannot use inline os_unfair_lock without <stdatomic.h> and C11 atomics
#endif
#include <stdatomic.h>
typedef _Atomic(os_unfair_lock) _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) _a
#endif

OS_ASSUME_NONNULL_BEGIN

#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_LOCK_UNLOCKED ((os_unfair_lock){0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock{})
#elif defined(__cplusplus)
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock())
#else
#define OS_UNFAIR_LOCK_UNLOCKED {0}
#endif

/*!
 * @function os_unfair_lock_lock_no_tsd_inline
 *
 * @abstract
 * Locks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used directly by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_no_tsd_inline(os_unfair_lock_t lock,
		os_unfair_lock_options_t options, mach_port_t mts)
{
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_no_tsd(lock, options, mts);
	}
}

/*!
 * @function os_unfair_lock_unlock_no_tsd_inline
 *
 * @abstract
 * Unlocks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used directly by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
__API_AVAILABLE(macos(10.16), ios(14.0), tvos(14.0), watchos(7.0))
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_no_tsd_inline(os_unfair_lock_t lock, mach_port_t mts)
{
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock_no_tsd(lock, mts);
	}
}

/*!
 * @function os_unfair_lock_lock_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock(lock);
	}
}

/*!
 * @function os_unfair_lock_lock_with_options_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_with_options_inline(os_unfair_lock_t lock,
		os_unfair_lock_options_t options)
{
	if (!_pthread_has_direct_tsd()) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_lock_with_options(lock, options);
	}
}

/*!
 * @function os_unfair_lock_trylock_inline
 *
 * @abstract
 * Locks an os_unfair_lock if it is not already locked.
 *
 * @discussion
 * It is invalid to surround this function with a retry loop; if this function
 * returns false, the program must be able to proceed without having acquired
 * the lock, or it must call os_unfair_lock_lock_inline() instead.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_WARN_RESULT OS_NONNULL_ALL
bool
os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &unlocked, locked,
			OSLOCK_STD(memory_order_acquire), OSLOCK_STD(memory_order_relaxed));
}
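
/*
 * Example (an illustrative sketch only): correct use of the trylock
 * fastpath per the discussion above. On failure the caller proceeds without
 * the lock rather than retrying; the names stats_lock, stats_flush_locked,
 * and stats_defer_flush are hypothetical.
 *
 *	if (os_unfair_lock_trylock_inline(&stats_lock)) {
 *		stats_flush_locked();
 *		os_unfair_lock_unlock_inline(&stats_lock);
 *	} else {
 *		stats_defer_flush(); // never spin in a retry loop on trylock
 *	}
 */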

/*!
 * @function os_unfair_lock_unlock_inline
 *
 * @abstract
 * Unlocks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
{
	if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
	uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
			_PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
	os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
	if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
			(_os_atomic_unfair_lock*)lock, &locked, unlocked,
			OSLOCK_STD(memory_order_release),
			OSLOCK_STD(memory_order_relaxed))) {
		return os_unfair_lock_unlock(lock);
	}
}
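
/*
 * Example (an illustrative sketch only): opting in to the inline fastpath.
 * Per the warning above, this is only valid for code built as part of the
 * OS. The lock name l is hypothetical.
 *
 *	#define OS_UNFAIR_LOCK_INLINE 1
 *	#include <os/lock_private.h>
 *
 *	static os_unfair_lock l = OS_UNFAIR_LOCK_INIT;
 *
 *	os_unfair_lock_lock_inline(&l);
 *	// ... critical section ...
 *	os_unfair_lock_unlock_inline(&l);
 */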

OS_ASSUME_NONNULL_END

#undef OSLOCK_STD
#ifdef __cplusplus
__END_DECLS
} // extern "C++"
#endif

#endif // OS_UNFAIR_LOCK_INLINE

#endif // __OS_LOCK_PRIVATE__