/*
 * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __OS_LOCK_PRIVATE__
#define __OS_LOCK_PRIVATE__

#include <Availability.h>
#include <TargetConditionals.h>
#include <sys/cdefs.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <os/base_private.h>
#include <os/lock.h>

OS_ASSUME_NONNULL_BEGIN

/*! @header
 * Low-level lock SPI
 */

#define OS_LOCK_SPI_VERSION 20171006

/*!
 * @typedef os_lock_t
 *
 * @abstract
 * Pointer to one of the os_lock variants.
 */

#define OS_LOCK_TYPE_STRUCT(type) const struct _os_lock_type_##type##_s
#define OS_LOCK_TYPE_REF(type) _os_lock_type_##type
#define OS_LOCK_TYPE_DECL(type) OS_LOCK_TYPE_STRUCT(type) OS_LOCK_TYPE_REF(type)

#define OS_LOCK(type) os_lock_##type##_s
#define OS_LOCK_STRUCT(type) struct OS_LOCK(type)

#if defined(__cplusplus) && __cplusplus >= 201103L

#define OS_LOCK_DECL(type, size) \
        typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
        private: \
            OS_LOCK_TYPE_STRUCT(type) * osl_type OS_UNUSED; \
            uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
        public: \
            constexpr OS_LOCK(type)() : \
                    osl_type(&OS_LOCK_TYPE_REF(type)), _osl_##type##_opaque() {} \
        } OS_LOCK(type)
#define OS_LOCK_INIT(type) {}

typedef OS_LOCK_STRUCT(base) {
protected:
    constexpr OS_LOCK(base)() {}
} *os_lock_t;

#else

#define OS_LOCK_DECL(type, size) \
        typedef OS_LOCK_STRUCT(type) { \
            OS_LOCK_TYPE_STRUCT(type) * osl_type; \
            uintptr_t _osl_##type##_opaque[size-1]; \
        } OS_LOCK(type)

#define OS_LOCK_INIT(type) { .osl_type = &OS_LOCK_TYPE_REF(type), }

#ifndef OS_LOCK_T_MEMBER
#define OS_LOCK_T_MEMBER(type) OS_LOCK_STRUCT(type) *_osl_##type
#endif

typedef OS_TRANSPARENT_UNION union {
    OS_LOCK_T_MEMBER(base);
    OS_LOCK_T_MEMBER(unfair);
    OS_LOCK_T_MEMBER(nospin);
    OS_LOCK_T_MEMBER(spin);
    OS_LOCK_T_MEMBER(handoff);
} os_lock_t;

#endif

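/*
 * Illustrative expansion (a sketch, not emitted by the header): in the plain C
 * case, OS_LOCK_DECL(unfair, 2) together with OS_LOCK_INIT(unfair) produces
 * approximately the following, so each variant is a type pointer plus opaque
 * state, and os_lock_t accepts a pointer to any variant via the transparent
 * union above:
 *
 *     typedef struct os_lock_unfair_s {
 *         const struct _os_lock_type_unfair_s *osl_type;
 *         uintptr_t _osl_unfair_opaque[1];
 *     } os_lock_unfair_s;
 *
 *     #define OS_LOCK_UNFAIR_INIT { .osl_type = &_os_lock_type_unfair, }
 */
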
/*!
 * @typedef os_lock_unfair_s
 *
 * @abstract
 * os_lock variant equivalent to os_unfair_lock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The lock value contains
 * ownership information that the system may use to attempt to resolve priority
 * inversions.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock, there is no attempt at fairness or lock ordering: an unlocker
 * can potentially reacquire the lock immediately, before a woken-up waiter
 * gets an opportunity to attempt to acquire it, so starvation is possible.
 *
 * Must be initialized with OS_LOCK_UNFAIR_INIT.
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(unfair);
OS_LOCK_DECL(unfair, 2);
#define OS_LOCK_UNFAIR_INIT OS_LOCK_INIT(unfair)

/*!
 * @typedef os_lock_nospin_s
 *
 * @abstract
 * os_lock variant that does not spin on contention but waits in the kernel to
 * be woken up by an unlock. No attempt to resolve priority inversions is made,
 * so os_unfair_lock or os_lock_unfair_s should generally be preferred.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock, there is no attempt at fairness or lock ordering: an unlocker
 * can potentially reacquire the lock immediately, before a woken-up waiter
 * gets an opportunity to attempt to acquire it, so starvation is possible.
 *
 * Must be initialized with OS_LOCK_NOSPIN_INIT.
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(nospin);
OS_LOCK_DECL(nospin, 2);
#define OS_LOCK_NOSPIN_INIT OS_LOCK_INIT(nospin)

/*!
 * @typedef os_lock_spin_s
 *
 * @abstract
 * Deprecated os_lock variant that on contention first spins trying to acquire
 * the lock, then depresses the priority of the current thread, and finally
 * blocks the thread waiting for the lock to become available. Equivalent to
 * OSSpinLock and equally not recommended; see the discussion in the
 * libkern/OSAtomic.h headerdoc.
 *
 * @discussion
 * Spinlocks are intended to be held only for very brief periods of time. The
 * critical section must not make syscalls and should avoid touching areas of
 * memory that may trigger a page fault, in particular if the critical section
 * may be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads.
 *
 * Must be initialized with OS_LOCK_SPIN_INIT.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(spin);
OS_LOCK_DECL(spin, 2);
#define OS_LOCK_SPIN_INIT OS_LOCK_INIT(spin)

/*!
 * @typedef os_lock_handoff_s
 *
 * @abstract
 * os_lock variant that on contention hands off the current kernel thread to
 * the lock-owning userspace thread (if it is not running), temporarily
 * overriding its priority and IO throttle if necessary.
 *
 * @discussion
 * Intended for use in limited circumstances where the critical section might
 * be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads where the ordinary os_lock_spin_s would
 * be likely to encounter a priority inversion.
 *
 * IMPORTANT: This lock variant is NOT intended as a general replacement for
 * all uses of os_lock_spin_s or OSSpinLock.
 *
 * Must be initialized with OS_LOCK_HANDOFF_INIT.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(handoff);
OS_LOCK_DECL(handoff, 2);
#define OS_LOCK_HANDOFF_INIT OS_LOCK_INIT(handoff)


__BEGIN_DECLS

/*!
 * @function os_lock_lock
 *
 * @abstract
 * Locks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_lock(os_lock_t lock);

/*!
 * @function os_lock_trylock
 *
 * @abstract
 * Locks an os_lock variant if it is not already locked.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_lock_trylock(os_lock_t lock);

/*!
 * @function os_lock_unlock
 *
 * @abstract
 * Unlocks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_unlock(os_lock_t lock);

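/*
 * Usage sketch (illustrative only; `g_list_lock`, `node_t`, `g_head` and
 * `list_push` are hypothetical names, not part of this SPI). Any variant
 * initialized with its matching OS_LOCK_*_INIT works with these entrypoints,
 * since os_lock_t accepts a pointer to any variant:
 *
 *     static os_lock_unfair_s g_list_lock = OS_LOCK_UNFAIR_INIT;
 *
 *     void
 *     list_push(node_t *node)
 *     {
 *         os_lock_lock(&g_list_lock);   // blocks in the kernel on contention
 *         node->next = g_head;          // keep the critical section short
 *         g_head = node;
 *         os_lock_unlock(&g_list_lock);
 *     }
 */
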
/*! @group os_unfair_lock SPI
 *
 * @abstract
 * Replacement for the deprecated OSSpinLock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The opaque lock value
 * contains thread ownership information that the system may use to attempt to
 * resolve priority inversions.
 *
 * This lock must be unlocked from the same thread that locked it; attempts to
 * unlock from a different thread will cause an assertion aborting the process.
 *
 * This lock must not be accessed from multiple processes or threads via shared
 * or multiply-mapped memory; the lock implementation relies on the address of
 * the lock value and the owning process.
 *
 * @discussion
 * As with OSSpinLock, there is no attempt at fairness or lock ordering, e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock. This may be
 * advantageous for performance reasons, but also makes starvation of waiters a
 * possibility.
 *
 * Must be initialized with OS_UNFAIR_LOCK_INIT.
 */

/*!
 * @typedef os_unfair_lock_options_t
 *
 * @const OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
 * This flag informs the runtime that the specified lock is used for data
 * synchronization and that the lock owner is always able to make progress
 * toward releasing the lock without the help of another thread in the same
 * process. This hint will cause the workqueue subsystem not to create new
 * threads to compensate for threads waiting on the lock.
 *
 * When this flag is used, the code running under the critical section should
 * be well known and under your control (generally, it should not call into
 * framework code).
 *
 * @const OS_UNFAIR_LOCK_ADAPTIVE_SPIN
 * This flag allows the kernel to use adaptive spinning when the holder of the
 * lock is currently on core. This should only be used for locks where the
 * protected critical section is always extremely short.
 */
OS_OPTIONS(os_unfair_lock_options, uint32_t,
    OS_UNFAIR_LOCK_NONE OS_SWIFT_NAME(None)
            OS_UNFAIR_LOCK_AVAILABILITY = 0x00000000,
    OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION OS_SWIFT_NAME(DataSynchronization)
            OS_UNFAIR_LOCK_AVAILABILITY = 0x00010000,
    OS_UNFAIR_LOCK_ADAPTIVE_SPIN OS_SWIFT_NAME(AdaptiveSpin)
            __API_AVAILABLE(macos(10.15), ios(13.0),
            tvos(13.0), watchos(6.0), bridgeos(4.0)) = 0x00040000,
);

#if __swift__
#define OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(name) \
        static const os_unfair_lock_options_t \
        name##_FOR_SWIFT OS_SWIFT_NAME(name) = name
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_NONE);
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT(OS_UNFAIR_LOCK_ADAPTIVE_SPIN);
#undef OS_UNFAIR_LOCK_OPTIONS_COMPAT_FOR_SWIFT
#endif

/*!
 * @function os_unfair_lock_lock_with_options
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
        os_unfair_lock_options_t options);

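/*
 * Usage sketch (illustrative; `g_cache_lock`, `entry_t`, `table_add` and
 * `cache_insert` are hypothetical names). OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
 * promises the workqueue subsystem that the owner can always make progress on
 * its own, so no new threads are created to compensate for waiters:
 *
 *     static os_unfair_lock g_cache_lock = OS_UNFAIR_LOCK_INIT;
 *
 *     void
 *     cache_insert(entry_t *entry)
 *     {
 *         os_unfair_lock_lock_with_options(&g_cache_lock,
 *                 OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
 *         // short, self-contained critical section: no framework calls
 *         table_add(entry);
 *         os_unfair_lock_unlock(&g_cache_lock);
 *     }
 */
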
/*! @group os_unfair_lock no-TSD interfaces
 *
 * Like the above, but these do not require being on a thread with valid TSD,
 * so they can be called from injected mach-threads. The normal routines use
 * the TSD value for mach_thread_self(); these routines use MACH_PORT_DEAD for
 * the locked value instead. As a result, they will be unable to resolve
 * priority inversions.
 *
 * This should only be used by libpthread.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock);

OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock);

/*! @group os_unfair_recursive_lock SPI
 *
 * @abstract
 * Similar to os_unfair_lock, but recursive.
 *
 * @discussion
 * Must be initialized with OS_UNFAIR_RECURSIVE_LOCK_INIT.
 */

#define OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY \
        __OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) \
        __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)

#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
        ((os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
        (os_unfair_recursive_lock{OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus)
#define OS_UNFAIR_RECURSIVE_LOCK_INIT (os_unfair_recursive_lock(\
        (os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0}))
#else
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
        {OS_UNFAIR_LOCK_INIT, 0}
#endif // OS_UNFAIR_RECURSIVE_LOCK_INIT

/*!
 * @typedef os_unfair_recursive_lock
 *
 * @abstract
 * Low-level lock that allows waiters to block efficiently on contention.
 *
 * @discussion
 * See os_unfair_lock.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
typedef struct os_unfair_recursive_lock_s {
    os_unfair_lock ourl_lock;
    uint32_t ourl_count;
} os_unfair_recursive_lock, *os_unfair_recursive_lock_t;

/*!
 * @function os_unfair_recursive_lock_lock_with_options
 *
 * @abstract
 * See os_unfair_lock_lock_with_options.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
        os_unfair_lock_options_t options);

/*!
 * @function os_unfair_recursive_lock_lock
 *
 * @abstract
 * See os_unfair_lock_lock.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_lock(os_unfair_recursive_lock_t lock)
{
    os_unfair_recursive_lock_lock_with_options(lock, OS_UNFAIR_LOCK_NONE);
}

/*!
 * @function os_unfair_recursive_lock_trylock
 *
 * @abstract
 * See os_unfair_lock_trylock.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_NONNULL_ALL
bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);

/*!
 * @function os_unfair_recursive_lock_unlock
 *
 * @abstract
 * See os_unfair_lock_unlock.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);

/*!
 * @function os_unfair_recursive_lock_tryunlock4objc
 *
 * @abstract
 * See os_unfair_lock_unlock.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);

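/*
 * Usage sketch (illustrative; `g_rlock` and `notify_locked` are hypothetical
 * names). The same thread may acquire the lock again while already holding
 * it; each lock must be balanced by an unlock:
 *
 *     static os_unfair_recursive_lock g_rlock = OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *     void
 *     notify_locked(void)
 *     {
 *         os_unfair_recursive_lock_lock(&g_rlock);   // recursive acquire is OK
 *         // ...
 *         os_unfair_recursive_lock_unlock(&g_rlock);
 *     }
 *
 *     os_unfair_recursive_lock_lock(&g_rlock);
 *     notify_locked();                               // relocks without deadlock
 *     os_unfair_recursive_lock_unlock(&g_rlock);
 */
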
/*!
 * @function os_unfair_recursive_lock_assert_owner
 *
 * @abstract
 * See os_unfair_lock_assert_owner.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_owner(os_unfair_recursive_lock_t lock)
{
    os_unfair_lock_assert_owner(&lock->ourl_lock);
}

/*!
 * @function os_unfair_recursive_lock_assert_not_owner
 *
 * @abstract
 * See os_unfair_lock_assert_not_owner.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_not_owner(os_unfair_recursive_lock_t lock)
{
    os_unfair_lock_assert_not_owner(&lock->ourl_lock);
}

/*!
 * @function os_unfair_recursive_lock_owned
 *
 * @abstract
 * This function is reserved for the use of people who want to soft-fault
 * when locking models have been violated.
 *
 * @discussion
 * This is meant for SQLite use to detect existing misuse of the API surface,
 * and is not meant for anything other than calling os_log_fault() when such
 * contracts are violated.
 *
 * There is little point in using this value for logic, as
 * os_unfair_recursive_lock is already recursive anyway.
 */
__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool
os_unfair_recursive_lock_owned(os_unfair_recursive_lock_t lock);

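/*
 * Soft-fault sketch (illustrative; `api_enter` is a hypothetical precondition
 * check, and os_log_fault()/OS_LOG_DEFAULT are assumed from <os/log.h>). The
 * result feeds a fault log rather than program logic:
 *
 *     void
 *     api_enter(os_unfair_recursive_lock_t lock)
 *     {
 *         if (!os_unfair_recursive_lock_owned(lock)) {
 *             os_log_fault(OS_LOG_DEFAULT,
 *                     "API misuse: caller does not hold the lock");
 *         }
 *     }
 */
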
/*!
 * @function os_unfair_recursive_lock_unlock_forked_child
 *
 * @abstract
 * Function to be used in an atfork child handler to unlock a recursive unfair
 * lock.
 *
 * @discussion
 * This function helps with handling recursive locks in the presence of fork.
 *
 * It is typical to set up atfork handlers that will:
 * - take the lock in the pre-fork handler,
 * - drop the lock in the parent handler,
 * - reset the lock in the forked child.
 *
 * However, because a recursive lock may have been held by the current thread
 * already, resetting needs to act like an unlock. This function serves that
 * purpose. Unlike os_unfair_recursive_lock_unlock(), this function will fix up
 * the lock ownership to match the new identity of the thread after fork().
 */
__OSX_AVAILABLE(10.15) __IOS_AVAILABLE(13.0)
__TVOS_AVAILABLE(13.0) __WATCHOS_AVAILABLE(6.0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_unlock_forked_child(os_unfair_recursive_lock_t lock);

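/*
 * Sketch of the atfork pattern described above (illustrative; `g_lock` and the
 * handler names are hypothetical, pthread_atfork() is from <pthread.h>):
 *
 *     static os_unfair_recursive_lock g_lock = OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *     static void _prepare(void) { os_unfair_recursive_lock_lock(&g_lock); }
 *     static void _parent(void)  { os_unfair_recursive_lock_unlock(&g_lock); }
 *     static void _child(void)
 *     {
 *         // after fork() the owner identity changed; fix up and unlock
 *         os_unfair_recursive_lock_unlock_forked_child(&g_lock);
 *     }
 *
 *     // during initialization:
 *     pthread_atfork(_prepare, _parent, _child);
 */
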
#if __has_attribute(cleanup)

/*!
 * @function os_unfair_lock_scoped_guard_unlock
 *
 * @abstract
 * Used by os_unfair_lock_lock_scoped_guard.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_lock_scoped_guard_unlock(os_unfair_lock_t _Nonnull * _Nonnull lock)
{
    os_unfair_lock_unlock(*lock);
}

/*!
 * @function os_unfair_lock_lock_scoped_guard
 *
 * @abstract
 * Same as os_unfair_lock_lock() except that os_unfair_lock_unlock() is
 * automatically called when the enclosing C scope ends.
 *
 * @param name
 * Name for the variable holding the guard.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @see os_unfair_lock_lock
 * @see os_unfair_lock_unlock
 */
#define os_unfair_lock_lock_scoped_guard(guard_name, lock) \
        os_unfair_lock_t \
        __attribute__((cleanup(os_unfair_lock_scoped_guard_unlock))) \
        guard_name = lock; \
        os_unfair_lock_lock(guard_name)

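/*
 * Usage sketch (illustrative; `g_lock` and `update` are hypothetical names).
 * The guard unlocks automatically when the scope ends, on every exit path:
 *
 *     static os_unfair_lock g_lock = OS_UNFAIR_LOCK_INIT;
 *
 *     void
 *     update(bool fast_path)
 *     {
 *         os_unfair_lock_lock_scoped_guard(guard, &g_lock);
 *         if (fast_path) return;   // unlocked by the cleanup handler
 *         // ... more work under the lock ...
 *     }                            // unlocked here as well
 */
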
#endif // __has_attribute(cleanup)

__END_DECLS

OS_ASSUME_NONNULL_END

/*! @group Inline os_unfair_lock interfaces
 *
 * Inline versions of the os_unfair_lock fastpath.
 *
 * Intended exclusively for special highly performance-sensitive cases where
 * the function calls to the os_unfair_lock API entrypoints add measurable
 * overhead.
 *
 * Do not use in frameworks to implement synchronization API primitives that
 * are exposed to developers; that would lead to false positives for that API
 * from tools such as ThreadSanitizer.
 *
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 * DO NOT USE IN CODE THAT IS NOT PART OF THE OPERATING SYSTEM OR THAT IS NOT
 * REBUILT AS PART OF AN OS WORLDBUILD. YOU HAVE BEEN WARNED!
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 *
 * Define OS_UNFAIR_LOCK_INLINE=1 to indicate that you have read the warning
 * above and still wish to use these interfaces.
 */

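/*
 * Opt-in sketch (client-side, illustrative; `fast_lock` and `hot_path` are
 * hypothetical names). The define must precede inclusion of this header:
 *
 *     #define OS_UNFAIR_LOCK_INLINE 1
 *     #include <os/lock_private.h>
 *
 *     static os_unfair_lock fast_lock = OS_UNFAIR_LOCK_INIT;
 *
 *     void
 *     hot_path(void)
 *     {
 *         os_unfair_lock_lock_inline(&fast_lock);
 *         // extremely short critical section
 *         os_unfair_lock_unlock_inline(&fast_lock);
 *     }
 */
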
#if defined(OS_UNFAIR_LOCK_INLINE) && OS_UNFAIR_LOCK_INLINE

#include <pthread/tsd_private.h>

#ifdef __cplusplus
extern "C++" {
#if !(__has_include(<atomic>) && __has_extension(cxx_atomic))
#error Cannot use inline os_unfair_lock without <atomic> and C++11 atomics
#endif
#include <atomic>
typedef std::atomic<os_unfair_lock> _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) std::_a
__BEGIN_DECLS
#else
#if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
#error Cannot use inline os_unfair_lock without <stdatomic.h> and C11 atomics
#endif
#include <stdatomic.h>
typedef _Atomic(os_unfair_lock) _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) _a
#endif

OS_ASSUME_NONNULL_BEGIN

#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_LOCK_UNLOCKED ((os_unfair_lock){0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock{})
#elif defined(__cplusplus)
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock())
#else
#define OS_UNFAIR_LOCK_UNLOCKED {0}
#endif

/*!
 * @function os_unfair_lock_lock_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline(os_unfair_lock_t lock)
{
    if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
    uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
            _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &unlocked, locked,
            OSLOCK_STD(memory_order_acquire),
            OSLOCK_STD(memory_order_relaxed))) {
        return os_unfair_lock_lock(lock);
    }
}

/*!
 * @function os_unfair_lock_lock_with_options_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_with_options_inline(os_unfair_lock_t lock,
        os_unfair_lock_options_t options)
{
    if (!_pthread_has_direct_tsd()) {
        return os_unfair_lock_lock_with_options(lock, options);
    }
    uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
            _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &unlocked, locked,
            OSLOCK_STD(memory_order_acquire),
            OSLOCK_STD(memory_order_relaxed))) {
        return os_unfair_lock_lock_with_options(lock, options);
    }
}

/*!
 * @function os_unfair_lock_trylock_inline
 *
 * @abstract
 * Locks an os_unfair_lock if it is not already locked.
 *
 * @discussion
 * It is invalid to surround this function with a retry loop; if this function
 * returns false, the program must be able to proceed without having acquired
 * the lock, or it must call os_unfair_lock_lock_inline() instead.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_WARN_RESULT OS_NONNULL_ALL
bool
os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
{
    if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
    uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
            _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &unlocked, locked,
            OSLOCK_STD(memory_order_acquire), OSLOCK_STD(memory_order_relaxed));
}

/*!
 * @function os_unfair_lock_unlock_inline
 *
 * @abstract
 * Unlocks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
{
    if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
    uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
            _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &locked, unlocked,
            OSLOCK_STD(memory_order_release),
            OSLOCK_STD(memory_order_relaxed))) {
        return os_unfair_lock_unlock(lock);
    }
}

/*!
 * @function os_unfair_lock_lock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Locks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
    uint32_t mts = MACH_PORT_DEAD;
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &unlocked, locked,
            OSLOCK_STD(memory_order_acquire),
            OSLOCK_STD(memory_order_relaxed))) {
        return os_unfair_lock_lock_no_tsd_4libpthread(lock);
    }
}

/*!
 * @function os_unfair_lock_unlock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Unlocks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
    uint32_t mts = MACH_PORT_DEAD;
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &locked, unlocked,
            OSLOCK_STD(memory_order_release),
            OSLOCK_STD(memory_order_relaxed))) {
        return os_unfair_lock_unlock_no_tsd_4libpthread(lock);
    }
}

OS_ASSUME_NONNULL_END

#undef OSLOCK_STD
#ifdef __cplusplus
__END_DECLS
} // extern "C++"
#endif

#endif // OS_UNFAIR_LOCK_INLINE

#endif // __OS_LOCK_PRIVATE__