/*
 * Copyright (c) 2013-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#ifndef __OS_LOCK_PRIVATE__
#define __OS_LOCK_PRIVATE__

#include <Availability.h>
#include <TargetConditionals.h>
#include <sys/cdefs.h>
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>
#include <os/base_private.h>
#include <os/lock.h>

OS_ASSUME_NONNULL_BEGIN

/*! @header
 * Low-level lock SPI
 */

#define OS_LOCK_SPI_VERSION 20171006

/*!
 * @typedef os_lock_t
 *
 * @abstract
 * Pointer to one of the os_lock variants.
 */

#define OS_LOCK_TYPE_STRUCT(type) const struct _os_lock_type_##type##_s
#define OS_LOCK_TYPE_REF(type) _os_lock_type_##type
#define OS_LOCK_TYPE_DECL(type) OS_LOCK_TYPE_STRUCT(type) OS_LOCK_TYPE_REF(type)

#define OS_LOCK(type) os_lock_##type##_s
#define OS_LOCK_STRUCT(type) struct OS_LOCK(type)

#if defined(__cplusplus) && __cplusplus >= 201103L

#define OS_LOCK_DECL(type, size) \
        typedef OS_LOCK_STRUCT(type) : public OS_LOCK(base) { \
            private: \
            OS_LOCK_TYPE_STRUCT(type) * osl_type OS_UNUSED; \
            uintptr_t _osl_##type##_opaque[size-1] OS_UNUSED; \
            public: \
            constexpr OS_LOCK(type)() : \
            osl_type(&OS_LOCK_TYPE_REF(type)), _osl_##type##_opaque() {} \
        } OS_LOCK(type)
#define OS_LOCK_INIT(type) {}

typedef OS_LOCK_STRUCT(base) {
    protected:
    constexpr OS_LOCK(base)() {}
} *os_lock_t;

#else

#define OS_LOCK_DECL(type, size) \
        typedef OS_LOCK_STRUCT(type) { \
            OS_LOCK_TYPE_STRUCT(type) * osl_type; \
            uintptr_t _osl_##type##_opaque[size-1]; \
        } OS_LOCK(type)

#define OS_LOCK_INIT(type) { .osl_type = &OS_LOCK_TYPE_REF(type), }

#ifndef OS_LOCK_T_MEMBER
#define OS_LOCK_T_MEMBER(type) OS_LOCK_STRUCT(type) *_osl_##type
#endif

typedef OS_TRANSPARENT_UNION union {
    OS_LOCK_T_MEMBER(base);
    OS_LOCK_T_MEMBER(unfair);
    OS_LOCK_T_MEMBER(nospin);
    OS_LOCK_T_MEMBER(spin);
    OS_LOCK_T_MEMBER(handoff);
    OS_LOCK_T_MEMBER(eliding);
    OS_LOCK_T_MEMBER(transactional);
} os_lock_t;

#endif

/*!
 * @typedef os_lock_unfair_s
 *
 * @abstract
 * os_lock variant equivalent to os_unfair_lock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The lock value contains
 * ownership information that the system may use to attempt to resolve priority
 * inversions.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
 * can potentially immediately reacquire the lock before a woken-up waiter gets
 * an opportunity to attempt to acquire the lock, so starvation is possible.
 *
 * Must be initialized with OS_LOCK_UNFAIR_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(unfair);
OS_LOCK_DECL(unfair, 2);
#define OS_LOCK_UNFAIR_INIT OS_LOCK_INIT(unfair)

/*!
 * @typedef os_lock_nospin_s
 *
 * @abstract
 * os_lock variant that does not spin on contention but waits in the kernel to
 * be woken up by an unlock. No attempt to resolve priority inversions is made,
 * so os_unfair_lock or os_lock_unfair_s should generally be preferred.
 *
 * @discussion
 * Intended as a replacement for os_lock_spin_s or OSSpinLock. As with
 * OSSpinLock there is no attempt at fairness or lock ordering, e.g. an unlocker
 * can potentially immediately reacquire the lock before a woken-up waiter gets
 * an opportunity to attempt to acquire the lock, so starvation is possible.
 *
 * Must be initialized with OS_LOCK_NOSPIN_INIT
 */
__OSX_AVAILABLE(10.12) __IOS_AVAILABLE(10.0)
__TVOS_AVAILABLE(10.0) __WATCHOS_AVAILABLE(3.0)
OS_EXPORT OS_LOCK_TYPE_DECL(nospin);
OS_LOCK_DECL(nospin, 2);
#define OS_LOCK_NOSPIN_INIT OS_LOCK_INIT(nospin)

/*!
 * @typedef os_lock_spin_s
 *
 * @abstract
 * Deprecated os_lock variant that on contention first spins trying to acquire
 * the lock, then depresses the priority of the current thread, and finally
 * blocks the thread waiting for the lock to become available. Equivalent to
 * OSSpinLock and equally not recommended; see the discussion in
 * libkern/OSAtomic.h headerdoc.
 *
 * @discussion
 * Spinlocks are intended to be held only for very brief periods of time. The
 * critical section must not make syscalls and should avoid touching areas of
 * memory that may trigger a page fault, in particular if the critical section
 * may be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads.
 *
 * Must be initialized with OS_LOCK_SPIN_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(spin);
OS_LOCK_DECL(spin, 2);
#define OS_LOCK_SPIN_INIT OS_LOCK_INIT(spin)

/*!
 * @typedef os_lock_handoff_s
 *
 * @abstract
 * os_lock variant that on contention hands off the current kernel thread to
 * the lock-owning userspace thread (if it is not running), temporarily
 * overriding its priority and IO throttle if necessary.
 *
 * @discussion
 * Intended for use in limited circumstances where the critical section might
 * be executing on threads of widely differing priorities or on a mix of
 * IO-throttled and unthrottled threads where the ordinary os_lock_spin_s would
 * be likely to encounter a priority inversion.
 *
 * IMPORTANT: This lock variant is NOT intended as a general replacement for
 * all uses of os_lock_spin_s or OSSpinLock.
 *
 * Must be initialized with OS_LOCK_HANDOFF_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_LOCK_TYPE_DECL(handoff);
OS_LOCK_DECL(handoff, 2);
#define OS_LOCK_HANDOFF_INIT OS_LOCK_INIT(handoff)


#if !TARGET_OS_IPHONE
/*!
 * @typedef os_lock_eliding_s
 *
 * @abstract
 * os_lock variant that uses hardware lock elision support if available to
 * allow multiple processors to concurrently execute a critical section as
 * long as they don't perform conflicting operations on each other's data. In
 * case of conflict, the lock reverts to exclusive operation and os_lock_spin_s
 * behavior on contention (at potential extra cost for the aborted attempt at
 * lock-elided concurrent execution). If hardware HLE support is not present,
 * this lock variant behaves like os_lock_spin_s.
 *
 * @discussion
 * IMPORTANT: Use of this lock variant MUST be extensively tested on hardware
 * with HLE support to ensure the data access pattern and length of the
 * critical section allows lock-elided execution to succeed frequently enough
 * to offset the cost of any aborted concurrent execution.
 *
 * Must be initialized with OS_LOCK_ELIDING_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA)
OS_EXPORT OS_LOCK_TYPE_DECL(eliding);
OS_LOCK_DECL(eliding, 8) OS_ALIGNED(64);
#define OS_LOCK_ELIDING_INIT OS_LOCK_INIT(eliding)

/*!
 * @typedef os_lock_transactional_s
 *
 * @abstract
 * os_lock variant that uses hardware restricted transactional memory support
 * if available to allow multiple processors to concurrently execute the
 * critical section as a transactional region. If transactional execution
 * aborts, the lock reverts to exclusive operation and os_lock_spin_s behavior
 * on contention (at potential extra cost for the aborted attempt at
 * transactional concurrent execution). If hardware RTM support is not present,
 * this lock variant behaves like os_lock_eliding_s.
 *
 * @discussion
 * IMPORTANT: Use of this lock variant MUST be extensively tested on hardware
 * with RTM support to ensure the data access pattern and length of the
 * critical section allows transactional execution to succeed frequently
 * enough to offset the cost of any aborted transactions.
 *
 * Must be initialized with OS_LOCK_TRANSACTIONAL_INIT
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_NA)
OS_EXPORT OS_LOCK_TYPE_DECL(transactional);
OS_LOCK_DECL(transactional, 8) OS_ALIGNED(64);
#define OS_LOCK_TRANSACTIONAL_INIT OS_LOCK_INIT(transactional)
#endif

__BEGIN_DECLS

/*!
 * @function os_lock_lock
 *
 * @abstract
 * Locks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_lock(os_lock_t lock);

/*!
 * @function os_lock_trylock
 *
 * @abstract
 * Locks an os_lock variant if it is not already locked.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_lock_trylock(os_lock_t lock);

/*!
 * @function os_lock_unlock
 *
 * @abstract
 * Unlocks an os_lock variant.
 *
 * @param lock
 * Pointer to one of the os_lock variants.
 */
__OSX_AVAILABLE_STARTING(__MAC_10_9,__IPHONE_7_0)
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_lock_unlock(os_lock_t lock);
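
/*
 * Illustrative usage sketch, not part of this header's SPI surface: a
 * file-scope os_lock_unfair_s protecting a counter, passed to the generic
 * entry points above via the os_lock_t transparent union (C) or base-class
 * pointer (C++). The variable and function names are hypothetical.
 *
 *     static os_lock_unfair_s g_lock = OS_LOCK_UNFAIR_INIT;
 *     static uint64_t g_counter;
 *
 *     static void
 *     counter_increment(void)
 *     {
 *         os_lock_lock(&g_lock);    // blocks in the kernel on contention
 *         g_counter++;
 *         os_lock_unlock(&g_lock);
 *     }
 *
 *     static bool
 *     counter_try_increment(void)
 *     {
 *         if (!os_lock_trylock(&g_lock)) return false; // already locked
 *         g_counter++;
 *         os_lock_unlock(&g_lock);
 *         return true;
 *     }
 */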

/*! @group os_unfair_lock SPI
 *
 * @abstract
 * Replacement for the deprecated OSSpinLock. Does not spin on contention but
 * waits in the kernel to be woken up by an unlock. The opaque lock value
 * contains thread ownership information that the system may use to attempt to
 * resolve priority inversions.
 *
 * This lock must be unlocked from the same thread that locked it; attempts to
 * unlock from a different thread will cause an assertion aborting the process.
 *
 * This lock must not be accessed from multiple processes or threads via shared
 * or multiply-mapped memory, as the lock implementation relies on the address
 * of the lock value and owning process.
 *
 * @discussion
 * As with OSSpinLock there is no attempt at fairness or lock ordering, e.g. an
 * unlocker can potentially immediately reacquire the lock before a woken-up
 * waiter gets an opportunity to attempt to acquire the lock. This may be
 * advantageous for performance reasons, but also makes starvation of waiters a
 * possibility.
 *
 * Must be initialized with OS_UNFAIR_LOCK_INIT
 */

/*!
 * @typedef os_unfair_lock_options_t
 *
 * @const OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION
 * This flag informs the runtime that the specified lock is used for data
 * synchronization and that the lock owner is always able to make progress
 * toward releasing the lock without the help of another thread in the same
 * process. This hint will cause the workqueue subsystem to not create new
 * threads to offset for threads waiting for the lock.
 *
 * When this flag is used, the code running under the critical section should
 * be well known and under your control (generally it should not call into
 * framework code).
 */
OS_ENUM(os_unfair_lock_options, uint32_t,
    OS_UNFAIR_LOCK_NONE OS_UNFAIR_LOCK_AVAILABILITY = 0x00000000,
    OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION OS_UNFAIR_LOCK_AVAILABILITY = 0x00010000,
);

/*!
 * @function os_unfair_lock_lock_with_options
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_with_options(os_unfair_lock_t lock,
        os_unfair_lock_options_t options);
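
/*
 * Illustrative sketch with hypothetical names: taking an os_unfair_lock with
 * the data-synchronization hint around a short, self-contained critical
 * section. os_unfair_lock and OS_UNFAIR_LOCK_INIT come from <os/lock.h>.
 *
 *     static os_unfair_lock g_state_lock = OS_UNFAIR_LOCK_INIT;
 *     static int g_state;
 *
 *     static void
 *     state_set(int value)
 *     {
 *         // The owner can always make progress without help from other
 *         // threads, so the workqueue subsystem need not create new threads
 *         // to compensate for waiters.
 *         os_unfair_lock_lock_with_options(&g_state_lock,
 *                 OS_UNFAIR_LOCK_DATA_SYNCHRONIZATION);
 *         g_state = value;
 *         os_unfair_lock_unlock(&g_state_lock);
 *     }
 */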

/*! @group os_unfair_lock no-TSD interfaces
 *
 * Like the above, but don't require being on a thread with valid TSD, so they
 * can be called from injected mach-threads. The normal routines use the TSD
 * value for mach_thread_self(); these routines use MACH_PORT_DEAD for the
 * locked value instead. As a result, they will be unable to resolve priority
 * inversions.
 *
 * This should only be used by libpthread.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_lock_no_tsd_4libpthread(os_unfair_lock_t lock);

OS_UNFAIR_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_lock_unlock_no_tsd_4libpthread(os_unfair_lock_t lock);

/*! @group os_unfair_recursive_lock SPI
 *
 * @abstract
 * Similar to os_unfair_lock, but recursive.
 *
 * @discussion
 * Must be initialized with OS_UNFAIR_RECURSIVE_LOCK_INIT
 */

#define OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY \
        __OSX_AVAILABLE(10.14) __IOS_AVAILABLE(12.0) \
        __TVOS_AVAILABLE(12.0) __WATCHOS_AVAILABLE(5.0)

#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
        ((os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
        (os_unfair_recursive_lock{OS_UNFAIR_LOCK_INIT, 0})
#elif defined(__cplusplus)
#define OS_UNFAIR_RECURSIVE_LOCK_INIT (os_unfair_recursive_lock(\
        (os_unfair_recursive_lock){OS_UNFAIR_LOCK_INIT, 0}))
#else
#define OS_UNFAIR_RECURSIVE_LOCK_INIT \
        {OS_UNFAIR_LOCK_INIT, 0}
#endif // OS_UNFAIR_RECURSIVE_LOCK_INIT

/*!
 * @typedef os_unfair_recursive_lock
 *
 * @abstract
 * Low-level lock that allows waiters to block efficiently on contention.
 *
 * @discussion
 * See os_unfair_lock.
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
typedef struct os_unfair_recursive_lock_s {
    os_unfair_lock ourl_lock;
    uint32_t ourl_count;
} os_unfair_recursive_lock, *os_unfair_recursive_lock_t;

/*!
 * @function os_unfair_recursive_lock_lock_with_options
 *
 * @abstract
 * See os_unfair_lock_lock_with_options
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_lock_with_options(os_unfair_recursive_lock_t lock,
        os_unfair_lock_options_t options);

/*!
 * @function os_unfair_recursive_lock_lock
 *
 * @abstract
 * See os_unfair_lock_lock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_lock(os_unfair_recursive_lock_t lock)
{
    os_unfair_recursive_lock_lock_with_options(lock, OS_UNFAIR_LOCK_NONE);
}

/*!
 * @function os_unfair_recursive_lock_trylock
 *
 * @abstract
 * See os_unfair_lock_trylock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_WARN_RESULT OS_NONNULL_ALL
bool os_unfair_recursive_lock_trylock(os_unfair_recursive_lock_t lock);

/*!
 * @function os_unfair_recursive_lock_unlock
 *
 * @abstract
 * See os_unfair_lock_unlock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
void os_unfair_recursive_lock_unlock(os_unfair_recursive_lock_t lock);
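
/*
 * Illustrative sketch with hypothetical names (node_t and the functions are
 * invented for the example): a critical section that may re-enter itself on
 * the same thread, which a plain os_unfair_lock would not permit.
 *
 *     static os_unfair_recursive_lock g_node_lock =
 *             OS_UNFAIR_RECURSIVE_LOCK_INIT;
 *
 *     static void
 *     node_visit(node_t *node)
 *     {
 *         os_unfair_recursive_lock_lock(&g_node_lock);
 *         if (node->child) node_visit(node->child); // relocks recursively
 *         os_unfair_recursive_lock_unlock(&g_node_lock);
 *     }
 */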

/*!
 * @function os_unfair_recursive_lock_tryunlock4objc
 *
 * @abstract
 * See os_unfair_lock_unlock
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_EXPORT OS_NOTHROW OS_NONNULL_ALL
bool os_unfair_recursive_lock_tryunlock4objc(os_unfair_recursive_lock_t lock);

/*!
 * @function os_unfair_recursive_lock_assert_owner
 *
 * @abstract
 * See os_unfair_lock_assert_owner
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_owner(os_unfair_recursive_lock_t lock)
{
    os_unfair_lock_assert_owner(&lock->ourl_lock);
}

/*!
 * @function os_unfair_recursive_lock_assert_not_owner
 *
 * @abstract
 * See os_unfair_lock_assert_not_owner
 */
OS_UNFAIR_RECURSIVE_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_recursive_lock_assert_not_owner(os_unfair_recursive_lock_t lock)
{
    os_unfair_lock_assert_not_owner(&lock->ourl_lock);
}

#if __has_attribute(cleanup)

/*!
 * @function os_unfair_lock_scoped_guard_unlock
 *
 * @abstract
 * Used by os_unfair_lock_lock_scoped_guard
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NOTHROW OS_NONNULL_ALL
void
os_unfair_lock_scoped_guard_unlock(os_unfair_lock_t _Nonnull * _Nonnull lock)
{
    os_unfair_lock_unlock(*lock);
}

/*!
 * @function os_unfair_lock_lock_scoped_guard
 *
 * @abstract
 * Same as os_unfair_lock_lock() except that os_unfair_lock_unlock() is
 * automatically called when the enclosing C scope ends.
 *
 * @param name
 * Name for the variable holding the guard.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @see os_unfair_lock_lock
 * @see os_unfair_lock_unlock
 */
#define os_unfair_lock_lock_scoped_guard(guard_name, lock) \
    os_unfair_lock_t \
            __attribute__((cleanup(os_unfair_lock_scoped_guard_unlock))) \
            guard_name = lock; \
    os_unfair_lock_lock(guard_name)
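
/*
 * Illustrative sketch with hypothetical names (table_has_room and table_add
 * are invented for the example): the cleanup attribute unlocks the guard on
 * every exit path from the enclosing scope, including early returns.
 *
 *     static os_unfair_lock g_table_lock = OS_UNFAIR_LOCK_INIT;
 *
 *     static bool
 *     table_insert(int key)
 *     {
 *         os_unfair_lock_lock_scoped_guard(guard, &g_table_lock);
 *         if (!table_has_room()) return false; // guard unlocks here too
 *         table_add(key);
 *         return true;
 *     } // guard unlocks here
 */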

#endif // __has_attribute(cleanup)

__END_DECLS

OS_ASSUME_NONNULL_END

/*! @group Inline os_unfair_lock interfaces
 *
 * Inline versions of the os_unfair_lock fastpath.
 *
 * Intended exclusively for special highly performance-sensitive cases where
 * the function calls to the os_unfair_lock API entrypoints add measurable
 * overhead.
 *
 * Do not use in frameworks to implement synchronization API primitives that
 * are exposed to developers; that would lead to false positives for that API
 * from tools such as ThreadSanitizer.
 *
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 * DO NOT USE IN CODE THAT IS NOT PART OF THE OPERATING SYSTEM OR THAT IS NOT
 * REBUILT AS PART OF AN OS WORLDBUILD. YOU HAVE BEEN WARNED!
 * !!!!!!!!!!!!!!!!!!!!! WARNING WARNING WARNING WARNING !!!!!!!!!!!!!!!!!!!!!
 *
 * Define OS_UNFAIR_LOCK_INLINE=1 to indicate that you have read the warning
 * above and still wish to use these interfaces.
 */

#if defined(OS_UNFAIR_LOCK_INLINE) && OS_UNFAIR_LOCK_INLINE

#include <pthread/tsd_private.h>

#ifdef __cplusplus
extern "C++" {
#if !(__has_include(<atomic>) && __has_extension(cxx_atomic))
#error Cannot use inline os_unfair_lock without <atomic> and C++11 atomics
#endif
#include <atomic>
typedef std::atomic<os_unfair_lock> _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) std::_a
__BEGIN_DECLS
#else
#if !(__has_include(<stdatomic.h>) && __has_extension(c_atomic))
#error Cannot use inline os_unfair_lock without <stdatomic.h> and C11 atomics
#endif
#include <stdatomic.h>
typedef _Atomic(os_unfair_lock) _os_atomic_unfair_lock;
#define OSLOCK_STD(_a) _a
#endif

OS_ASSUME_NONNULL_BEGIN

#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define OS_UNFAIR_LOCK_UNLOCKED ((os_unfair_lock){0})
#elif defined(__cplusplus) && __cplusplus >= 201103L
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock{})
#elif defined(__cplusplus)
#define OS_UNFAIR_LOCK_UNLOCKED (os_unfair_lock())
#else
#define OS_UNFAIR_LOCK_UNLOCKED {0}
#endif

/*!
 * @function os_unfair_lock_lock_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline(os_unfair_lock_t lock)
{
    if (!_pthread_has_direct_tsd()) return os_unfair_lock_lock(lock);
    uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
            _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &unlocked, locked,
            OSLOCK_STD(memory_order_acquire),
            OSLOCK_STD(memory_order_relaxed))) {
        return os_unfair_lock_lock(lock);
    }
}

/*!
 * @function os_unfair_lock_lock_with_options_inline
 *
 * @abstract
 * Locks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @param options
 * Options to alter the behavior of the lock. See os_unfair_lock_options_t.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_with_options_inline(os_unfair_lock_t lock,
        os_unfair_lock_options_t options)
{
    if (!_pthread_has_direct_tsd()) {
        return os_unfair_lock_lock_with_options(lock, options);
    }
    uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
            _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &unlocked, locked,
            OSLOCK_STD(memory_order_acquire),
            OSLOCK_STD(memory_order_relaxed))) {
        return os_unfair_lock_lock_with_options(lock, options);
    }
}

/*!
 * @function os_unfair_lock_trylock_inline
 *
 * @abstract
 * Locks an os_unfair_lock if it is not already locked.
 *
 * @discussion
 * It is invalid to surround this function with a retry loop; if this function
 * returns false, the program must be able to proceed without having acquired
 * the lock, or it must call os_unfair_lock_lock_inline() instead.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 *
 * @result
 * Returns true if the lock was successfully locked and false if the lock was
 * already locked.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_WARN_RESULT OS_NONNULL_ALL
bool
os_unfair_lock_trylock_inline(os_unfair_lock_t lock)
{
    if (!_pthread_has_direct_tsd()) return os_unfair_lock_trylock(lock);
    uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
            _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    return OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &unlocked, locked,
            OSLOCK_STD(memory_order_acquire), OSLOCK_STD(memory_order_relaxed));
}
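
/*
 * Illustrative sketch with hypothetical names (cache_trim_locked is invented
 * for the example), assuming OS_UNFAIR_LOCK_INLINE=1 is defined by the
 * including translation unit: a valid use of the trylock fastpath, where the
 * caller proceeds without the lock instead of retrying in a loop.
 *
 *     static os_unfair_lock g_cache_lock = OS_UNFAIR_LOCK_INIT;
 *
 *     static void
 *     cache_trim_if_idle(void)
 *     {
 *         if (!os_unfair_lock_trylock_inline(&g_cache_lock)) {
 *             return; // busy: skip trimming, do not spin retrying
 *         }
 *         cache_trim_locked();
 *         os_unfair_lock_unlock_inline(&g_cache_lock);
 *     }
 */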

/*!
 * @function os_unfair_lock_unlock_inline
 *
 * @abstract
 * Unlocks an os_unfair_lock.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline(os_unfair_lock_t lock)
{
    if (!_pthread_has_direct_tsd()) return os_unfair_lock_unlock(lock);
    uint32_t mts = (uint32_t)(uintptr_t)_pthread_getspecific_direct(
            _PTHREAD_TSD_SLOT_MACH_THREAD_SELF);
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &locked, unlocked,
            OSLOCK_STD(memory_order_release),
            OSLOCK_STD(memory_order_relaxed))) {
        return os_unfair_lock_unlock(lock);
    }
}

/*!
 * @function os_unfair_lock_lock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Locks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_lock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
    uint32_t mts = MACH_PORT_DEAD;
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &unlocked, locked,
            OSLOCK_STD(memory_order_acquire),
            OSLOCK_STD(memory_order_relaxed))) {
        return os_unfair_lock_lock_no_tsd_4libpthread(lock);
    }
}

/*!
 * @function os_unfair_lock_unlock_inline_no_tsd_4libpthread
 *
 * @abstract
 * Unlocks an os_unfair_lock, without requiring valid TSD.
 *
 * This should only be used by libpthread.
 *
 * @param lock
 * Pointer to an os_unfair_lock.
 */
OS_UNFAIR_LOCK_AVAILABILITY
OS_INLINE OS_ALWAYS_INLINE OS_NONNULL_ALL
void
os_unfair_lock_unlock_inline_no_tsd_4libpthread(os_unfair_lock_t lock)
{
    uint32_t mts = MACH_PORT_DEAD;
    os_unfair_lock unlocked = OS_UNFAIR_LOCK_UNLOCKED, locked = { mts };
    if (!OSLOCK_STD(atomic_compare_exchange_strong_explicit)(
            (_os_atomic_unfair_lock*)lock, &locked, unlocked,
            OSLOCK_STD(memory_order_release),
            OSLOCK_STD(memory_order_relaxed))) {
        return os_unfair_lock_unlock_no_tsd_4libpthread(lock);
    }
}

OS_ASSUME_NONNULL_END

#undef OSLOCK_STD
#ifdef __cplusplus
__END_DECLS
} // extern "C++"
#endif

#endif // OS_UNFAIR_LOCK_INLINE

#endif // __OS_LOCK_PRIVATE__