/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */
27 #ifndef __DISPATCH_SHIMS_LOCK__
28 #define __DISPATCH_SHIMS_LOCK__
30 #pragma mark - platform macros
32 DISPATCH_ENUM(dispatch_lock_options
, uint32_t,
33 DLOCK_LOCK_NONE
= 0x00000000,
34 DLOCK_LOCK_DATA_CONTENTION
= 0x00010000,
39 typedef mach_port_t dispatch_lock_owner
;
40 typedef uint32_t dispatch_lock
;
42 #define DLOCK_OWNER_NULL ((dispatch_lock_owner)MACH_PORT_NULL)
43 #define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc)
44 #define DLOCK_NOWAITERS_BIT ((dispatch_lock)0x00000001)
45 #define DLOCK_NOFAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002)
46 #define _dispatch_tid_self() ((dispatch_lock_owner)_dispatch_thread_port())
48 DISPATCH_ALWAYS_INLINE
50 _dispatch_lock_is_locked(dispatch_lock lock_value
)
52 return (lock_value
& DLOCK_OWNER_MASK
) != 0;
55 DISPATCH_ALWAYS_INLINE
56 static inline dispatch_lock_owner
57 _dispatch_lock_owner(dispatch_lock lock_value
)
59 lock_value
&= DLOCK_OWNER_MASK
;
61 lock_value
|= DLOCK_NOWAITERS_BIT
| DLOCK_NOFAILED_TRYLOCK_BIT
;
66 DISPATCH_ALWAYS_INLINE
68 _dispatch_lock_is_locked_by(dispatch_lock lock_value
, dispatch_lock_owner tid
)
70 // equivalent to _dispatch_lock_owner(lock_value) == tid
71 return ((lock_value
^ tid
) & DLOCK_OWNER_MASK
) == 0;
74 DISPATCH_ALWAYS_INLINE
76 _dispatch_lock_has_waiters(dispatch_lock lock_value
)
78 bool nowaiters_bit
= (lock_value
& DLOCK_NOWAITERS_BIT
);
79 return _dispatch_lock_is_locked(lock_value
) != nowaiters_bit
;
82 DISPATCH_ALWAYS_INLINE
84 _dispatch_lock_has_failed_trylock(dispatch_lock lock_value
)
86 return !(lock_value
& DLOCK_NOFAILED_TRYLOCK_BIT
);
89 #elif defined(__linux__)
90 #include <linux/futex.h>
92 #include <sys/syscall.h> /* For SYS_xxx definitions */
94 typedef uint32_t dispatch_lock
;
95 typedef pid_t dispatch_lock_owner
;
97 #define DLOCK_OWNER_NULL ((dispatch_lock_owner)0)
98 #define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK)
99 #define DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS)
100 #define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED)
101 #define _dispatch_tid_self() \
102 ((dispatch_lock_owner)(_dispatch_get_tsd_base()->tid))
104 DISPATCH_ALWAYS_INLINE
106 _dispatch_lock_is_locked(dispatch_lock lock_value
)
108 return (lock_value
& DLOCK_OWNER_MASK
) != 0;
111 DISPATCH_ALWAYS_INLINE
112 static inline dispatch_lock_owner
113 _dispatch_lock_owner(dispatch_lock lock_value
)
115 return (lock_value
& DLOCK_OWNER_MASK
);
118 DISPATCH_ALWAYS_INLINE
120 _dispatch_lock_is_locked_by(dispatch_lock lock_value
, dispatch_lock_owner tid
)
122 return _dispatch_lock_owner(lock_value
) == tid
;
125 DISPATCH_ALWAYS_INLINE
127 _dispatch_lock_has_waiters(dispatch_lock lock_value
)
129 return (lock_value
& DLOCK_WAITERS_BIT
);
132 DISPATCH_ALWAYS_INLINE
134 _dispatch_lock_has_failed_trylock(dispatch_lock lock_value
)
136 return !(lock_value
& DLOCK_FAILED_TRYLOCK_BIT
);
140 # error define _dispatch_lock encoding scheme for your platform here
143 #if __has_include(<sys/ulock.h>)
144 #include <sys/ulock.h>
147 #ifndef HAVE_UL_COMPARE_AND_WAIT
148 #if defined(UL_COMPARE_AND_WAIT) && DISPATCH_HOST_SUPPORTS_OSX(101200)
149 # define HAVE_UL_COMPARE_AND_WAIT 1
151 # define HAVE_UL_COMPARE_AND_WAIT 0
153 #endif // HAVE_UL_COMPARE_AND_WAIT
155 #ifndef HAVE_UL_UNFAIR_LOCK
156 #if defined(UL_UNFAIR_LOCK) && DISPATCH_HOST_SUPPORTS_OSX(101200)
157 # define HAVE_UL_UNFAIR_LOCK 1
159 # define HAVE_UL_UNFAIR_LOCK 0
161 #endif // HAVE_UL_UNFAIR_LOCK
163 #ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
164 #define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT && !HAVE_FUTEX)
// Crash helpers for semaphore syscall results:
// KERN_INVALID_NAME means the port was already destroyed (client bug),
// any other failure is an internal error.
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		if (unlikely((x) == KERN_INVALID_NAME)) { \
			DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \
		} else if (unlikely(x)) { \
			DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
		} \
	} while (0)
#define DISPATCH_GROUP_VERIFY_KR(x) do { \
		if (unlikely((x) == KERN_INVALID_NAME)) { \
			DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_group_t"); \
		} else if (unlikely(x)) { \
			DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
		} \
	} while (0)
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (unlikely((x) == -1)) { \
			DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \
		} \
	} while (0)
198 #pragma mark - compare and wait
200 DISPATCH_NOT_TAIL_CALLED
201 void _dispatch_wait_on_address(uint32_t volatile *address
, uint32_t value
,
202 dispatch_lock_options_t flags
);
203 void _dispatch_wake_by_address(uint32_t volatile *address
);
205 #pragma mark - thread event
/*!
 * @typedef dispatch_thread_event_t
 *
 * @abstract
 * Dispatch Thread Events are used for one-time synchronization between threads.
 *
 * @discussion
 * Dispatch Thread Events are cheap synchronization points used when a thread
 * needs to block until a certain event has happened. Dispatch Thread Event
 * must be initialized and destroyed with _dispatch_thread_event_init() and
 * _dispatch_thread_event_destroy().
 *
 * A Dispatch Thread Event must be waited on and signaled exactly once between
 * initialization and destruction. These objects are simpler than semaphores
 * and do not support being signaled and waited on an arbitrary number of times.
 *
 * This locking primitive has no notion of ownership
 */
224 typedef struct dispatch_thread_event_s
{
225 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
227 semaphore_t dte_semaphore
;
230 #elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
231 // 1 means signalled but not waited on yet
232 // UINT32_MAX means waited on, but not signalled yet
233 // 0 is the initial and final state
238 # error define dispatch_thread_event_s for your platform
240 } dispatch_thread_event_s
, *dispatch_thread_event_t
;
242 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
243 semaphore_t
_dispatch_thread_semaphore_create(void);
244 void _dispatch_thread_semaphore_dispose(void *);
246 DISPATCH_ALWAYS_INLINE
247 static inline semaphore_t
248 _dispatch_get_thread_semaphore(void)
250 semaphore_t sema
= (semaphore_t
)(uintptr_t)
251 _dispatch_thread_getspecific(dispatch_sema4_key
);
252 if (unlikely(!sema
)) {
253 return _dispatch_thread_semaphore_create();
255 _dispatch_thread_setspecific(dispatch_sema4_key
, NULL
);
259 DISPATCH_ALWAYS_INLINE
261 _dispatch_put_thread_semaphore(semaphore_t sema
)
263 semaphore_t old_sema
= (semaphore_t
)(uintptr_t)
264 _dispatch_thread_getspecific(dispatch_sema4_key
);
265 _dispatch_thread_setspecific(dispatch_sema4_key
, (void*)(uintptr_t)sema
);
266 if (unlikely(old_sema
)) {
267 return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema
);
272 DISPATCH_NOT_TAIL_CALLED
273 void _dispatch_thread_event_wait_slow(dispatch_thread_event_t
);
274 void _dispatch_thread_event_signal_slow(dispatch_thread_event_t
);
276 DISPATCH_ALWAYS_INLINE
278 _dispatch_thread_event_init(dispatch_thread_event_t dte
)
280 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
281 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
282 dte
->dte_semaphore
= _dispatch_get_thread_semaphore();
286 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
289 int rc
= sem_init(&dte
->dte_sem
, 0, 0);
290 DISPATCH_SEMAPHORE_VERIFY_RET(rc
);
294 DISPATCH_ALWAYS_INLINE
296 _dispatch_thread_event_signal(dispatch_thread_event_t dte
)
298 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
299 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
300 _dispatch_thread_event_signal_slow(dte
);
304 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
305 if (os_atomic_inc_orig(&dte
->dte_value
, release
) == 0) {
306 // 0 -> 1 transition doesn't need a signal
307 // force a wake even when the value is corrupt,
308 // waiters do the validation
314 _dispatch_thread_event_signal_slow(dte
);
318 DISPATCH_ALWAYS_INLINE
320 _dispatch_thread_event_wait(dispatch_thread_event_t dte
)
322 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
323 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
324 _dispatch_thread_event_wait_slow(dte
);
328 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
329 if (os_atomic_dec(&dte
->dte_value
, acquire
) == 0) {
330 // 1 -> 0 is always a valid transition, so we can return
331 // for any other value, go to the slowpath which checks it's not corrupt
337 _dispatch_thread_event_wait_slow(dte
);
340 DISPATCH_ALWAYS_INLINE
342 _dispatch_thread_event_destroy(dispatch_thread_event_t dte
)
344 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
345 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
346 _dispatch_put_thread_semaphore(dte
->dte_semaphore
);
350 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
352 dispatch_assert(dte
->dte_value
== 0);
354 int rc
= sem_destroy(&dte
->dte_sem
);
355 DISPATCH_SEMAPHORE_VERIFY_RET(rc
);
359 #pragma mark - unfair lock
361 typedef struct dispatch_unfair_lock_s
{
362 dispatch_lock dul_lock
;
363 } dispatch_unfair_lock_s
, *dispatch_unfair_lock_t
;
365 DISPATCH_NOT_TAIL_CALLED
366 void _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t l
,
367 dispatch_lock_options_t options
);
368 void _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t l
,
369 dispatch_lock tid_cur
);
371 DISPATCH_ALWAYS_INLINE
373 _dispatch_unfair_lock_lock(dispatch_unfair_lock_t l
)
375 dispatch_lock tid_self
= _dispatch_tid_self();
376 if (likely(os_atomic_cmpxchg(&l
->dul_lock
,
377 DLOCK_OWNER_NULL
, tid_self
, acquire
))) {
380 return _dispatch_unfair_lock_lock_slow(l
, DLOCK_LOCK_NONE
);
383 DISPATCH_ALWAYS_INLINE
385 _dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l
,
386 dispatch_lock_owner
*owner
)
388 dispatch_lock tid_old
, tid_new
, tid_self
= _dispatch_tid_self();
390 os_atomic_rmw_loop(&l
->dul_lock
, tid_old
, tid_new
, acquire
, {
391 if (likely(!_dispatch_lock_is_locked(tid_old
))) {
394 #ifdef DLOCK_NOFAILED_TRYLOCK_BIT
395 tid_new
= tid_old
& ~DLOCK_NOFAILED_TRYLOCK_BIT
;
397 tid_new
= tid_old
| DLOCK_FAILED_TRYLOCK_BIT
;
401 if (owner
) *owner
= _dispatch_lock_owner(tid_new
);
402 return !_dispatch_lock_is_locked(tid_old
);
405 DISPATCH_ALWAYS_INLINE
407 _dispatch_unfair_lock_tryunlock(dispatch_unfair_lock_t l
)
409 dispatch_lock tid_old
, tid_new
;
411 os_atomic_rmw_loop(&l
->dul_lock
, tid_old
, tid_new
, release
, {
412 #ifdef DLOCK_NOFAILED_TRYLOCK_BIT
413 if (likely(tid_old
& DLOCK_NOFAILED_TRYLOCK_BIT
)) {
414 tid_new
= DLOCK_OWNER_NULL
;
416 tid_new
= tid_old
| DLOCK_NOFAILED_TRYLOCK_BIT
;
419 if (likely(!(tid_old
& DLOCK_FAILED_TRYLOCK_BIT
))) {
420 tid_new
= DLOCK_OWNER_NULL
;
422 tid_new
= tid_old
& ~DLOCK_FAILED_TRYLOCK_BIT
;
426 if (unlikely(tid_new
)) {
427 // unlock failed, renew the lock, which needs an acquire barrier
428 os_atomic_thread_fence(acquire
);
431 if (unlikely(_dispatch_lock_has_waiters(tid_old
))) {
432 _dispatch_unfair_lock_unlock_slow(l
, tid_old
);
437 DISPATCH_ALWAYS_INLINE
439 _dispatch_unfair_lock_unlock_had_failed_trylock(dispatch_unfair_lock_t l
)
441 dispatch_lock tid_cur
, tid_self
= _dispatch_tid_self();
443 if (likely(os_atomic_cmpxchgv(&l
->dul_lock
,
444 tid_self
, DLOCK_OWNER_NULL
, &tid_cur
, release
))) {
448 tid_cur
= os_atomic_xchg(&l
->dul_lock
, DLOCK_OWNER_NULL
, release
);
449 if (likely(tid_cur
== tid_self
)) return false;
451 _dispatch_unfair_lock_unlock_slow(l
, tid_cur
);
452 return _dispatch_lock_has_failed_trylock(tid_cur
);
455 DISPATCH_ALWAYS_INLINE
457 _dispatch_unfair_lock_unlock(dispatch_unfair_lock_t l
)
459 (void)_dispatch_unfair_lock_unlock_had_failed_trylock(l
);
#pragma mark - gate lock

#if HAVE_UL_UNFAIR_LOCK || HAVE_FUTEX
#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 1
#else
#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 0
#endif

#define DLOCK_GATE_UNLOCKED	((dispatch_lock)0)

#define DLOCK_ONCE_UNLOCKED	((dispatch_once_t)0)
#define DLOCK_ONCE_DONE		(~(dispatch_once_t)0)
475 typedef struct dispatch_gate_s
{
476 dispatch_lock dgl_lock
;
477 } dispatch_gate_s
, *dispatch_gate_t
;
479 typedef struct dispatch_once_gate_s
{
481 dispatch_gate_s dgo_gate
;
482 dispatch_once_t dgo_once
;
484 } dispatch_once_gate_s
, *dispatch_once_gate_t
;
486 DISPATCH_NOT_TAIL_CALLED
487 void _dispatch_gate_wait_slow(dispatch_gate_t l
, dispatch_lock value
,
489 void _dispatch_gate_broadcast_slow(dispatch_gate_t l
, dispatch_lock tid_cur
);
491 DISPATCH_ALWAYS_INLINE
493 _dispatch_gate_tryenter(dispatch_gate_t l
)
495 dispatch_lock tid_self
= _dispatch_tid_self();
496 return likely(os_atomic_cmpxchg(&l
->dgl_lock
,
497 DLOCK_GATE_UNLOCKED
, tid_self
, acquire
));
// Wait until the gate returns to the UNLOCKED value
#define _dispatch_gate_wait(l, flags) \
		_dispatch_gate_wait_slow(l, DLOCK_GATE_UNLOCKED, flags)
503 DISPATCH_ALWAYS_INLINE
505 _dispatch_gate_broadcast(dispatch_gate_t l
)
507 dispatch_lock tid_cur
, tid_self
= _dispatch_tid_self();
508 tid_cur
= os_atomic_xchg(&l
->dgl_lock
, DLOCK_GATE_UNLOCKED
, release
);
509 if (likely(tid_cur
== tid_self
)) return;
510 _dispatch_gate_broadcast_slow(l
, tid_cur
);
513 DISPATCH_ALWAYS_INLINE
515 _dispatch_once_gate_tryenter(dispatch_once_gate_t l
)
517 dispatch_once_t tid_self
= (dispatch_once_t
)_dispatch_tid_self();
518 return likely(os_atomic_cmpxchg(&l
->dgo_once
,
519 DLOCK_ONCE_UNLOCKED
, tid_self
, acquire
));
// Wait until the once gate is marked DONE
#define _dispatch_once_gate_wait(l) \
		_dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \
				DLOCK_LOCK_NONE)
526 DISPATCH_ALWAYS_INLINE
528 _dispatch_once_gate_broadcast(dispatch_once_gate_t l
)
530 dispatch_once_t tid_cur
, tid_self
= (dispatch_once_t
)_dispatch_tid_self();
531 // see once.c for explanation about this trick
532 os_atomic_maximally_synchronizing_barrier();
533 // above assumed to contain release barrier
534 tid_cur
= os_atomic_xchg(&l
->dgo_once
, DLOCK_ONCE_DONE
, relaxed
);
535 if (likely(tid_cur
== tid_self
)) return;
536 _dispatch_gate_broadcast_slow(&l
->dgo_gate
, (dispatch_lock
)tid_cur
);
539 #endif // __DISPATCH_SHIMS_LOCK__