/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */
27 #ifndef __DISPATCH_SHIMS_LOCK__
28 #define __DISPATCH_SHIMS_LOCK__
30 #pragma mark - platform macros
32 DISPATCH_ENUM(dispatch_lock_options
, uint32_t,
33 DLOCK_LOCK_NONE
= 0x00000000,
34 DLOCK_LOCK_DATA_CONTENTION
= 0x00010000,
39 typedef mach_port_t dispatch_lock_owner
;
40 typedef uint32_t dispatch_lock
;
42 #define DLOCK_OWNER_NULL ((dispatch_lock_owner)MACH_PORT_NULL)
43 #define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc)
44 #define DLOCK_NOWAITERS_BIT ((dispatch_lock)0x00000001)
45 #define DLOCK_NOFAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002)
46 #define _dispatch_tid_self() ((dispatch_lock_owner)_dispatch_thread_port())
48 DISPATCH_ALWAYS_INLINE
50 _dispatch_lock_is_locked(dispatch_lock lock_value
)
52 return (lock_value
& DLOCK_OWNER_MASK
) != 0;
55 DISPATCH_ALWAYS_INLINE
56 static inline dispatch_lock_owner
57 _dispatch_lock_owner(dispatch_lock lock_value
)
59 lock_value
&= DLOCK_OWNER_MASK
;
61 lock_value
|= DLOCK_NOWAITERS_BIT
| DLOCK_NOFAILED_TRYLOCK_BIT
;
66 DISPATCH_ALWAYS_INLINE
68 _dispatch_lock_is_locked_by(dispatch_lock lock_value
, dispatch_lock_owner tid
)
70 // equivalent to _dispatch_lock_owner(lock_value) == tid
71 return ((lock_value
^ tid
) & DLOCK_OWNER_MASK
) == 0;
74 DISPATCH_ALWAYS_INLINE
76 _dispatch_lock_has_waiters(dispatch_lock lock_value
)
78 bool nowaiters_bit
= (lock_value
& DLOCK_NOWAITERS_BIT
);
79 return _dispatch_lock_is_locked(lock_value
) != nowaiters_bit
;
82 DISPATCH_ALWAYS_INLINE
84 _dispatch_lock_has_failed_trylock(dispatch_lock lock_value
)
86 return !(lock_value
& DLOCK_NOFAILED_TRYLOCK_BIT
);
89 #elif defined(__linux__)
90 #include <linux/futex.h>
92 #include <sys/syscall.h> /* For SYS_xxx definitions */
94 typedef uint32_t dispatch_lock
;
95 typedef pid_t dispatch_lock_owner
;
97 #define DLOCK_OWNER_NULL ((dispatch_lock_owner)0)
98 #define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK)
99 #define DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS)
100 #define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED)
101 #define _dispatch_tid_self() \
102 ((dispatch_lock_owner)(_dispatch_get_tsd_base()->tid))
104 DISPATCH_ALWAYS_INLINE
106 _dispatch_lock_is_locked(dispatch_lock lock_value
)
108 return (lock_value
& DLOCK_OWNER_MASK
) != 0;
111 DISPATCH_ALWAYS_INLINE
112 static inline dispatch_lock_owner
113 _dispatch_lock_owner(dispatch_lock lock_value
)
115 return (lock_value
& DLOCK_OWNER_MASK
);
118 DISPATCH_ALWAYS_INLINE
120 _dispatch_lock_is_locked_by(dispatch_lock lock_value
, dispatch_lock_owner tid
)
122 return _dispatch_lock_owner(lock_value
) == tid
;
125 DISPATCH_ALWAYS_INLINE
127 _dispatch_lock_has_waiters(dispatch_lock lock_value
)
129 return (lock_value
& DLOCK_WAITERS_BIT
);
132 DISPATCH_ALWAYS_INLINE
134 _dispatch_lock_has_failed_trylock(dispatch_lock lock_value
)
136 return !(lock_value
& DLOCK_FAILED_TRYLOCK_BIT
);
140 # error define _dispatch_lock encoding scheme for your platform here
143 #if __has_include(<sys/ulock.h>)
144 #include <sys/ulock.h>
147 #ifndef HAVE_UL_COMPARE_AND_WAIT
148 #if defined(UL_COMPARE_AND_WAIT) && DISPATCH_HOST_SUPPORTS_OSX(101200)
149 # define HAVE_UL_COMPARE_AND_WAIT 1
151 # define HAVE_UL_COMPARE_AND_WAIT 0
153 #endif // HAVE_UL_COMPARE_AND_WAIT
155 #ifndef HAVE_UL_UNFAIR_LOCK
156 #if defined(UL_UNFAIR_LOCK) && DISPATCH_HOST_SUPPORTS_OSX(101200)
157 # define HAVE_UL_UNFAIR_LOCK 1
159 # define HAVE_UL_UNFAIR_LOCK 0
161 #endif // HAVE_UL_UNFAIR_LOCK
171 #pragma mark - semaphores
173 #ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
175 #define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT)
177 #define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK 0
183 typedef semaphore_t _os_semaphore_t
;
184 #define _OS_SEM_POLICY_FIFO SYNC_POLICY_FIFO
185 #define _OS_SEM_POLICY_LIFO SYNC_POLICY_LIFO
186 #define _OS_SEM_TIMEOUT() KERN_OPERATION_TIMED_OUT
188 #define _os_semaphore_init(sema, policy) (void)(*(sema) = MACH_PORT_NULL)
189 #define _os_semaphore_is_created(sema) (*(sema) != MACH_PORT_NULL)
190 void _os_semaphore_create_slow(_os_semaphore_t
*sema
, int policy
);
194 typedef sem_t _os_semaphore_t
;
195 #define _OS_SEM_POLICY_FIFO 0
196 #define _OS_SEM_POLICY_LIFO 0
197 #define _OS_SEM_TIMEOUT() ((errno) = ETIMEDOUT, -1)
199 void _os_semaphore_init(_os_semaphore_t
*sema
, int policy
);
200 #define _os_semaphore_is_created(sema) 1
201 #define _os_semaphore_create_slow(sema, policy) ((void)0)
205 typedef HANDLE _os_semaphore_t
;
206 #define _OS_SEM_POLICY_FIFO 0
207 #define _OS_SEM_POLICY_LIFO 0
208 #define _OS_SEM_TIMEOUT() ((errno) = ETIMEDOUT, -1)
210 #define _os_semaphore_init(sema, policy) (void)(*(sema) = 0)
211 #define _os_semaphore_is_created(sema) (*(sema) != 0)
212 void _os_semaphore_create_slow(_os_semaphore_t
*sema
, int policy
);
215 #error "port has to implement _os_semaphore_t"
218 void _os_semaphore_dispose_slow(_os_semaphore_t
*sema
);
219 void _os_semaphore_signal(_os_semaphore_t
*sema
, long count
);
220 void _os_semaphore_wait(_os_semaphore_t
*sema
);
221 bool _os_semaphore_timedwait(_os_semaphore_t
*sema
, dispatch_time_t timeout
);
223 DISPATCH_ALWAYS_INLINE
225 _os_semaphore_create(_os_semaphore_t
*sema
, int policy
)
227 if (!_os_semaphore_is_created(sema
)) {
228 _os_semaphore_create_slow(sema
, policy
);
232 DISPATCH_ALWAYS_INLINE
234 _os_semaphore_dispose(_os_semaphore_t
*sema
)
236 if (_os_semaphore_is_created(sema
)) {
237 _os_semaphore_dispose_slow(sema
);
241 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
242 semaphore_t
_dispatch_thread_semaphore_create(void);
243 void _dispatch_thread_semaphore_dispose(void *);
245 DISPATCH_ALWAYS_INLINE
246 static inline semaphore_t
247 _dispatch_get_thread_semaphore(void)
249 semaphore_t sema
= (semaphore_t
)(uintptr_t)
250 _dispatch_thread_getspecific(dispatch_sema4_key
);
251 if (unlikely(!sema
)) {
252 return _dispatch_thread_semaphore_create();
254 _dispatch_thread_setspecific(dispatch_sema4_key
, NULL
);
258 DISPATCH_ALWAYS_INLINE
260 _dispatch_put_thread_semaphore(semaphore_t sema
)
262 semaphore_t old_sema
= (semaphore_t
)(uintptr_t)
263 _dispatch_thread_getspecific(dispatch_sema4_key
);
264 _dispatch_thread_setspecific(dispatch_sema4_key
, (void*)(uintptr_t)sema
);
265 if (unlikely(old_sema
)) {
266 return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema
);
272 #pragma mark - compare and wait
274 DISPATCH_NOT_TAIL_CALLED
275 void _dispatch_wait_on_address(uint32_t volatile *address
, uint32_t value
,
276 dispatch_lock_options_t flags
);
277 void _dispatch_wake_by_address(uint32_t volatile *address
);
279 #pragma mark - thread event
281 * @typedef dispatch_thread_event_t
284 * Dispatch Thread Events are used for one-time synchronization between threads.
287 * Dispatch Thread Events are cheap synchronization points used when a thread
288 * needs to block until a certain event has happened. Dispatch Thread Event
289 * must be initialized and destroyed with _dispatch_thread_event_init() and
290 * _dispatch_thread_event_destroy().
292 * A Dispatch Thread Event must be waited on and signaled exactly once between
293 * initialization and destruction. These objects are simpler than semaphores
294 * and do not support being signaled and waited on an arbitrary number of times.
296 * This locking primitive has no notion of ownership
298 typedef struct dispatch_thread_event_s
{
299 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
301 _os_semaphore_t dte_sema
;
304 #elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
305 // 1 means signalled but not waited on yet
306 // UINT32_MAX means waited on, but not signalled yet
307 // 0 is the initial and final state
310 _os_semaphore_t dte_sema
;
312 } dispatch_thread_event_s
, *dispatch_thread_event_t
;
314 DISPATCH_NOT_TAIL_CALLED
315 void _dispatch_thread_event_wait_slow(dispatch_thread_event_t
);
316 void _dispatch_thread_event_signal_slow(dispatch_thread_event_t
);
318 DISPATCH_ALWAYS_INLINE
320 _dispatch_thread_event_init(dispatch_thread_event_t dte
)
322 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
323 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
324 dte
->dte_sema
= _dispatch_get_thread_semaphore();
328 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
331 _os_semaphore_init(&dte
->dte_sema
, _OS_SEM_POLICY_FIFO
);
335 DISPATCH_ALWAYS_INLINE
337 _dispatch_thread_event_signal(dispatch_thread_event_t dte
)
339 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
340 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
341 _dispatch_thread_event_signal_slow(dte
);
345 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
346 if (os_atomic_inc_orig(&dte
->dte_value
, release
) == 0) {
347 // 0 -> 1 transition doesn't need a signal
348 // force a wake even when the value is corrupt,
349 // waiters do the validation
355 _dispatch_thread_event_signal_slow(dte
);
359 DISPATCH_ALWAYS_INLINE
361 _dispatch_thread_event_wait(dispatch_thread_event_t dte
)
363 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
364 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
365 _dispatch_thread_event_wait_slow(dte
);
369 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
370 if (os_atomic_dec(&dte
->dte_value
, acquire
) == 0) {
371 // 1 -> 0 is always a valid transition, so we can return
372 // for any other value, go to the slowpath which checks it's not corrupt
378 _dispatch_thread_event_wait_slow(dte
);
381 DISPATCH_ALWAYS_INLINE
383 _dispatch_thread_event_destroy(dispatch_thread_event_t dte
)
385 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
386 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
387 _dispatch_put_thread_semaphore(dte
->dte_sema
);
391 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
393 dispatch_assert(dte
->dte_value
== 0);
395 _os_semaphore_dispose(&dte
->dte_sema
);
399 #pragma mark - unfair lock
401 typedef struct dispatch_unfair_lock_s
{
402 dispatch_lock dul_lock
;
403 } dispatch_unfair_lock_s
, *dispatch_unfair_lock_t
;
405 DISPATCH_NOT_TAIL_CALLED
406 void _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t l
,
407 dispatch_lock_options_t options
);
408 void _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t l
,
409 dispatch_lock tid_cur
);
411 DISPATCH_ALWAYS_INLINE
413 _dispatch_unfair_lock_lock(dispatch_unfair_lock_t l
)
415 dispatch_lock tid_self
= _dispatch_tid_self();
416 if (likely(os_atomic_cmpxchg(&l
->dul_lock
,
417 DLOCK_OWNER_NULL
, tid_self
, acquire
))) {
420 return _dispatch_unfair_lock_lock_slow(l
, DLOCK_LOCK_NONE
);
423 DISPATCH_ALWAYS_INLINE
425 _dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l
,
426 dispatch_lock_owner
*owner
)
428 dispatch_lock tid_old
, tid_new
, tid_self
= _dispatch_tid_self();
430 os_atomic_rmw_loop(&l
->dul_lock
, tid_old
, tid_new
, acquire
, {
431 if (likely(!_dispatch_lock_is_locked(tid_old
))) {
434 #ifdef DLOCK_NOFAILED_TRYLOCK_BIT
435 tid_new
= tid_old
& ~DLOCK_NOFAILED_TRYLOCK_BIT
;
437 tid_new
= tid_old
| DLOCK_FAILED_TRYLOCK_BIT
;
441 if (owner
) *owner
= _dispatch_lock_owner(tid_new
);
442 return !_dispatch_lock_is_locked(tid_old
);
445 DISPATCH_ALWAYS_INLINE
447 _dispatch_unfair_lock_tryunlock(dispatch_unfair_lock_t l
)
449 dispatch_lock tid_old
, tid_new
;
451 os_atomic_rmw_loop(&l
->dul_lock
, tid_old
, tid_new
, release
, {
452 #ifdef DLOCK_NOFAILED_TRYLOCK_BIT
453 if (likely(tid_old
& DLOCK_NOFAILED_TRYLOCK_BIT
)) {
454 tid_new
= DLOCK_OWNER_NULL
;
456 tid_new
= tid_old
| DLOCK_NOFAILED_TRYLOCK_BIT
;
459 if (likely(!(tid_old
& DLOCK_FAILED_TRYLOCK_BIT
))) {
460 tid_new
= DLOCK_OWNER_NULL
;
462 tid_new
= tid_old
& ~DLOCK_FAILED_TRYLOCK_BIT
;
466 if (unlikely(tid_new
)) {
467 // unlock failed, renew the lock, which needs an acquire barrier
468 os_atomic_thread_fence(acquire
);
471 if (unlikely(_dispatch_lock_has_waiters(tid_old
))) {
472 _dispatch_unfair_lock_unlock_slow(l
, tid_old
);
477 DISPATCH_ALWAYS_INLINE
479 _dispatch_unfair_lock_unlock_had_failed_trylock(dispatch_unfair_lock_t l
)
481 dispatch_lock tid_cur
, tid_self
= _dispatch_tid_self();
483 if (likely(os_atomic_cmpxchgv(&l
->dul_lock
,
484 tid_self
, DLOCK_OWNER_NULL
, &tid_cur
, release
))) {
488 tid_cur
= os_atomic_xchg(&l
->dul_lock
, DLOCK_OWNER_NULL
, release
);
489 if (likely(tid_cur
== tid_self
)) return false;
491 _dispatch_unfair_lock_unlock_slow(l
, tid_cur
);
492 return _dispatch_lock_has_failed_trylock(tid_cur
);
495 DISPATCH_ALWAYS_INLINE
497 _dispatch_unfair_lock_unlock(dispatch_unfair_lock_t l
)
499 (void)_dispatch_unfair_lock_unlock_had_failed_trylock(l
);
502 #pragma mark - gate lock
504 #if HAVE_UL_UNFAIR_LOCK || HAVE_FUTEX
505 #define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 1
507 #define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 0
510 #define DLOCK_GATE_UNLOCKED ((dispatch_lock)0)
512 #define DLOCK_ONCE_UNLOCKED ((dispatch_once_t)0)
513 #define DLOCK_ONCE_DONE (~(dispatch_once_t)0)
515 typedef struct dispatch_gate_s
{
516 dispatch_lock dgl_lock
;
517 } dispatch_gate_s
, *dispatch_gate_t
;
519 typedef struct dispatch_once_gate_s
{
521 dispatch_gate_s dgo_gate
;
522 dispatch_once_t dgo_once
;
524 } dispatch_once_gate_s
, *dispatch_once_gate_t
;
526 DISPATCH_NOT_TAIL_CALLED
527 void _dispatch_gate_wait_slow(dispatch_gate_t l
, dispatch_lock value
,
529 void _dispatch_gate_broadcast_slow(dispatch_gate_t l
, dispatch_lock tid_cur
);
531 DISPATCH_ALWAYS_INLINE
533 _dispatch_gate_tryenter(dispatch_gate_t l
)
535 dispatch_lock tid_self
= _dispatch_tid_self();
536 return likely(os_atomic_cmpxchg(&l
->dgl_lock
,
537 DLOCK_GATE_UNLOCKED
, tid_self
, acquire
));
540 #define _dispatch_gate_wait(l, flags) \
541 _dispatch_gate_wait_slow(l, DLOCK_GATE_UNLOCKED, flags)
543 DISPATCH_ALWAYS_INLINE
545 _dispatch_gate_broadcast(dispatch_gate_t l
)
547 dispatch_lock tid_cur
, tid_self
= _dispatch_tid_self();
548 tid_cur
= os_atomic_xchg(&l
->dgl_lock
, DLOCK_GATE_UNLOCKED
, release
);
549 if (likely(tid_cur
== tid_self
)) return;
550 _dispatch_gate_broadcast_slow(l
, tid_cur
);
553 DISPATCH_ALWAYS_INLINE
555 _dispatch_once_gate_tryenter(dispatch_once_gate_t l
)
557 dispatch_once_t tid_self
= (dispatch_once_t
)_dispatch_tid_self();
558 return likely(os_atomic_cmpxchg(&l
->dgo_once
,
559 DLOCK_ONCE_UNLOCKED
, tid_self
, acquire
));
562 #define _dispatch_once_gate_wait(l) \
563 _dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \
566 DISPATCH_ALWAYS_INLINE
568 _dispatch_once_gate_broadcast(dispatch_once_gate_t l
)
570 dispatch_once_t tid_cur
, tid_self
= (dispatch_once_t
)_dispatch_tid_self();
571 // see once.c for explanation about this trick
572 os_atomic_maximally_synchronizing_barrier();
573 // above assumed to contain release barrier
574 tid_cur
= os_atomic_xchg(&l
->dgo_once
, DLOCK_ONCE_DONE
, relaxed
);
575 if (likely(tid_cur
== tid_self
)) return;
576 _dispatch_gate_broadcast_slow(&l
->dgo_gate
, (dispatch_lock
)tid_cur
);
579 #endif // __DISPATCH_SHIMS_LOCK__