/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */
27 #ifndef __DISPATCH_SHIMS_LOCK__
28 #define __DISPATCH_SHIMS_LOCK__
30 #pragma mark - platform macros
32 DISPATCH_ENUM(dispatch_lock_options
, uint32_t,
33 DLOCK_LOCK_NONE
= 0x00000000,
34 DLOCK_LOCK_DATA_CONTENTION
= 0x00010000,
39 typedef mach_port_t dispatch_tid
;
40 typedef uint32_t dispatch_lock
;
42 #define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc)
43 #define DLOCK_WAITERS_BIT ((dispatch_lock)0x00000001)
44 #define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002)
46 #define DLOCK_OWNER_NULL ((dispatch_tid)MACH_PORT_NULL)
47 #define _dispatch_tid_self() ((dispatch_tid)_dispatch_thread_port())
49 DISPATCH_ALWAYS_INLINE
50 static inline dispatch_tid
51 _dispatch_lock_owner(dispatch_lock lock_value
)
53 if (lock_value
& DLOCK_OWNER_MASK
) {
54 return lock_value
| DLOCK_WAITERS_BIT
| DLOCK_FAILED_TRYLOCK_BIT
;
56 return DLOCK_OWNER_NULL
;
59 #elif defined(__linux__)
61 #include <linux/futex.h>
63 #include <sys/syscall.h> /* For SYS_xxx definitions */
65 typedef uint32_t dispatch_tid
;
66 typedef uint32_t dispatch_lock
;
68 #define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK)
69 #define DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS)
70 #define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED)
72 #define DLOCK_OWNER_NULL ((dispatch_tid)0)
73 #define _dispatch_tid_self() ((dispatch_tid)(_dispatch_get_tsd_base()->tid))
75 DISPATCH_ALWAYS_INLINE
76 static inline dispatch_tid
77 _dispatch_lock_owner(dispatch_lock lock_value
)
79 return lock_value
& DLOCK_OWNER_MASK
;
83 # error define _dispatch_lock encoding scheme for your platform here
86 DISPATCH_ALWAYS_INLINE
87 static inline dispatch_lock
88 _dispatch_lock_value_from_tid(dispatch_tid tid
)
90 return tid
& DLOCK_OWNER_MASK
;
93 DISPATCH_ALWAYS_INLINE
94 static inline dispatch_lock
95 _dispatch_lock_value_for_self(void)
97 return _dispatch_lock_value_from_tid(_dispatch_tid_self());
100 DISPATCH_ALWAYS_INLINE
102 _dispatch_lock_is_locked(dispatch_lock lock_value
)
104 // equivalent to _dispatch_lock_owner(lock_value) == 0
105 return (lock_value
& DLOCK_OWNER_MASK
) != 0;
108 DISPATCH_ALWAYS_INLINE
110 _dispatch_lock_is_locked_by(dispatch_lock lock_value
, dispatch_tid tid
)
112 // equivalent to _dispatch_lock_owner(lock_value) == tid
113 return ((lock_value
^ tid
) & DLOCK_OWNER_MASK
) == 0;
116 DISPATCH_ALWAYS_INLINE
118 _dispatch_lock_is_locked_by_self(dispatch_lock lock_value
)
120 // equivalent to _dispatch_lock_owner(lock_value) == tid
121 return ((lock_value
^ _dispatch_tid_self()) & DLOCK_OWNER_MASK
) == 0;
124 DISPATCH_ALWAYS_INLINE
126 _dispatch_lock_has_waiters(dispatch_lock lock_value
)
128 return (lock_value
& DLOCK_WAITERS_BIT
);
131 DISPATCH_ALWAYS_INLINE
133 _dispatch_lock_has_failed_trylock(dispatch_lock lock_value
)
135 return (lock_value
& DLOCK_FAILED_TRYLOCK_BIT
);
// Probe for the Darwin ulock API and record which operations exist.
#if __has_include(<sys/ulock.h>)
#include <sys/ulock.h>
#ifdef UL_COMPARE_AND_WAIT
#define HAVE_UL_COMPARE_AND_WAIT 1
#endif
#ifdef UL_UNFAIR_LOCK
#define HAVE_UL_UNFAIR_LOCK 1
#endif
#endif // __has_include(<sys/ulock.h>)
156 #pragma mark - semaphores
160 typedef semaphore_t _dispatch_sema4_t
;
161 #define _DSEMA4_POLICY_FIFO SYNC_POLICY_FIFO
162 #define _DSEMA4_POLICY_LIFO SYNC_POLICY_LIFO
163 #define _DSEMA4_TIMEOUT() KERN_OPERATION_TIMED_OUT
165 #define _dispatch_sema4_init(sema, policy) (void)(*(sema) = MACH_PORT_NULL)
166 #define _dispatch_sema4_is_created(sema) (*(sema) != MACH_PORT_NULL)
167 void _dispatch_sema4_create_slow(_dispatch_sema4_t
*sema
, int policy
);
171 typedef sem_t _dispatch_sema4_t
;
172 #define _DSEMA4_POLICY_FIFO 0
173 #define _DSEMA4_POLICY_LIFO 0
174 #define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1)
176 void _dispatch_sema4_init(_dispatch_sema4_t
*sema
, int policy
);
177 #define _dispatch_sema4_is_created(sema) ((void)sema, 1)
178 #define _dispatch_sema4_create_slow(sema, policy) ((void)sema, (void)policy)
182 typedef HANDLE _dispatch_sema4_t
;
183 #define _DSEMA4_POLICY_FIFO 0
184 #define _DSEMA4_POLICY_LIFO 0
185 #define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1)
187 #define _dispatch_sema4_init(sema, policy) (void)(*(sema) = 0)
188 #define _dispatch_sema4_is_created(sema) (*(sema) != 0)
189 void _dispatch_sema4_create_slow(_dispatch_sema4_t
*sema
, int policy
);
192 #error "port has to implement _dispatch_sema4_t"
195 void _dispatch_sema4_dispose_slow(_dispatch_sema4_t
*sema
, int policy
);
196 void _dispatch_sema4_signal(_dispatch_sema4_t
*sema
, long count
);
197 void _dispatch_sema4_wait(_dispatch_sema4_t
*sema
);
198 bool _dispatch_sema4_timedwait(_dispatch_sema4_t
*sema
, dispatch_time_t timeout
);
200 DISPATCH_ALWAYS_INLINE
202 _dispatch_sema4_create(_dispatch_sema4_t
*sema
, int policy
)
204 if (!_dispatch_sema4_is_created(sema
)) {
205 _dispatch_sema4_create_slow(sema
, policy
);
209 DISPATCH_ALWAYS_INLINE
211 _dispatch_sema4_dispose(_dispatch_sema4_t
*sema
, int policy
)
213 if (_dispatch_sema4_is_created(sema
)) {
214 _dispatch_sema4_dispose_slow(sema
, policy
);
218 #pragma mark - compare and wait
220 DISPATCH_NOT_TAIL_CALLED
221 void _dispatch_wait_on_address(uint32_t volatile *address
, uint32_t value
,
222 dispatch_lock_options_t flags
);
223 void _dispatch_wake_by_address(uint32_t volatile *address
);
225 #pragma mark - thread event
/*!
 * @typedef dispatch_thread_event_t
 *
 * @abstract
 * Dispatch Thread Events are used for one-time synchronization between threads.
 *
 * @discussion
 * Dispatch Thread Events are cheap synchronization points used when a thread
 * needs to block until a certain event has happened. Dispatch Thread Event
 * must be initialized and destroyed with _dispatch_thread_event_init() and
 * _dispatch_thread_event_destroy().
 *
 * A Dispatch Thread Event must be waited on and signaled exactly once between
 * initialization and destruction. These objects are simpler than semaphores
 * and do not support being signaled and waited on an arbitrary number of times.
 *
 * This locking primitive has no notion of ownership
 */
244 typedef struct dispatch_thread_event_s
{
245 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
246 // 1 means signalled but not waited on yet
247 // UINT32_MAX means waited on, but not signalled yet
248 // 0 is the initial and final state
251 _dispatch_sema4_t dte_sema
;
253 } dispatch_thread_event_s
, *dispatch_thread_event_t
;
255 DISPATCH_NOT_TAIL_CALLED
256 void _dispatch_thread_event_wait_slow(dispatch_thread_event_t
);
257 void _dispatch_thread_event_signal_slow(dispatch_thread_event_t
);
259 DISPATCH_ALWAYS_INLINE
261 _dispatch_thread_event_init(dispatch_thread_event_t dte
)
263 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
266 _dispatch_sema4_init(&dte
->dte_sema
, _DSEMA4_POLICY_FIFO
);
270 DISPATCH_ALWAYS_INLINE
272 _dispatch_thread_event_signal(dispatch_thread_event_t dte
)
274 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
275 if (os_atomic_inc_orig(&dte
->dte_value
, release
) == 0) {
276 // 0 -> 1 transition doesn't need a signal
277 // force a wake even when the value is corrupt,
278 // waiters do the validation
284 _dispatch_thread_event_signal_slow(dte
);
288 DISPATCH_ALWAYS_INLINE
290 _dispatch_thread_event_wait(dispatch_thread_event_t dte
)
292 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
293 if (os_atomic_dec(&dte
->dte_value
, acquire
) == 0) {
294 // 1 -> 0 is always a valid transition, so we can return
295 // for any other value, go to the slowpath which checks it's not corrupt
301 _dispatch_thread_event_wait_slow(dte
);
304 DISPATCH_ALWAYS_INLINE
306 _dispatch_thread_event_destroy(dispatch_thread_event_t dte
)
308 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
310 dispatch_assert(dte
->dte_value
== 0);
312 _dispatch_sema4_dispose(&dte
->dte_sema
, _DSEMA4_POLICY_FIFO
);
316 #pragma mark - unfair lock
318 typedef struct dispatch_unfair_lock_s
{
319 dispatch_lock dul_lock
;
320 } dispatch_unfair_lock_s
, *dispatch_unfair_lock_t
;
322 DISPATCH_NOT_TAIL_CALLED
323 void _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t l
,
324 dispatch_lock_options_t options
);
325 void _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t l
,
326 dispatch_lock tid_cur
);
328 DISPATCH_ALWAYS_INLINE
330 _dispatch_unfair_lock_lock(dispatch_unfair_lock_t l
)
332 dispatch_lock value_self
= _dispatch_lock_value_for_self();
333 if (likely(os_atomic_cmpxchg(&l
->dul_lock
,
334 DLOCK_OWNER_NULL
, value_self
, acquire
))) {
337 return _dispatch_unfair_lock_lock_slow(l
, DLOCK_LOCK_NONE
);
340 DISPATCH_ALWAYS_INLINE
342 _dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l
, dispatch_tid
*owner
)
344 dispatch_lock value_self
= _dispatch_lock_value_for_self();
345 dispatch_lock old_value
, new_value
;
347 os_atomic_rmw_loop(&l
->dul_lock
, old_value
, new_value
, acquire
, {
348 if (likely(!_dispatch_lock_is_locked(old_value
))) {
349 new_value
= value_self
;
351 new_value
= old_value
| DLOCK_FAILED_TRYLOCK_BIT
;
354 if (owner
) *owner
= _dispatch_lock_owner(new_value
);
355 return !_dispatch_lock_is_locked(old_value
);
358 DISPATCH_ALWAYS_INLINE
360 _dispatch_unfair_lock_tryunlock(dispatch_unfair_lock_t l
)
362 dispatch_lock old_value
, new_value
;
364 os_atomic_rmw_loop(&l
->dul_lock
, old_value
, new_value
, release
, {
365 if (unlikely(old_value
& DLOCK_FAILED_TRYLOCK_BIT
)) {
366 new_value
= old_value
^ DLOCK_FAILED_TRYLOCK_BIT
;
368 new_value
= DLOCK_OWNER_NULL
;
371 if (unlikely(new_value
)) {
372 // unlock failed, renew the lock, which needs an acquire barrier
373 os_atomic_thread_fence(acquire
);
376 if (unlikely(_dispatch_lock_has_waiters(old_value
))) {
377 _dispatch_unfair_lock_unlock_slow(l
, old_value
);
382 DISPATCH_ALWAYS_INLINE
384 _dispatch_unfair_lock_unlock_had_failed_trylock(dispatch_unfair_lock_t l
)
386 dispatch_lock cur
, value_self
= _dispatch_lock_value_for_self();
388 if (likely(os_atomic_cmpxchgv(&l
->dul_lock
,
389 value_self
, DLOCK_OWNER_NULL
, &cur
, release
))) {
393 cur
= os_atomic_xchg(&l
->dul_lock
, DLOCK_OWNER_NULL
, release
);
394 if (likely(cur
== value_self
)) return false;
396 _dispatch_unfair_lock_unlock_slow(l
, cur
);
397 return _dispatch_lock_has_failed_trylock(cur
);
400 DISPATCH_ALWAYS_INLINE
402 _dispatch_unfair_lock_unlock(dispatch_unfair_lock_t l
)
404 (void)_dispatch_unfair_lock_unlock_had_failed_trylock(l
);
407 #pragma mark - gate lock
// Gates back dispatch_once only where a native wait primitive exists.
#if HAVE_UL_UNFAIR_LOCK || HAVE_FUTEX
#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 1
#else
#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 0
#endif

#define DLOCK_GATE_UNLOCKED	((dispatch_lock)0)

#define DLOCK_ONCE_UNLOCKED	((dispatch_once_t)0)
#define DLOCK_ONCE_DONE		(~(dispatch_once_t)0)
420 typedef struct dispatch_gate_s
{
421 dispatch_lock dgl_lock
;
422 } dispatch_gate_s
, *dispatch_gate_t
;
424 typedef struct dispatch_once_gate_s
{
426 dispatch_gate_s dgo_gate
;
427 dispatch_once_t dgo_once
;
429 } dispatch_once_gate_s
, *dispatch_once_gate_t
;
431 DISPATCH_NOT_TAIL_CALLED
432 void _dispatch_gate_wait_slow(dispatch_gate_t l
, dispatch_lock value
,
434 void _dispatch_gate_broadcast_slow(dispatch_gate_t l
, dispatch_lock tid_cur
);
436 DISPATCH_ALWAYS_INLINE
438 _dispatch_gate_tryenter(dispatch_gate_t l
)
440 return os_atomic_cmpxchg(&l
->dgl_lock
, DLOCK_GATE_UNLOCKED
,
441 _dispatch_lock_value_for_self(), acquire
);
// Block until the gate is opened (returns to DLOCK_GATE_UNLOCKED).
#define _dispatch_gate_wait(l, flags) \
		_dispatch_gate_wait_slow(l, DLOCK_GATE_UNLOCKED, flags)
447 DISPATCH_ALWAYS_INLINE
449 _dispatch_gate_broadcast(dispatch_gate_t l
)
451 dispatch_lock cur
, value_self
= _dispatch_lock_value_for_self();
452 cur
= os_atomic_xchg(&l
->dgl_lock
, DLOCK_GATE_UNLOCKED
, release
);
453 if (likely(cur
== value_self
)) return;
454 _dispatch_gate_broadcast_slow(l
, cur
);
457 DISPATCH_ALWAYS_INLINE
459 _dispatch_once_gate_tryenter(dispatch_once_gate_t l
)
461 return os_atomic_cmpxchg(&l
->dgo_once
, DLOCK_ONCE_UNLOCKED
,
462 (dispatch_once_t
)_dispatch_lock_value_for_self(), acquire
);
// Block until the once gate reaches DLOCK_ONCE_DONE.
// NOTE(review): the macro's final argument line was lost in this copy;
// DLOCK_LOCK_NONE matches the gate-wait convention — confirm upstream.
#define _dispatch_once_gate_wait(l) \
		_dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \
				DLOCK_LOCK_NONE)
469 DISPATCH_ALWAYS_INLINE
470 static inline dispatch_once_t
471 _dispatch_once_xchg_done(dispatch_once_t
*pred
)
473 return os_atomic_xchg(pred
, DLOCK_ONCE_DONE
, release
);
476 DISPATCH_ALWAYS_INLINE
478 _dispatch_once_gate_broadcast(dispatch_once_gate_t l
)
480 dispatch_lock value_self
= _dispatch_lock_value_for_self();
481 dispatch_once_t cur
= _dispatch_once_xchg_done(&l
->dgo_once
);
482 if (likely(cur
== (dispatch_once_t
)value_self
)) return;
483 _dispatch_gate_broadcast_slow(&l
->dgo_gate
, (dispatch_lock
)cur
);
486 #endif // __DISPATCH_SHIMS_LOCK__