/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */

#ifndef __DISPATCH_SHIMS_LOCK__
#define __DISPATCH_SHIMS_LOCK__

#pragma mark - platform macros

DISPATCH_ENUM(dispatch_lock_options, uint32_t,
	DLOCK_LOCK_NONE = 0x00000000,
	DLOCK_LOCK_DATA_CONTENTION = 0x00010000,
);

#if TARGET_OS_MAC

typedef mach_port_t dispatch_lock_owner;
typedef uint32_t dispatch_lock;

// Lock word encoding: the owner thread's port name, with the two low bits
// repurposed as inverted flags (a set bit means "no waiters", resp. "no
// failed trylock"). Thread port names have both low bits set, which is why
// _dispatch_lock_owner() below can restore them after masking.
#define DLOCK_OWNER_NULL ((dispatch_lock_owner)MACH_PORT_NULL)
#define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc)
#define DLOCK_NOWAITERS_BIT ((dispatch_lock)0x00000001)
#define DLOCK_NOFAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002)
#define _dispatch_tid_self() ((dispatch_lock_owner)_dispatch_thread_port())

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked(dispatch_lock lock_value)
{
	return (lock_value & DLOCK_OWNER_MASK) != 0;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_lock_owner
_dispatch_lock_owner(dispatch_lock lock_value)
{
	lock_value &= DLOCK_OWNER_MASK;
	if (lock_value) {
		lock_value |= DLOCK_NOWAITERS_BIT | DLOCK_NOFAILED_TRYLOCK_BIT;
	}
	return lock_value;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid)
{
	// equivalent to _dispatch_lock_owner(lock_value) == tid
	return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_has_waiters(dispatch_lock lock_value)
{
	bool nowaiters_bit = (lock_value & DLOCK_NOWAITERS_BIT);
	return _dispatch_lock_is_locked(lock_value) != nowaiters_bit;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_has_failed_trylock(dispatch_lock lock_value)
{
	return !(lock_value & DLOCK_NOFAILED_TRYLOCK_BIT);
}
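
/*
 * Illustrative sketch (not part of the original header): how lock words
 * decode under this encoding. An uncontended lock held by `tid` stores
 * exactly `tid`; a waiter clears DLOCK_NOWAITERS_BIT and a failed trylock
 * clears DLOCK_NOFAILED_TRYLOCK_BIT. The helper name is hypothetical.
 */
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_lock_encoding_example(dispatch_lock_owner tid)
{
	dispatch_lock uncontended = (dispatch_lock)tid;
	dispatch_lock contended = uncontended & ~DLOCK_NOWAITERS_BIT;

	dispatch_assert(_dispatch_lock_is_locked_by(uncontended, tid));
	dispatch_assert(!_dispatch_lock_has_waiters(uncontended));
	dispatch_assert(_dispatch_lock_has_waiters(contended));
	dispatch_assert(!_dispatch_lock_has_failed_trylock(uncontended));
}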

#elif defined(__linux__)
#include <linux/futex.h>
#include <unistd.h>
#include <sys/syscall.h> /* For SYS_xxx definitions */

typedef uint32_t dispatch_lock;
typedef pid_t dispatch_lock_owner;

#define DLOCK_OWNER_NULL ((dispatch_lock_owner)0)
#define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK)
#define DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS)
#define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED)
#define _dispatch_tid_self() \
		((dispatch_lock_owner)(_dispatch_get_tsd_base()->tid))

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked(dispatch_lock lock_value)
{
	return (lock_value & DLOCK_OWNER_MASK) != 0;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_lock_owner
_dispatch_lock_owner(dispatch_lock lock_value)
{
	return (lock_value & DLOCK_OWNER_MASK);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid)
{
	return _dispatch_lock_owner(lock_value) == tid;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_has_waiters(dispatch_lock lock_value)
{
	return (lock_value & DLOCK_WAITERS_BIT);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_has_failed_trylock(dispatch_lock lock_value)
{
	return !(lock_value & DLOCK_FAILED_TRYLOCK_BIT);
}

#else
# error define _dispatch_lock encoding scheme for your platform here
#endif

#if __has_include(<sys/ulock.h>)
#include <sys/ulock.h>
#endif

#ifndef HAVE_UL_COMPARE_AND_WAIT
#if defined(UL_COMPARE_AND_WAIT) && DISPATCH_HOST_SUPPORTS_OSX(101200)
# define HAVE_UL_COMPARE_AND_WAIT 1
#else
# define HAVE_UL_COMPARE_AND_WAIT 0
#endif
#endif // HAVE_UL_COMPARE_AND_WAIT

#ifndef HAVE_UL_UNFAIR_LOCK
#if defined(UL_UNFAIR_LOCK) && DISPATCH_HOST_SUPPORTS_OSX(101200)
# define HAVE_UL_UNFAIR_LOCK 1
#else
# define HAVE_UL_UNFAIR_LOCK 0
#endif
#endif // HAVE_UL_UNFAIR_LOCK

#ifndef HAVE_FUTEX
#ifdef __linux__
#define HAVE_FUTEX 1
#else
#define HAVE_FUTEX 0
#endif
#endif // HAVE_FUTEX

#pragma mark - semaphores

#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
#if TARGET_OS_MAC
#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK (!HAVE_UL_COMPARE_AND_WAIT)
#else
#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK 0
#endif
#endif

#if USE_MACH_SEM

typedef semaphore_t _os_semaphore_t;
#define _OS_SEM_POLICY_FIFO SYNC_POLICY_FIFO
#define _OS_SEM_POLICY_LIFO SYNC_POLICY_LIFO
#define _OS_SEM_TIMEOUT() KERN_OPERATION_TIMED_OUT

#define _os_semaphore_init(sema, policy) (void)(*(sema) = MACH_PORT_NULL)
#define _os_semaphore_is_created(sema) (*(sema) != MACH_PORT_NULL)
void _os_semaphore_create_slow(_os_semaphore_t *sema, int policy);

#elif USE_POSIX_SEM

typedef sem_t _os_semaphore_t;
#define _OS_SEM_POLICY_FIFO 0
#define _OS_SEM_POLICY_LIFO 0
#define _OS_SEM_TIMEOUT() ((errno) = ETIMEDOUT, -1)

void _os_semaphore_init(_os_semaphore_t *sema, int policy);
#define _os_semaphore_is_created(sema) 1
#define _os_semaphore_create_slow(sema, policy) ((void)0)

#elif USE_WIN32_SEM

typedef HANDLE _os_semaphore_t;
#define _OS_SEM_POLICY_FIFO 0
#define _OS_SEM_POLICY_LIFO 0
#define _OS_SEM_TIMEOUT() ((errno) = ETIMEDOUT, -1)

#define _os_semaphore_init(sema, policy) (void)(*(sema) = 0)
#define _os_semaphore_is_created(sema) (*(sema) != 0)
void _os_semaphore_create_slow(_os_semaphore_t *sema, int policy);

#else
#error "port has to implement _os_semaphore_t"
#endif

void _os_semaphore_dispose_slow(_os_semaphore_t *sema);
void _os_semaphore_signal(_os_semaphore_t *sema, long count);
void _os_semaphore_wait(_os_semaphore_t *sema);
bool _os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout);

DISPATCH_ALWAYS_INLINE
static inline void
_os_semaphore_create(_os_semaphore_t *sema, int policy)
{
	if (!_os_semaphore_is_created(sema)) {
		_os_semaphore_create_slow(sema, policy);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_os_semaphore_dispose(_os_semaphore_t *sema)
{
	if (_os_semaphore_is_created(sema)) {
		_os_semaphore_dispose_slow(sema);
	}
}
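
/*
 * Illustrative usage sketch (hypothetical helper, not part of the original
 * header): the portable call sequence for these wrappers. Init leaves the
 * semaphore "not yet created" on Mach, _os_semaphore_create() takes the slow
 * path only when needed, and signaling before waiting keeps this
 * single-threaded demo from blocking.
 */
DISPATCH_ALWAYS_INLINE
static inline void
_os_semaphore_usage_example(void)
{
	_os_semaphore_t sema;
	_os_semaphore_init(&sema, _OS_SEM_POLICY_FIFO);
	_os_semaphore_create(&sema, _OS_SEM_POLICY_FIFO); // slow path if needed
	_os_semaphore_signal(&sema, 1); // makes the following wait return at once
	_os_semaphore_wait(&sema);
	_os_semaphore_dispose(&sema);
}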

#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
semaphore_t _dispatch_thread_semaphore_create(void);
void _dispatch_thread_semaphore_dispose(void *);

DISPATCH_ALWAYS_INLINE
static inline semaphore_t
_dispatch_get_thread_semaphore(void)
{
	semaphore_t sema = (semaphore_t)(uintptr_t)
			_dispatch_thread_getspecific(dispatch_sema4_key);
	if (unlikely(!sema)) {
		return _dispatch_thread_semaphore_create();
	}
	_dispatch_thread_setspecific(dispatch_sema4_key, NULL);
	return sema;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_put_thread_semaphore(semaphore_t sema)
{
	semaphore_t old_sema = (semaphore_t)(uintptr_t)
			_dispatch_thread_getspecific(dispatch_sema4_key);
	_dispatch_thread_setspecific(dispatch_sema4_key, (void*)(uintptr_t)sema);
	if (unlikely(old_sema)) {
		return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema);
	}
}
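
/*
 * Sketch of the borrow/return pattern these helpers implement (the caller
 * below is hypothetical): each thread caches at most one kernel semaphore
 * in TSD, so repeated short waits reuse it instead of paying a mach port
 * allocation every time.
 */
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_semaphore_example(void)
{
	semaphore_t sema = _dispatch_get_thread_semaphore(); // borrow (or create)
	// ... use `sema` for a one-shot wait/signal pair ...
	_dispatch_put_thread_semaphore(sema); // return it to the TSD cache
}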
#endif


#pragma mark - compare and wait

DISPATCH_NOT_TAIL_CALLED
void _dispatch_wait_on_address(uint32_t volatile *address, uint32_t value,
		dispatch_lock_options_t flags);
void _dispatch_wake_by_address(uint32_t volatile *address);
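
/*
 * Illustrative sketch (hypothetical helpers, not part of the original
 * header): the check-then-wait / store-then-wake protocol this pair
 * supports. _dispatch_wait_on_address() only parks the thread while
 * *address still equals `value`, so a waiter that re-checks in a loop
 * cannot miss a wake from the publisher. Assumes the os_atomic_load/store
 * shims from this codebase.
 */
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_example_wait_until_nonzero(uint32_t volatile *address)
{
	uint32_t value;
	while ((value = os_atomic_load(address, acquire)) == 0) {
		_dispatch_wait_on_address(address, 0, DLOCK_LOCK_NONE);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_example_publish_and_wake(uint32_t volatile *address)
{
	os_atomic_store(address, 1, release);
	_dispatch_wake_by_address(address);
}
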
#pragma mark - thread event
/**
 * @typedef dispatch_thread_event_t
 *
 * @abstract
 * Dispatch Thread Events are used for one-time synchronization between threads.
 *
 * @discussion
 * Dispatch Thread Events are cheap synchronization points used when a thread
 * needs to block until a certain event has happened. A Dispatch Thread Event
 * must be initialized and destroyed with _dispatch_thread_event_init() and
 * _dispatch_thread_event_destroy().
 *
 * A Dispatch Thread Event must be waited on and signaled exactly once between
 * initialization and destruction. These objects are simpler than semaphores
 * and do not support being signaled and waited on an arbitrary number of times.
 *
 * This locking primitive has no notion of ownership.
 */
typedef struct dispatch_thread_event_s {
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	union {
		_os_semaphore_t dte_sema;
		uint32_t dte_value;
	};
#elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	// 1 means signalled but not waited on yet
	// UINT32_MAX means waited on, but not signalled yet
	// 0 is the initial and final state
	uint32_t dte_value;
#else
	_os_semaphore_t dte_sema;
#endif
} dispatch_thread_event_s, *dispatch_thread_event_t;

DISPATCH_NOT_TAIL_CALLED
void _dispatch_thread_event_wait_slow(dispatch_thread_event_t);
void _dispatch_thread_event_signal_slow(dispatch_thread_event_t);

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_init(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		dte->dte_sema = _dispatch_get_thread_semaphore();
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	dte->dte_value = 0;
#else
	_os_semaphore_init(&dte->dte_sema, _OS_SEM_POLICY_FIFO);
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_signal(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		_dispatch_thread_event_signal_slow(dte);
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	if (os_atomic_inc_orig(&dte->dte_value, release) == 0) {
		// a 0 -> 1 transition means no waiter is parked yet: no wake needed
		return;
	}
	// for any other previous value (even a corrupt one), force a wake;
	// the waiter does the validation
#else
	// fallthrough
#endif
	_dispatch_thread_event_signal_slow(dte);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_wait(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		_dispatch_thread_event_wait_slow(dte);
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	if (os_atomic_dec(&dte->dte_value, acquire) == 0) {
		// a 1 -> 0 transition means the event was already signaled: return
		return;
	}
	// for any other value, go to the slow path, which checks it's not corrupt
#else
	// fallthrough
#endif
	_dispatch_thread_event_wait_slow(dte);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_destroy(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		_dispatch_put_thread_semaphore(dte->dte_sema);
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	// nothing to free, just check the event went through a full cycle
	dispatch_assert(dte->dte_value == 0);
#else
	_os_semaphore_dispose(&dte->dte_sema);
#endif
}
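
/*
 * Illustrative lifecycle sketch (hypothetical helper, not part of the
 * original header). In real use the signal and the wait happen on two
 * different threads; signaling first is legal and makes the subsequent
 * wait return without blocking, which keeps this single-threaded demo safe.
 */
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_example(void)
{
	dispatch_thread_event_s dte;
	_dispatch_thread_event_init(&dte);
	_dispatch_thread_event_signal(&dte); // 0 -> 1: no waiter parked yet
	_dispatch_thread_event_wait(&dte);   // 1 -> 0: returns immediately
	_dispatch_thread_event_destroy(&dte);
}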

#pragma mark - unfair lock

typedef struct dispatch_unfair_lock_s {
	dispatch_lock dul_lock;
} dispatch_unfair_lock_s, *dispatch_unfair_lock_t;

DISPATCH_NOT_TAIL_CALLED
void _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t l,
		dispatch_lock_options_t options);
void _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t l,
		dispatch_lock tid_cur);

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_unfair_lock_lock(dispatch_unfair_lock_t l)
{
	dispatch_lock tid_self = _dispatch_tid_self();
	if (likely(os_atomic_cmpxchg(&l->dul_lock,
			DLOCK_OWNER_NULL, tid_self, acquire))) {
		return;
	}
	return _dispatch_unfair_lock_lock_slow(l, DLOCK_LOCK_NONE);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l,
		dispatch_lock_owner *owner)
{
	dispatch_lock tid_old, tid_new, tid_self = _dispatch_tid_self();

	os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, acquire, {
		if (likely(!_dispatch_lock_is_locked(tid_old))) {
			tid_new = tid_self;
		} else {
#ifdef DLOCK_NOFAILED_TRYLOCK_BIT
			tid_new = tid_old & ~DLOCK_NOFAILED_TRYLOCK_BIT;
#else
			tid_new = tid_old | DLOCK_FAILED_TRYLOCK_BIT;
#endif
		}
	});
	if (owner) *owner = _dispatch_lock_owner(tid_new);
	return !_dispatch_lock_is_locked(tid_old);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unfair_lock_tryunlock(dispatch_unfair_lock_t l)
{
	dispatch_lock tid_old, tid_new;

	os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, release, {
#ifdef DLOCK_NOFAILED_TRYLOCK_BIT
		if (likely(tid_old & DLOCK_NOFAILED_TRYLOCK_BIT)) {
			tid_new = DLOCK_OWNER_NULL;
		} else {
			tid_new = tid_old | DLOCK_NOFAILED_TRYLOCK_BIT;
		}
#else
		if (likely(!(tid_old & DLOCK_FAILED_TRYLOCK_BIT))) {
			tid_new = DLOCK_OWNER_NULL;
		} else {
			tid_new = tid_old & ~DLOCK_FAILED_TRYLOCK_BIT;
		}
#endif
	});
	if (unlikely(tid_new)) {
		// unlock failed, renew the lock, which needs an acquire barrier
		os_atomic_thread_fence(acquire);
		return false;
	}
	if (unlikely(_dispatch_lock_has_waiters(tid_old))) {
		_dispatch_unfair_lock_unlock_slow(l, tid_old);
	}
	return true;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unfair_lock_unlock_had_failed_trylock(dispatch_unfair_lock_t l)
{
	dispatch_lock tid_cur, tid_self = _dispatch_tid_self();
#if HAVE_FUTEX
	if (likely(os_atomic_cmpxchgv(&l->dul_lock,
			tid_self, DLOCK_OWNER_NULL, &tid_cur, release))) {
		return false;
	}
#else
	tid_cur = os_atomic_xchg(&l->dul_lock, DLOCK_OWNER_NULL, release);
	if (likely(tid_cur == tid_self)) return false;
#endif
	_dispatch_unfair_lock_unlock_slow(l, tid_cur);
	return _dispatch_lock_has_failed_trylock(tid_cur);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_unfair_lock_unlock(dispatch_unfair_lock_t l)
{
	(void)_dispatch_unfair_lock_unlock_had_failed_trylock(l);
}
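
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the intended trylock/lock pairing. On failure, trylock also
 * reports the current owner, which callers can inspect before falling
 * back to a blocking lock.
 */
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_unfair_lock_example(dispatch_unfair_lock_t l)
{
	dispatch_lock_owner owner;
	if (_dispatch_unfair_lock_trylock(l, &owner)) {
		// ... critical section ...
		_dispatch_unfair_lock_unlock(l);
	} else {
		// `owner` identifies the holder; block until it unlocks
		_dispatch_unfair_lock_lock(l);
		// ... critical section ...
		_dispatch_unfair_lock_unlock(l);
	}
}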

#pragma mark - gate lock

#if HAVE_UL_UNFAIR_LOCK || HAVE_FUTEX
#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 1
#else
#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 0
#endif

#define DLOCK_GATE_UNLOCKED ((dispatch_lock)0)

#define DLOCK_ONCE_UNLOCKED ((dispatch_once_t)0)
#define DLOCK_ONCE_DONE (~(dispatch_once_t)0)

typedef struct dispatch_gate_s {
	dispatch_lock dgl_lock;
} dispatch_gate_s, *dispatch_gate_t;

typedef struct dispatch_once_gate_s {
	union {
		dispatch_gate_s dgo_gate;
		dispatch_once_t dgo_once;
	};
} dispatch_once_gate_s, *dispatch_once_gate_t;

DISPATCH_NOT_TAIL_CALLED
void _dispatch_gate_wait_slow(dispatch_gate_t l, dispatch_lock value,
		uint32_t flags);
void _dispatch_gate_broadcast_slow(dispatch_gate_t l, dispatch_lock tid_cur);

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_gate_tryenter(dispatch_gate_t l)
{
	dispatch_lock tid_self = _dispatch_tid_self();
	return likely(os_atomic_cmpxchg(&l->dgl_lock,
			DLOCK_GATE_UNLOCKED, tid_self, acquire));
}

#define _dispatch_gate_wait(l, flags) \
		_dispatch_gate_wait_slow(l, DLOCK_GATE_UNLOCKED, flags)

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_gate_broadcast(dispatch_gate_t l)
{
	dispatch_lock tid_cur, tid_self = _dispatch_tid_self();
	tid_cur = os_atomic_xchg(&l->dgl_lock, DLOCK_GATE_UNLOCKED, release);
	if (likely(tid_cur == tid_self)) return;
	_dispatch_gate_broadcast_slow(l, tid_cur);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_once_gate_tryenter(dispatch_once_gate_t l)
{
	dispatch_once_t tid_self = (dispatch_once_t)_dispatch_tid_self();
	return likely(os_atomic_cmpxchg(&l->dgo_once,
			DLOCK_ONCE_UNLOCKED, tid_self, acquire));
}

#define _dispatch_once_gate_wait(l) \
		_dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \
				DLOCK_LOCK_NONE)

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_once_gate_broadcast(dispatch_once_gate_t l)
{
	dispatch_once_t tid_cur, tid_self = (dispatch_once_t)_dispatch_tid_self();
	// see once.c for an explanation of this trick
	os_atomic_maximally_synchronizing_barrier();
	// the barrier above is assumed to contain a release barrier
	tid_cur = os_atomic_xchg(&l->dgo_once, DLOCK_ONCE_DONE, relaxed);
	if (likely(tid_cur == tid_self)) return;
	_dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)tid_cur);
}
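
/*
 * Simplified sketch (hypothetical helper, not part of the original header)
 * of how a once gate backs a dispatch_once-style initializer. The real
 * dispatch_once fast path first checks for DLOCK_ONCE_DONE with a plain
 * load before taking this path; see once.c for the full protocol.
 */
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_once_gate_example(dispatch_once_gate_t l, void (*init_fn)(void))
{
	if (_dispatch_once_gate_tryenter(l)) {
		init_fn(); // the winner runs the initializer exactly once
		_dispatch_once_gate_broadcast(l); // publish DLOCK_ONCE_DONE, wake waiters
	} else {
		_dispatch_once_gate_wait(l); // losers block until the value is DONE
	}
}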

#endif // __DISPATCH_SHIMS_LOCK__