/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */

#ifndef __DISPATCH_SHIMS_LOCK__
#define __DISPATCH_SHIMS_LOCK__

#pragma mark - platform macros

DISPATCH_ENUM(dispatch_lock_options, uint32_t,
	DLOCK_LOCK_NONE = 0x00000000,
	DLOCK_LOCK_DATA_CONTENTION = 0x00010000,
);

#if TARGET_OS_MAC

typedef mach_port_t dispatch_lock_owner;
typedef uint32_t dispatch_lock;

#define DLOCK_OWNER_NULL ((dispatch_lock_owner)MACH_PORT_NULL)
#define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc)
#define DLOCK_NOWAITERS_BIT ((dispatch_lock)0x00000001)
#define DLOCK_NOFAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002)
#define _dispatch_tid_self() ((dispatch_lock_owner)_dispatch_thread_port())

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked(dispatch_lock lock_value)
{
	return (lock_value & DLOCK_OWNER_MASK) != 0;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_lock_owner
_dispatch_lock_owner(dispatch_lock lock_value)
{
	lock_value &= DLOCK_OWNER_MASK;
	if (lock_value) {
		lock_value |= DLOCK_NOWAITERS_BIT | DLOCK_NOFAILED_TRYLOCK_BIT;
	}
	return lock_value;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid)
{
	// equivalent to _dispatch_lock_owner(lock_value) == tid
	return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_has_waiters(dispatch_lock lock_value)
{
	bool nowaiters_bit = (lock_value & DLOCK_NOWAITERS_BIT);
	return _dispatch_lock_is_locked(lock_value) != nowaiters_bit;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_has_failed_trylock(dispatch_lock lock_value)
{
	return !(lock_value & DLOCK_NOFAILED_TRYLOCK_BIT);
}
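
/*
 * Worked example of the encoding above. The scheme relies on mach thread
 * port names having their low two bits set (note how _dispatch_lock_owner()
 * ORs them back in), which is why both flag bits are inverted: a freshly
 * acquired, uncontended lock already carries the "no waiters" and
 * "no failed trylock" bits. Assuming a hypothetical thread port name 0xc03:
 *
 *   0x00000000   unlocked
 *   0x00000c03   locked by 0xc03, no waiters, no failed trylock
 *   0x00000c02   locked by 0xc03, has waiters (DLOCK_NOWAITERS_BIT cleared)
 *   0x00000c01   locked by 0xc03, a trylock against it failed
 *                (DLOCK_NOFAILED_TRYLOCK_BIT cleared)
 */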

#elif defined(__linux__)
#include <linux/futex.h>
#include <unistd.h>
#include <sys/syscall.h> /* For SYS_xxx definitions */

typedef uint32_t dispatch_lock;
typedef pid_t dispatch_lock_owner;

#define DLOCK_OWNER_NULL ((dispatch_lock_owner)0)
#define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK)
#define DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS)
#define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED)
#define _dispatch_tid_self() \
		((dispatch_lock_owner)(_dispatch_get_tsd_base()->tid))

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked(dispatch_lock lock_value)
{
	return (lock_value & DLOCK_OWNER_MASK) != 0;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_lock_owner
_dispatch_lock_owner(dispatch_lock lock_value)
{
	return (lock_value & DLOCK_OWNER_MASK);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_lock_owner tid)
{
	return _dispatch_lock_owner(lock_value) == tid;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_has_waiters(dispatch_lock lock_value)
{
	return (lock_value & DLOCK_WAITERS_BIT);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_has_failed_trylock(dispatch_lock lock_value)
{
	// set by a failed trylock (see _dispatch_unfair_lock_trylock)
	return (lock_value & DLOCK_FAILED_TRYLOCK_BIT);
}

#else
# error define _dispatch_lock encoding scheme for your platform here
#endif

#if __has_include(<sys/ulock.h>)
#include <sys/ulock.h>
#endif

#ifndef HAVE_UL_COMPARE_AND_WAIT
#if defined(UL_COMPARE_AND_WAIT) && DISPATCH_HOST_SUPPORTS_OSX(101200)
# define HAVE_UL_COMPARE_AND_WAIT 1
#else
# define HAVE_UL_COMPARE_AND_WAIT 0
#endif
#endif // HAVE_UL_COMPARE_AND_WAIT

#ifndef HAVE_UL_UNFAIR_LOCK
#if defined(UL_UNFAIR_LOCK) && DISPATCH_HOST_SUPPORTS_OSX(101200)
# define HAVE_UL_UNFAIR_LOCK 1
#else
# define HAVE_UL_UNFAIR_LOCK 0
#endif
#endif // HAVE_UL_UNFAIR_LOCK

#ifndef DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
#define DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK \
		(!HAVE_UL_COMPARE_AND_WAIT && !HAVE_FUTEX)
#endif

#ifndef HAVE_FUTEX
#ifdef __linux__
#define HAVE_FUTEX 1
#else
#define HAVE_FUTEX 0
#endif
#endif // HAVE_FUTEX

#if USE_MACH_SEM
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		if (unlikely((x) == KERN_INVALID_NAME)) { \
			DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \
		} else if (unlikely(x)) { \
			DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
		} \
	} while (0)
#define DISPATCH_GROUP_VERIFY_KR(x) do { \
		if (unlikely((x) == KERN_INVALID_NAME)) { \
			DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_group_t"); \
		} else if (unlikely(x)) { \
			DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
		} \
	} while (0)
#elif USE_POSIX_SEM
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (unlikely((x) == -1)) { \
			DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \
		} \
	} while (0)
#endif

#pragma mark - compare and wait

DISPATCH_NOT_TAIL_CALLED
void _dispatch_wait_on_address(uint32_t volatile *address, uint32_t value,
		dispatch_lock_options_t flags);
void _dispatch_wake_by_address(uint32_t volatile *address);
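
/*
 * Usage sketch, using the os_atomic shims; `status` and the STATE_* values
 * are placeholder names. Wakeups can be spurious, so the woken thread must
 * re-check the word in a loop:
 *
 *   uint32_t v = os_atomic_load(&status, relaxed);
 *   while (v == STATE_BUSY) {
 *       // blocks only while `status` still contains `v`
 *       _dispatch_wait_on_address(&status, v, DLOCK_LOCK_NONE);
 *       v = os_atomic_load(&status, relaxed);
 *   }
 *
 *   os_atomic_store(&status, STATE_DONE, release);   // publishing thread
 *   _dispatch_wake_by_address(&status);
 */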

#pragma mark - thread event
/**
 * @typedef dispatch_thread_event_t
 *
 * @abstract
 * Dispatch Thread Events are used for one-time synchronization between threads.
 *
 * @discussion
 * Dispatch Thread Events are cheap synchronization points used when a thread
 * needs to block until a certain event has happened. A Dispatch Thread Event
 * must be initialized and destroyed with _dispatch_thread_event_init() and
 * _dispatch_thread_event_destroy().
 *
 * A Dispatch Thread Event must be waited on and signaled exactly once between
 * initialization and destruction. These objects are simpler than semaphores
 * and do not support being signaled and waited on an arbitrary number of
 * times.
 *
 * This locking primitive has no notion of ownership.
 */
typedef struct dispatch_thread_event_s {
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	union {
		semaphore_t dte_semaphore;
		uint32_t dte_value;
	};
#elif HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	// 1 means signaled but not waited on yet
	// UINT32_MAX means waited on, but not signaled yet
	// 0 is the initial and final state
	uint32_t dte_value;
#elif USE_POSIX_SEM
	sem_t dte_sem;
#else
# error define dispatch_thread_event_s for your platform
#endif
} dispatch_thread_event_s, *dispatch_thread_event_t;
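
/*
 * Usage sketch for a one-shot handoff between two threads (`event` is a
 * placeholder name; each side runs exactly once per init/destroy cycle):
 *
 *   dispatch_thread_event_s event;
 *   _dispatch_thread_event_init(&event);
 *
 *   // waiting thread:
 *   _dispatch_thread_event_wait(&event);    // blocks until signaled
 *   _dispatch_thread_event_destroy(&event);
 *
 *   // signaling thread:
 *   _dispatch_thread_event_signal(&event);
 */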

#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
semaphore_t _dispatch_thread_semaphore_create(void);
void _dispatch_thread_semaphore_dispose(void *);

DISPATCH_ALWAYS_INLINE
static inline semaphore_t
_dispatch_get_thread_semaphore(void)
{
	// take the semaphore out of the thread's one-deep cache, if present
	semaphore_t sema = (semaphore_t)(uintptr_t)
			_dispatch_thread_getspecific(dispatch_sema4_key);
	if (unlikely(!sema)) {
		return _dispatch_thread_semaphore_create();
	}
	_dispatch_thread_setspecific(dispatch_sema4_key, NULL);
	return sema;
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_put_thread_semaphore(semaphore_t sema)
{
	// return the semaphore to the cache, disposing of any semaphore
	// the slot already held
	semaphore_t old_sema = (semaphore_t)(uintptr_t)
			_dispatch_thread_getspecific(dispatch_sema4_key);
	_dispatch_thread_setspecific(dispatch_sema4_key, (void *)(uintptr_t)sema);
	if (unlikely(old_sema)) {
		return _dispatch_thread_semaphore_dispose((void *)(uintptr_t)old_sema);
	}
}
#endif

DISPATCH_NOT_TAIL_CALLED
void _dispatch_thread_event_wait_slow(dispatch_thread_event_t);
void _dispatch_thread_event_signal_slow(dispatch_thread_event_t);

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_init(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		dte->dte_semaphore = _dispatch_get_thread_semaphore();
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	dte->dte_value = 0;
#elif USE_POSIX_SEM
	int rc = sem_init(&dte->dte_sem, 0, 0);
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_signal(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		_dispatch_thread_event_signal_slow(dte);
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	if (os_atomic_inc_orig(&dte->dte_value, release) == 0) {
		// 0 -> 1 transition doesn't need a signal
		return;
	}
	// for any other transition, force a wake even when the value is
	// corrupt: waiters do the validation
#elif USE_POSIX_SEM
	// fallthrough
#endif
	_dispatch_thread_event_signal_slow(dte);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_wait(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		_dispatch_thread_event_wait_slow(dte);
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	if (os_atomic_dec(&dte->dte_value, acquire) == 0) {
		// 1 -> 0 is always a valid transition, so we can return;
		// any other value goes to the slow path, which validates it
		return;
	}
#elif USE_POSIX_SEM
	// fallthrough
#endif
	_dispatch_thread_event_wait_slow(dte);
}
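
/*
 * The fast paths above encode the whole protocol in dte_value: uncontended,
 * signal moves 0 -> 1 and a later wait moves 1 -> 0, both staying on the
 * fast path; contended, wait moves 0 -> UINT32_MAX and parks in the slow
 * path, then signal moves UINT32_MAX -> 0 and takes the slow path to wake
 * the parked waiter.
 */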

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_destroy(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		_dispatch_put_thread_semaphore(dte->dte_semaphore);
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	// nothing to do
	dispatch_assert(dte->dte_value == 0);
#elif USE_POSIX_SEM
	int rc = sem_destroy(&dte->dte_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
#endif
}

#pragma mark - unfair lock

typedef struct dispatch_unfair_lock_s {
	dispatch_lock dul_lock;
} dispatch_unfair_lock_s, *dispatch_unfair_lock_t;

DISPATCH_NOT_TAIL_CALLED
void _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t l,
		dispatch_lock_options_t options);
void _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t l,
		dispatch_lock tid_cur);

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_unfair_lock_lock(dispatch_unfair_lock_t l)
{
	dispatch_lock tid_self = _dispatch_tid_self();
	if (likely(os_atomic_cmpxchg(&l->dul_lock,
			DLOCK_OWNER_NULL, tid_self, acquire))) {
		return;
	}
	return _dispatch_unfair_lock_lock_slow(l, DLOCK_LOCK_NONE);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l,
		dispatch_lock_owner *owner)
{
	dispatch_lock tid_old, tid_new, tid_self = _dispatch_tid_self();

	os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, acquire, {
		if (likely(!_dispatch_lock_is_locked(tid_old))) {
			tid_new = tid_self;
		} else {
#ifdef DLOCK_NOFAILED_TRYLOCK_BIT
			tid_new = tid_old & ~DLOCK_NOFAILED_TRYLOCK_BIT;
#else
			tid_new = tid_old | DLOCK_FAILED_TRYLOCK_BIT;
#endif
		}
	});
	if (owner) *owner = _dispatch_lock_owner(tid_new);
	return !_dispatch_lock_is_locked(tid_old);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unfair_lock_tryunlock(dispatch_unfair_lock_t l)
{
	dispatch_lock tid_old, tid_new;

	os_atomic_rmw_loop(&l->dul_lock, tid_old, tid_new, release, {
#ifdef DLOCK_NOFAILED_TRYLOCK_BIT
		if (likely(tid_old & DLOCK_NOFAILED_TRYLOCK_BIT)) {
			tid_new = DLOCK_OWNER_NULL;
		} else {
			tid_new = tid_old | DLOCK_NOFAILED_TRYLOCK_BIT;
		}
#else
		if (likely(!(tid_old & DLOCK_FAILED_TRYLOCK_BIT))) {
			tid_new = DLOCK_OWNER_NULL;
		} else {
			tid_new = tid_old & ~DLOCK_FAILED_TRYLOCK_BIT;
		}
#endif
	});
	if (unlikely(tid_new)) {
		// unlock failed, renew the lock, which needs an acquire barrier
		os_atomic_thread_fence(acquire);
		return false;
	}
	if (unlikely(_dispatch_lock_has_waiters(tid_old))) {
		_dispatch_unfair_lock_unlock_slow(l, tid_old);
	}
	return true;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unfair_lock_unlock_had_failed_trylock(dispatch_unfair_lock_t l)
{
	dispatch_lock tid_cur, tid_self = _dispatch_tid_self();
#if HAVE_FUTEX
	if (likely(os_atomic_cmpxchgv(&l->dul_lock,
			tid_self, DLOCK_OWNER_NULL, &tid_cur, release))) {
		return false;
	}
#else
	tid_cur = os_atomic_xchg(&l->dul_lock, DLOCK_OWNER_NULL, release);
	if (likely(tid_cur == tid_self)) return false;
#endif
	_dispatch_unfair_lock_unlock_slow(l, tid_cur);
	return _dispatch_lock_has_failed_trylock(tid_cur);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_unfair_lock_unlock(dispatch_unfair_lock_t l)
{
	(void)_dispatch_unfair_lock_unlock_had_failed_trylock(l);
}
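
/*
 * Usage sketch (`lock` is a placeholder name; the all-zero state is the
 * unlocked state):
 *
 *   static dispatch_unfair_lock_s lock;   // zero-initialized == unlocked
 *
 *   _dispatch_unfair_lock_lock(&lock);
 *   // ... critical section ...
 *   _dispatch_unfair_lock_unlock(&lock);
 *
 *   dispatch_lock_owner owner;
 *   if (_dispatch_unfair_lock_trylock(&lock, &owner)) {
 *       // ... critical section ...
 *       _dispatch_unfair_lock_unlock(&lock);
 *   } else {
 *       // `owner` holds the tid of the current lock owner
 *   }
 */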

#pragma mark - gate lock

#if HAVE_UL_UNFAIR_LOCK || HAVE_FUTEX
#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 1
#else
#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 0
#endif

#define DLOCK_GATE_UNLOCKED ((dispatch_lock)0)

#define DLOCK_ONCE_UNLOCKED ((dispatch_once_t)0)
#define DLOCK_ONCE_DONE (~(dispatch_once_t)0)

typedef struct dispatch_gate_s {
	dispatch_lock dgl_lock;
} dispatch_gate_s, *dispatch_gate_t;

typedef struct dispatch_once_gate_s {
	union {
		dispatch_gate_s dgo_gate;
		dispatch_once_t dgo_once;
	};
} dispatch_once_gate_s, *dispatch_once_gate_t;

DISPATCH_NOT_TAIL_CALLED
void _dispatch_gate_wait_slow(dispatch_gate_t l, dispatch_lock value,
		uint32_t flags);
void _dispatch_gate_broadcast_slow(dispatch_gate_t l, dispatch_lock tid_cur);

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_gate_tryenter(dispatch_gate_t l)
{
	dispatch_lock tid_self = _dispatch_tid_self();
	return likely(os_atomic_cmpxchg(&l->dgl_lock,
			DLOCK_GATE_UNLOCKED, tid_self, acquire));
}

#define _dispatch_gate_wait(l, flags) \
		_dispatch_gate_wait_slow(l, DLOCK_GATE_UNLOCKED, flags)

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_gate_broadcast(dispatch_gate_t l)
{
	dispatch_lock tid_cur, tid_self = _dispatch_tid_self();
	tid_cur = os_atomic_xchg(&l->dgl_lock, DLOCK_GATE_UNLOCKED, release);
	if (likely(tid_cur == tid_self)) return;
	_dispatch_gate_broadcast_slow(l, tid_cur);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_once_gate_tryenter(dispatch_once_gate_t l)
{
	dispatch_once_t tid_self = (dispatch_once_t)_dispatch_tid_self();
	return likely(os_atomic_cmpxchg(&l->dgo_once,
			DLOCK_ONCE_UNLOCKED, tid_self, acquire));
}

#define _dispatch_once_gate_wait(l) \
		_dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \
				DLOCK_LOCK_NONE)

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_once_gate_broadcast(dispatch_once_gate_t l)
{
	dispatch_once_t tid_cur, tid_self = (dispatch_once_t)_dispatch_tid_self();
	// see once.c for explanation about this trick
	os_atomic_maximally_synchronizing_barrier();
	// above assumed to contain release barrier
	tid_cur = os_atomic_xchg(&l->dgo_once, DLOCK_ONCE_DONE, relaxed);
	if (likely(tid_cur == tid_self)) return;
	_dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)tid_cur);
}
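
/*
 * Once-gate usage sketch, mirroring the pattern dispatch_once uses in
 * once.c (`predicate` and `do_init` are placeholder names):
 *
 *   static dispatch_once_gate_s predicate;   // DLOCK_ONCE_UNLOCKED
 *
 *   if (_dispatch_once_gate_tryenter(&predicate)) {
 *       do_init();                            // winner runs the initializer
 *       _dispatch_once_gate_broadcast(&predicate);
 *   } else {
 *       _dispatch_once_gate_wait(&predicate); // losers park until DONE
 *   }
 */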

#endif // __DISPATCH_SHIMS_LOCK__