/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

/*
 * IMPORTANT: This header file describes INTERNAL interfaces to libdispatch
 * which are subject to change in future releases of Mac OS X. Any applications
 * relying on these interfaces WILL break.
 */

#ifndef __DISPATCH_SHIMS_LOCK__
#define __DISPATCH_SHIMS_LOCK__

#pragma mark - platform macros

DISPATCH_ENUM(dispatch_lock_options, uint32_t,
	DLOCK_LOCK_NONE = 0x00000000,
	DLOCK_LOCK_DATA_CONTENTION = 0x00010000,
);

#if TARGET_OS_MAC

typedef mach_port_t dispatch_tid;
typedef uint32_t dispatch_lock;

#define DLOCK_OWNER_MASK ((dispatch_lock)0xfffffffc)
#define DLOCK_WAITERS_BIT ((dispatch_lock)0x00000001)
#define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)0x00000002)

#define DLOCK_OWNER_NULL ((dispatch_tid)MACH_PORT_NULL)
#define _dispatch_tid_self() ((dispatch_tid)_dispatch_thread_port())

DISPATCH_ALWAYS_INLINE
static inline dispatch_tid
_dispatch_lock_owner(dispatch_lock lock_value)
{
	if (lock_value & DLOCK_OWNER_MASK) {
		return lock_value | DLOCK_WAITERS_BIT | DLOCK_FAILED_TRYLOCK_BIT;
	}
	return DLOCK_OWNER_NULL;
}

#elif defined(__linux__)

#include <linux/futex.h>
#include <unistd.h>
#include <sys/syscall.h> /* For SYS_xxx definitions */

typedef uint32_t dispatch_tid;
typedef uint32_t dispatch_lock;

#define DLOCK_OWNER_MASK ((dispatch_lock)FUTEX_TID_MASK)
#define DLOCK_WAITERS_BIT ((dispatch_lock)FUTEX_WAITERS)
#define DLOCK_FAILED_TRYLOCK_BIT ((dispatch_lock)FUTEX_OWNER_DIED)

#define DLOCK_OWNER_NULL ((dispatch_tid)0)
#define _dispatch_tid_self() ((dispatch_tid)(_dispatch_get_tsd_base()->tid))

DISPATCH_ALWAYS_INLINE
static inline dispatch_tid
_dispatch_lock_owner(dispatch_lock lock_value)
{
	return lock_value & DLOCK_OWNER_MASK;
}

#else
# error define _dispatch_lock encoding scheme for your platform here
#endif

DISPATCH_ALWAYS_INLINE
static inline dispatch_lock
_dispatch_lock_value_from_tid(dispatch_tid tid)
{
	return tid & DLOCK_OWNER_MASK;
}

DISPATCH_ALWAYS_INLINE
static inline dispatch_lock
_dispatch_lock_value_for_self(void)
{
	return _dispatch_lock_value_from_tid(_dispatch_tid_self());
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked(dispatch_lock lock_value)
{
	// equivalent to _dispatch_lock_owner(lock_value) != DLOCK_OWNER_NULL
	return (lock_value & DLOCK_OWNER_MASK) != 0;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked_by(dispatch_lock lock_value, dispatch_tid tid)
{
	// equivalent to _dispatch_lock_owner(lock_value) == tid
	return ((lock_value ^ tid) & DLOCK_OWNER_MASK) == 0;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_is_locked_by_self(dispatch_lock lock_value)
{
	// equivalent to _dispatch_lock_owner(lock_value) == _dispatch_tid_self()
	return ((lock_value ^ _dispatch_tid_self()) & DLOCK_OWNER_MASK) == 0;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_has_waiters(dispatch_lock lock_value)
{
	return (lock_value & DLOCK_WAITERS_BIT);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_lock_has_failed_trylock(dispatch_lock lock_value)
{
	return (lock_value & DLOCK_FAILED_TRYLOCK_BIT);
}
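
/*
 * Illustrative sketch of the lock word encoding (not part of this header):
 * the owner tid lives in the DLOCK_OWNER_MASK bits, while the low bits carry
 * the waiters and failed-trylock state.
 *
 *	dispatch_lock lv = _dispatch_lock_value_for_self(); // owner == self
 *	lv |= DLOCK_WAITERS_BIT;                            // mark as contended
 *	_dispatch_lock_is_locked(lv);         // true: owner bits are non-zero
 *	_dispatch_lock_is_locked_by_self(lv); // true: flag bits are masked out
 *	_dispatch_lock_has_waiters(lv);       // true: waiters bit is set
 */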

#if __has_include(<sys/ulock.h>)
#include <sys/ulock.h>
#ifdef UL_COMPARE_AND_WAIT
#define HAVE_UL_COMPARE_AND_WAIT 1
#endif
#ifdef UL_UNFAIR_LOCK
#define HAVE_UL_UNFAIR_LOCK 1
#endif
#endif

#ifndef HAVE_FUTEX
#ifdef __linux__
#define HAVE_FUTEX 1
#else
#define HAVE_FUTEX 0
#endif
#endif // HAVE_FUTEX

#pragma mark - semaphores

#if USE_MACH_SEM

typedef semaphore_t _dispatch_sema4_t;
#define _DSEMA4_POLICY_FIFO SYNC_POLICY_FIFO
#define _DSEMA4_POLICY_LIFO SYNC_POLICY_LIFO
#define _DSEMA4_TIMEOUT() KERN_OPERATION_TIMED_OUT

#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = MACH_PORT_NULL)
#define _dispatch_sema4_is_created(sema) (*(sema) != MACH_PORT_NULL)
void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy);

#elif USE_POSIX_SEM

typedef sem_t _dispatch_sema4_t;
#define _DSEMA4_POLICY_FIFO 0
#define _DSEMA4_POLICY_LIFO 0
#define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1)

void _dispatch_sema4_init(_dispatch_sema4_t *sema, int policy);
#define _dispatch_sema4_is_created(sema) ((void)sema, 1)
#define _dispatch_sema4_create_slow(sema, policy) ((void)sema, (void)policy)

#elif USE_WIN32_SEM

typedef HANDLE _dispatch_sema4_t;
#define _DSEMA4_POLICY_FIFO 0
#define _DSEMA4_POLICY_LIFO 0
#define _DSEMA4_TIMEOUT() ((errno) = ETIMEDOUT, -1)

#define _dispatch_sema4_init(sema, policy) (void)(*(sema) = 0)
#define _dispatch_sema4_is_created(sema) (*(sema) != 0)
void _dispatch_sema4_create_slow(_dispatch_sema4_t *sema, int policy);

#else
#error "port has to implement _dispatch_sema4_t"
#endif

void _dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy);
void _dispatch_sema4_signal(_dispatch_sema4_t *sema, long count);
void _dispatch_sema4_wait(_dispatch_sema4_t *sema);
bool _dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout);

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_sema4_create(_dispatch_sema4_t *sema, int policy)
{
	if (!_dispatch_sema4_is_created(sema)) {
		_dispatch_sema4_create_slow(sema, policy);
	}
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_sema4_dispose(_dispatch_sema4_t *sema, int policy)
{
	if (_dispatch_sema4_is_created(sema)) {
		_dispatch_sema4_dispose_slow(sema, policy);
	}
}
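
/*
 * Typical lifecycle of a lazily created semaphore (illustrative sketch; the
 * create/dispose wrappers only hit their slow paths when a kernel object
 * actually has to be made or destroyed):
 *
 *	_dispatch_sema4_t sema;
 *	_dispatch_sema4_init(&sema, _DSEMA4_POLICY_FIFO);   // no kernel object yet
 *	_dispatch_sema4_create(&sema, _DSEMA4_POLICY_FIFO); // allocates if needed
 *	_dispatch_sema4_wait(&sema);      // blocks until ...
 *	_dispatch_sema4_signal(&sema, 1); // ... another thread signals
 *	_dispatch_sema4_dispose(&sema, _DSEMA4_POLICY_FIFO);
 */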

#pragma mark - compare and wait

DISPATCH_NOT_TAIL_CALLED
void _dispatch_wait_on_address(uint32_t volatile *address, uint32_t value,
		dispatch_lock_options_t flags);
void _dispatch_wake_by_address(uint32_t volatile *address);
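
/*
 * Minimal compare-and-wait sketch (illustrative only; `word`, BUSY and DONE
 * are hypothetical): a waiter parks while *address still holds the value it
 * observed, and a publisher stores the new value then wakes the address.
 *
 *	// waiter
 *	while (os_atomic_load(&word, acquire) == BUSY) {
 *		_dispatch_wait_on_address(&word, BUSY, DLOCK_LOCK_NONE);
 *	}
 *
 *	// publisher
 *	os_atomic_store(&word, DONE, release);
 *	_dispatch_wake_by_address(&word);
 */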

#pragma mark - thread event
/**
 * @typedef dispatch_thread_event_t
 *
 * @abstract
 * Dispatch Thread Events are used for one-time synchronization between threads.
 *
 * @discussion
 * Dispatch Thread Events are cheap synchronization points used when a thread
 * needs to block until a certain event has happened. Dispatch Thread Events
 * must be initialized and destroyed with _dispatch_thread_event_init() and
 * _dispatch_thread_event_destroy().
 *
 * A Dispatch Thread Event must be waited on and signaled exactly once between
 * initialization and destruction. These objects are simpler than semaphores
 * and do not support being signaled and waited on an arbitrary number of times.
 *
 * This locking primitive has no notion of ownership.
 */
typedef struct dispatch_thread_event_s {
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	// 1 means signalled but not waited on yet
	// UINT32_MAX means waited on, but not signalled yet
	// 0 is the initial and final state
	uint32_t dte_value;
#else
	_dispatch_sema4_t dte_sema;
#endif
} dispatch_thread_event_s, *dispatch_thread_event_t;

DISPATCH_NOT_TAIL_CALLED
void _dispatch_thread_event_wait_slow(dispatch_thread_event_t);
void _dispatch_thread_event_signal_slow(dispatch_thread_event_t);

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_init(dispatch_thread_event_t dte)
{
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	dte->dte_value = 0;
#else
	_dispatch_sema4_init(&dte->dte_sema, _DSEMA4_POLICY_FIFO);
#endif
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_signal(dispatch_thread_event_t dte)
{
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	if (os_atomic_inc_orig(&dte->dte_value, release) == 0) {
		// 0 -> 1 transition doesn't need a signal
		// force a wake even when the value is corrupt,
		// waiters do the validation
		return;
	}
#else
	// fallthrough
#endif
	_dispatch_thread_event_signal_slow(dte);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_wait(dispatch_thread_event_t dte)
{
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	if (os_atomic_dec(&dte->dte_value, acquire) == 0) {
		// 1 -> 0 is always a valid transition, so we can return
		// for any other value, go to the slowpath which checks it's not corrupt
		return;
	}
#else
	// fallthrough
#endif
	_dispatch_thread_event_wait_slow(dte);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_event_destroy(dispatch_thread_event_t dte)
{
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	// nothing to do
	dispatch_assert(dte->dte_value == 0);
#else
	_dispatch_sema4_dispose(&dte->dte_sema, _DSEMA4_POLICY_FIFO);
#endif
}
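
/*
 * One-shot handoff between two threads (illustrative sketch): exactly one
 * wait and one signal happen between init and destroy, in either order.
 *
 *	dispatch_thread_event_s dte;
 *	_dispatch_thread_event_init(&dte);
 *	_dispatch_thread_event_signal(&dte);  // thread B: publish the event
 *	_dispatch_thread_event_wait(&dte);    // thread A: block until signaled
 *	_dispatch_thread_event_destroy(&dte); // thread A, once both have run
 */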

#pragma mark - unfair lock

typedef struct dispatch_unfair_lock_s {
	dispatch_lock dul_lock;
} dispatch_unfair_lock_s, *dispatch_unfair_lock_t;

DISPATCH_NOT_TAIL_CALLED
void _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t l,
		dispatch_lock_options_t options);
void _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t l,
		dispatch_lock tid_cur);

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_unfair_lock_lock(dispatch_unfair_lock_t l)
{
	dispatch_lock value_self = _dispatch_lock_value_for_self();
	if (likely(os_atomic_cmpxchg(&l->dul_lock,
			DLOCK_OWNER_NULL, value_self, acquire))) {
		return;
	}
	return _dispatch_unfair_lock_lock_slow(l, DLOCK_LOCK_NONE);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unfair_lock_trylock(dispatch_unfair_lock_t l, dispatch_tid *owner)
{
	dispatch_lock value_self = _dispatch_lock_value_for_self();
	dispatch_lock old_value, new_value;

	os_atomic_rmw_loop(&l->dul_lock, old_value, new_value, acquire, {
		if (likely(!_dispatch_lock_is_locked(old_value))) {
			new_value = value_self;
		} else {
			new_value = old_value | DLOCK_FAILED_TRYLOCK_BIT;
		}
	});
	if (owner) *owner = _dispatch_lock_owner(new_value);
	return !_dispatch_lock_is_locked(old_value);
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unfair_lock_tryunlock(dispatch_unfair_lock_t l)
{
	dispatch_lock old_value, new_value;

	os_atomic_rmw_loop(&l->dul_lock, old_value, new_value, release, {
		if (unlikely(old_value & DLOCK_FAILED_TRYLOCK_BIT)) {
			new_value = old_value ^ DLOCK_FAILED_TRYLOCK_BIT;
		} else {
			new_value = DLOCK_OWNER_NULL;
		}
	});
	if (unlikely(new_value)) {
		// unlock failed, renew the lock, which needs an acquire barrier
		os_atomic_thread_fence(acquire);
		return false;
	}
	if (unlikely(_dispatch_lock_has_waiters(old_value))) {
		_dispatch_unfair_lock_unlock_slow(l, old_value);
	}
	return true;
}

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_unfair_lock_unlock_had_failed_trylock(dispatch_unfair_lock_t l)
{
	dispatch_lock cur, value_self = _dispatch_lock_value_for_self();
#if HAVE_FUTEX
	if (likely(os_atomic_cmpxchgv(&l->dul_lock,
			value_self, DLOCK_OWNER_NULL, &cur, release))) {
		return false;
	}
#else
	cur = os_atomic_xchg(&l->dul_lock, DLOCK_OWNER_NULL, release);
	if (likely(cur == value_self)) return false;
#endif
	_dispatch_unfair_lock_unlock_slow(l, cur);
	return _dispatch_lock_has_failed_trylock(cur);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_unfair_lock_unlock(dispatch_unfair_lock_t l)
{
	(void)_dispatch_unfair_lock_unlock_had_failed_trylock(l);
}
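
/*
 * Unfair lock usage sketch (illustrative): the inline paths are a single
 * compare-and-swap; contention and failed trylocks divert to the slow paths.
 *
 *	static dispatch_unfair_lock_s dul;    // zero-initialized == unlocked
 *
 *	_dispatch_unfair_lock_lock(&dul);
 *	// ... critical section ...
 *	_dispatch_unfair_lock_unlock(&dul);
 *
 *	dispatch_tid owner;
 *	if (_dispatch_unfair_lock_trylock(&dul, &owner)) {
 *		// ... critical section ...
 *		_dispatch_unfair_lock_unlock(&dul);
 *	} else {
 *		// `owner` holds the lock; DLOCK_FAILED_TRYLOCK_BIT was set so that
 *		// the eventual unlock takes the slow path and sees the contention
 *	}
 */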

#pragma mark - gate lock

#if HAVE_UL_UNFAIR_LOCK || HAVE_FUTEX
#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 1
#else
#define DISPATCH_GATE_USE_FOR_DISPATCH_ONCE 0
#endif

#define DLOCK_GATE_UNLOCKED ((dispatch_lock)0)

#define DLOCK_ONCE_UNLOCKED ((dispatch_once_t)0)
#define DLOCK_ONCE_DONE (~(dispatch_once_t)0)

typedef struct dispatch_gate_s {
	dispatch_lock dgl_lock;
} dispatch_gate_s, *dispatch_gate_t;

typedef struct dispatch_once_gate_s {
	union {
		dispatch_gate_s dgo_gate;
		dispatch_once_t dgo_once;
	};
} dispatch_once_gate_s, *dispatch_once_gate_t;

DISPATCH_NOT_TAIL_CALLED
void _dispatch_gate_wait_slow(dispatch_gate_t l, dispatch_lock value,
		uint32_t flags);
void _dispatch_gate_broadcast_slow(dispatch_gate_t l, dispatch_lock tid_cur);

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_gate_tryenter(dispatch_gate_t l)
{
	return os_atomic_cmpxchg(&l->dgl_lock, DLOCK_GATE_UNLOCKED,
			_dispatch_lock_value_for_self(), acquire);
}

#define _dispatch_gate_wait(l, flags) \
		_dispatch_gate_wait_slow(l, DLOCK_GATE_UNLOCKED, flags)

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_gate_broadcast(dispatch_gate_t l)
{
	dispatch_lock cur, value_self = _dispatch_lock_value_for_self();
	cur = os_atomic_xchg(&l->dgl_lock, DLOCK_GATE_UNLOCKED, release);
	if (likely(cur == value_self)) return;
	_dispatch_gate_broadcast_slow(l, cur);
}
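
/*
 * Gate usage sketch (illustrative): the thread that wins tryenter performs
 * the work, losers park in _dispatch_gate_wait() until the broadcast.
 *
 *	static dispatch_gate_s dgl;           // DLOCK_GATE_UNLOCKED
 *
 *	if (_dispatch_gate_tryenter(&dgl)) {
 *		// ... do the serialized work ...
 *		_dispatch_gate_broadcast(&dgl);   // wakes every parked waiter
 *	} else {
 *		_dispatch_gate_wait(&dgl, DLOCK_LOCK_NONE);
 *	}
 */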

DISPATCH_ALWAYS_INLINE
static inline bool
_dispatch_once_gate_tryenter(dispatch_once_gate_t l)
{
	return os_atomic_cmpxchg(&l->dgo_once, DLOCK_ONCE_UNLOCKED,
			(dispatch_once_t)_dispatch_lock_value_for_self(), acquire);
}

#define _dispatch_once_gate_wait(l) \
		_dispatch_gate_wait_slow(&(l)->dgo_gate, (dispatch_lock)DLOCK_ONCE_DONE, \
				DLOCK_LOCK_NONE)

DISPATCH_ALWAYS_INLINE
static inline dispatch_once_t
_dispatch_once_xchg_done(dispatch_once_t *pred)
{
	return os_atomic_xchg(pred, DLOCK_ONCE_DONE, release);
}

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_once_gate_broadcast(dispatch_once_gate_t l)
{
	dispatch_lock value_self = _dispatch_lock_value_for_self();
	dispatch_once_t cur = _dispatch_once_xchg_done(&l->dgo_once);
	if (likely(cur == (dispatch_once_t)value_self)) return;
	_dispatch_gate_broadcast_slow(&l->dgo_gate, (dispatch_lock)cur);
}
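
/*
 * How a dispatch_once-style fast path can be built on the once gate
 * (illustrative sketch, not the exact dispatch_once implementation):
 *
 *	static dispatch_once_gate_s dgo;      // DLOCK_ONCE_UNLOCKED
 *
 *	if (os_atomic_load(&dgo.dgo_once, acquire) != DLOCK_ONCE_DONE) {
 *		if (_dispatch_once_gate_tryenter(&dgo)) {
 *			// ... run the initializer exactly once ...
 *			_dispatch_once_gate_broadcast(&dgo); // publishes DLOCK_ONCE_DONE
 *		} else {
 *			_dispatch_once_gate_wait(&dgo);      // parks until DONE is set
 *		}
 *	}
 */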

#endif // __DISPATCH_SHIMS_LOCK__