/*
 * libdispatch-703.1.4 — src/shims/lock.c
 */
1 /*
2 * Copyright (c) 2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 #include "internal.h"
22
// Run `syscall` in a retry loop and switch on its error code: on failure the
// code is taken from errno, on success it is 0.  EINTR transparently restarts
// the syscall; the caller supplies the remaining `case` labels through
// __VA_ARGS__.  Any case that neither returns nor continues falls out of the
// switch and the trailing break; terminates the loop.
#define _dlock_syscall_switch(err, syscall, ...) \
	for (;;) { \
		int err; \
		switch ((err = ((syscall) < 0 ? errno : 0))) { \
		case EINTR: continue; \
		__VA_ARGS__ \
		} \
		break; \
	}
32
#if TARGET_OS_MAC
_Static_assert(DLOCK_LOCK_DATA_CONTENTION == ULF_WAIT_WORKQ_DATA_CONTENTION,
		"values should be the same");

// Yield the CPU toward the thread owning `value`, depressing our priority
// for up to `timeout`.  DLOCK_LOCK_DATA_CONTENTION selects the oslock
// variant of the depress option.
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags,
		uint32_t timeout)
{
	const int option = (flags & DLOCK_LOCK_DATA_CONTENTION)
			? SWITCH_OPTION_OSLOCK_DEPRESS : SWITCH_OPTION_DEPRESS;
	thread_switch(_dispatch_lock_owner(value), option, timeout);
}
#endif
51
52 #pragma mark - ulock wrappers
53 #if HAVE_UL_COMPARE_AND_WAIT
54
// Block via __ulock_wait() while *uaddr still holds `val`.
// Returns 0 on wake, ENOTEMPTY when the kernel reports further waiters
// remain (rc > 0), or ETIMEDOUT/EFAULT from the syscall; any other error
// is treated as an internal bug.  EINTR is retried by _dlock_syscall_switch.
static int
_dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
		uint32_t flags)
{
	dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK);
	int rc;
	_dlock_syscall_switch(err,
		rc = __ulock_wait(UL_COMPARE_AND_WAIT | flags, uaddr, val, timeout),
		case 0: return rc > 0 ? ENOTEMPTY : 0;
		case ETIMEDOUT: case EFAULT: return err;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed");
	);
}
68
// Wake a thread parked in __ulock_wait() on *uaddr.  ENOENT (no waiter) is
// benign; any other failure is treated as an internal bug.
static void
_dispatch_ulock_wake(uint32_t *uaddr, uint32_t flags)
{
	dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK);
	_dlock_syscall_switch(err,
		__ulock_wake(UL_COMPARE_AND_WAIT | flags, uaddr, 0),
		case 0: case ENOENT: break;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed");
	);
}
79
80 #endif
81 #if HAVE_UL_UNFAIR_LOCK
82
// Block via the UL_UNFAIR_LOCK ulock while *uaddr still holds `val`.
// returns 0, ETIMEDOUT, ENOTEMPTY, EFAULT
static int
_dispatch_unfair_lock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
		dispatch_lock_options_t flags)
{
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		// <rdar://problem/25075359>
		// No ulock available: convert the timeout from microseconds to
		// milliseconds (minimum 1) and yield toward the lock owner instead.
		timeout = timeout < 1000 ? 1 : timeout / 1000;
		_dispatch_thread_switch(val, flags, timeout);
		return 0;
	}
	int rc;
	_dlock_syscall_switch(err,
		rc = __ulock_wait(UL_UNFAIR_LOCK | flags, uaddr, val, timeout),
		case 0: return rc > 0 ? ENOTEMPTY : 0;
		case ETIMEDOUT: case EFAULT: return err;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed");
	);
}
102
// Wake a thread parked in the UL_UNFAIR_LOCK ulock on *uaddr.  ENOENT (no
// waiter) is benign; other failures are internal bugs.  On the semaphore
// fallback there is nothing to wake: waiters poll via thread_switch().
static void
_dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags)
{
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		// <rdar://problem/25075359>
		return;
	}
	_dlock_syscall_switch(err, __ulock_wake(UL_UNFAIR_LOCK | flags, uaddr, 0),
		case 0: case ENOENT: break;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed");
	);
}
115
116 #endif
117 #pragma mark - futex wrappers
118 #if HAVE_FUTEX
119 #include <sys/time.h>
120 #include <syscall.h>
121
// Thin wrapper over the raw futex(2) syscall (no libc stub exists).
// Returns the syscall's result directly: -1 with errno set on failure.
DISPATCH_ALWAYS_INLINE
static inline int
_dispatch_futex(uint32_t *uaddr, int op, uint32_t val,
		const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3,
		int opflags)
{
	return syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3);
}
130
// FUTEX_WAIT on *uaddr while it still holds `val`; NULL timeout blocks
// indefinitely.  Returns 0 on wake, EWOULDBLOCK when the value had already
// changed, or ETIMEDOUT.  Anything else is blamed on the client (e.g. a
// corrupted lock word).  EINTR is retried by _dlock_syscall_switch.
static int
_dispatch_futex_wait(uint32_t *uaddr, uint32_t val,
		const struct timespec *timeout, int opflags)
{
	_dlock_syscall_switch(err,
		_dispatch_futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags),
		case 0: case EWOULDBLOCK: case ETIMEDOUT: return err;
		default: DISPATCH_CLIENT_CRASH(err, "futex_wait() failed");
	);
}
141
// FUTEX_WAKE up to `wake` waiters parked on *uaddr.  Any syscall failure is
// blamed on the client.  The wake count returned in rc is ignored.
static void
_dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags)
{
	int rc;
	_dlock_syscall_switch(err,
		rc = _dispatch_futex(uaddr, FUTEX_WAKE, wake, NULL, NULL, 0, opflags),
		case 0: return;
		default: DISPATCH_CLIENT_CRASH(err, "futex_wake() failed");
	);
}
152
153 static void
154 _dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect,
155 int opflags)
156 {
157 _dlock_syscall_switch(err,
158 _dispatch_futex(uaddr, FUTEX_LOCK_PI, detect, timeout,
159 NULL, 0, opflags),
160 case 0: return;
161 default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed");
162 );
163 }
164
165 static void
166 _dispatch_futex_unlock_pi(uint32_t *uaddr, int opflags)
167 {
168 _dlock_syscall_switch(err,
169 _dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags),
170 case 0: return;
171 default: DISPATCH_CLIENT_CRASH(errno, "futex_unlock_pi() failed");
172 );
173 }
174
175 #endif
176 #pragma mark - wait for address
177
// Park the calling thread while *address reads `value`: ulock on Darwin,
// futex on Linux, otherwise a thread_switch() poll loop with a growing
// depress timeout that re-checks the address each iteration.  May return
// spuriously on the ulock/futex paths; callers must re-check themselves.
void
_dispatch_wait_on_address(uint32_t volatile *address, uint32_t value,
		dispatch_lock_options_t flags)
{
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wait((uint32_t *)address, value, 0, flags);
#elif HAVE_FUTEX
	_dispatch_futex_wait((uint32_t *)address, value, NULL, FUTEX_PRIVATE_FLAG);
#else
	mach_msg_timeout_t timeout = 1;
	while (os_atomic_load(address, relaxed) == value) {
		thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, timeout++);
	}
#endif
	(void)flags; // unused on the futex and polling paths
}
194
// Wake every thread parked by _dispatch_wait_on_address() on `address`.
// The polling fallback needs no wakeup: waiters re-read the address.
void
_dispatch_wake_by_address(uint32_t volatile *address)
{
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wake((uint32_t *)address, ULF_WAKE_ALL);
#elif HAVE_FUTEX
	_dispatch_futex_wake((uint32_t *)address, INT_MAX, FUTEX_PRIVATE_FLAG);
#else
	(void)address;
#endif
}
206
207 #pragma mark - thread event
208
209 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
210 semaphore_t
211 _dispatch_thread_semaphore_create(void)
212 {
213 semaphore_t s4;
214 kern_return_t kr;
215 while (unlikely(kr = semaphore_create(mach_task_self(), &s4,
216 SYNC_POLICY_FIFO, 0))) {
217 DISPATCH_VERIFY_MIG(kr);
218 _dispatch_temporary_resource_shortage();
219 }
220 return s4;
221 }
222
223 void
224 _dispatch_thread_semaphore_dispose(void *ctxt)
225 {
226 semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt;
227 kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
228 DISPATCH_VERIFY_MIG(kr);
229 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
230 }
231 #endif
232
233 void
234 _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte)
235 {
236 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
237 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
238 kern_return_t kr = semaphore_signal(dte->dte_semaphore);
239 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
240 return;
241 }
242 #endif
243 #if HAVE_UL_COMPARE_AND_WAIT
244 _dispatch_ulock_wake(&dte->dte_value, 0);
245 #elif HAVE_FUTEX
246 _dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG);
247 #elif USE_POSIX_SEM
248 int rc = sem_post(&dte->dte_sem);
249 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
250 #endif
251 }
252
// Slow path of thread-event wait: block until dte_value reads 0 (signaled).
void
_dispatch_thread_event_wait_slow(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		// Mach semaphore fallback: retry waits aborted by signals.
		kern_return_t kr;
		do {
			kr = semaphore_wait(dte->dte_semaphore);
		} while (unlikely(kr == KERN_ABORTED));
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	for (;;) {
		// 0 means signaled, UINT32_MAX is the armed (waiting) value; any
		// other content indicates a corrupted event.
		uint32_t value = os_atomic_load(&dte->dte_value, acquire);
		if (likely(value == 0)) return;
		if (unlikely(value != UINT32_MAX)) {
			DISPATCH_CLIENT_CRASH(value, "Corrupt thread event value");
		}
#if HAVE_UL_COMPARE_AND_WAIT
		// Spurious wakes and EFAULT are tolerated; the loop re-checks the
		// value before returning.
		int rc = _dispatch_ulock_wait(&dte->dte_value, UINT32_MAX, 0, 0);
		dispatch_assert(rc == 0 || rc == EFAULT);
#elif HAVE_FUTEX
		_dispatch_futex_wait(&dte->dte_value, UINT32_MAX,
				NULL, FUTEX_PRIVATE_FLAG);
#endif
	}
#elif USE_POSIX_SEM
	// POSIX semaphore fallback: retry on any failure (notably EINTR).
	int rc;
	do {
		rc = sem_wait(&dte->dte_sem);
	} while (unlikely(rc != 0));
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
#endif
}
289
290 #pragma mark - unfair lock
291
292 #if HAVE_UL_UNFAIR_LOCK
// Slow path of unfair-lock acquisition on the ulock backend: contend for
// the lock word, recording waiter presence by clearing DLOCK_NOWAITERS_BIT
// and parking in the kernel while the lock stays held.
void
_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock_options_t flags)
{
	// `next` is the value we try to install when the lock is free: our tid,
	// with DLOCK_NOWAITERS_BIT also cleared once a wait reported that other
	// waiters remain (ENOTEMPTY), so the eventual unlock still issues a wake.
	dispatch_lock tid_self = _dispatch_tid_self(), next = tid_self;
	dispatch_lock tid_old, tid_new;
	int rc;

	for (;;) {
		os_atomic_rmw_loop(&dul->dul_lock, tid_old, tid_new, acquire, {
			if (likely(!_dispatch_lock_is_locked(tid_old))) {
				tid_new = next; // lock free: claim it
			} else {
				// Held by another thread: clear DLOCK_NOWAITERS_BIT so the
				// unlocker knows to wake us; if already clear, nothing to
				// store, so give up the rmw loop.
				tid_new = tid_old & ~DLOCK_NOWAITERS_BIT;
				if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
			}
		});
		if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) {
			DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
		}
		if (tid_new == next) {
			return; // we installed our tid: lock acquired
		}
		// Park until the holder unlocks; ENOTEMPTY means more waiters are
		// still queued behind us.
		rc = _dispatch_unfair_lock_wait(&dul->dul_lock, tid_new, 0, flags);
		if (rc == ENOTEMPTY) {
			next = tid_self & ~DLOCK_NOWAITERS_BIT;
		} else {
			next = tid_self;
		}
	}
}
324 #elif HAVE_FUTEX
// Slow path of unfair-lock acquisition on the futex backend: delegate to
// FUTEX_LOCK_PI (deadlock detection on, no timeout); the kernel tracks
// waiters and priority inheritance, so `flags` is unused here.
void
_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock_options_t flags)
{
	(void)flags;
	_dispatch_futex_lock_pi(&dul->dul_lock, NULL, 1, FUTEX_PRIVATE_FLAG);
}
332 #else
333 void
334 _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
335 dispatch_lock_options_t flags)
336 {
337 dispatch_lock tid_cur, tid_self = _dispatch_tid_self();
338 uint32_t timeout = 1;
339
340 while (unlikely(!os_atomic_cmpxchgv(&dul->dul_lock,
341 DLOCK_OWNER_NULL, tid_self, &tid_cur, acquire))) {
342 if (unlikely(_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
343 DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
344 }
345 _dispatch_thread_switch(tid_cur, flags, timeout++);
346 }
347 }
348 #endif
349
// Slow path of unfair-lock release: verify ownership, then hand off to any
// recorded waiter.  `tid_cur` is the lock word value the caller observed.
void
_dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock tid_cur)
{
	dispatch_lock_owner tid_self = _dispatch_tid_self();
	if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
		DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread");
	}

#if HAVE_UL_UNFAIR_LOCK
	// A cleared DLOCK_NOWAITERS_BIT means threads are parked in
	// _dispatch_unfair_lock_wait(); issue a wake.
	if (!(tid_cur & DLOCK_NOWAITERS_BIT)) {
		_dispatch_unfair_lock_wake(&dul->dul_lock, 0);
	}
#elif HAVE_FUTEX
	// futex_unlock_pi() handles both OWNER_DIED which we abuse & WAITERS
	_dispatch_futex_unlock_pi(&dul->dul_lock, FUTEX_PRIVATE_FLAG);
#else
	(void)dul; // polling fallback: waiters notice the store on their own
#endif
}
370
371 #pragma mark - gate lock
372
// Slow path of gate wait: loop until the gate word equals `value`, recording
// the presence of a waiter in the word and blocking between attempts.
void
_dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value,
		dispatch_lock_options_t flags)
{
	dispatch_lock tid_self = _dispatch_tid_self(), tid_old, tid_new;
	uint32_t timeout = 1;

	for (;;) {
		os_atomic_rmw_loop(&dgl->dgl_lock, tid_old, tid_new, acquire, {
			if (likely(tid_old == value)) {
				// Gate reached the awaited value: leave with an acquire fence.
				os_atomic_rmw_loop_give_up_with_fence(acquire, return);
			}
#ifdef DLOCK_NOWAITERS_BIT
			// Darwin polarity: a cleared bit records waiters.
			tid_new = tid_old & ~DLOCK_NOWAITERS_BIT;
#else
			// futex polarity: a set bit records waiters.
			tid_new = tid_old | DLOCK_WAITERS_BIT;
#endif
			// Waiter already recorded: nothing to store, skip the CAS.
			if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
		});
		if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) {
			DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
		}
#if HAVE_UL_UNFAIR_LOCK
		_dispatch_unfair_lock_wait(&dgl->dgl_lock, tid_new, 0, flags);
#elif HAVE_FUTEX
		_dispatch_futex_wait(&dgl->dgl_lock, tid_new, NULL, FUTEX_PRIVATE_FLAG);
#else
		// Polling fallback: yield toward the holder, growing the depress
		// timeout each round.
		_dispatch_thread_switch(tid_new, flags, timeout++);
#endif
		(void)timeout; // only used by the thread_switch fallback
	}
}
405
// Slow path of gate broadcast: verify the calling thread holds the gate,
// then wake every parked waiter.  `tid_cur` is the gate word value the
// caller observed.
void
_dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock tid_cur)
{
	dispatch_lock_owner tid_self = _dispatch_tid_self();
	if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
		DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread");
	}

#if HAVE_UL_UNFAIR_LOCK
	_dispatch_unfair_lock_wake(&dgl->dgl_lock, ULF_WAKE_ALL);
#elif HAVE_FUTEX
	_dispatch_futex_wake(&dgl->dgl_lock, INT_MAX, FUTEX_PRIVATE_FLAG);
#else
	(void)dgl; // polling fallback: waiters notice the store on their own
#endif
}