// apple/libdispatch 913.1.6: src/shims/lock.c
/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"

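// Retry a syscall on EINTR and dispatch on an errno-style result: the
// wrapped expression is re-evaluated on every iteration, `err` is 0 on
// success and errno otherwise, and the caller supplies the remaining
// case labels.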
#define _dlock_syscall_switch(err, syscall, ...) \
	for (;;) { \
		int err; \
		switch ((err = ((syscall) < 0 ? errno : 0))) { \
		case EINTR: continue; \
		__VA_ARGS__ \
		} \
		break; \
	}

#if TARGET_OS_MAC
_Static_assert(DLOCK_LOCK_DATA_CONTENTION == ULF_WAIT_WORKQ_DATA_CONTENTION,
		"values should be the same");

#if !HAVE_UL_UNFAIR_LOCK
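// Fallback for kernels without ulocks: yield to the lock owner via Mach
// thread_switch() with priority depression for up to `timeout` ms.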
DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags,
		uint32_t timeout)
{
	int option;
	if (flags & DLOCK_LOCK_DATA_CONTENTION) {
		option = SWITCH_OPTION_OSLOCK_DEPRESS;
	} else {
		option = SWITCH_OPTION_DEPRESS;
	}
	thread_switch(_dispatch_lock_owner(value), option, timeout);
}
#endif // HAVE_UL_UNFAIR_LOCK
#endif

#pragma mark - semaphores

#if USE_MACH_SEM
#if __has_include(<os/semaphore_private.h>)
#include <os/semaphore_private.h>
#define DISPATCH_USE_OS_SEMAPHORE_CACHE 1
#else
#define DISPATCH_USE_OS_SEMAPHORE_CACHE 0
#endif

#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		DISPATCH_VERIFY_MIG(x); \
		if (unlikely((x) == KERN_INVALID_NAME)) { \
			DISPATCH_CLIENT_CRASH((x), \
				"Use-after-free of dispatch_semaphore_t or dispatch_group_t"); \
		} else if (unlikely(x)) { \
			DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
		} \
	} while (0)

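// Lazily allocate the semaphore port and publish it with a relaxed
// compare-and-swap; a thread that loses the race recycles (or destroys)
// the semaphore it just obtained.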
void
_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy)
{
	semaphore_t tmp = MACH_PORT_NULL;

	_dispatch_fork_becomes_unsafe();

	// lazily allocate the semaphore port

	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.

#if DISPATCH_USE_OS_SEMAPHORE_CACHE
	if (policy == _DSEMA4_POLICY_FIFO) {
		tmp = (_dispatch_sema4_t)os_get_cached_semaphore();
		if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) {
			os_put_cached_semaphore((os_semaphore_t)tmp);
		}
		return;
	}
#endif

	kern_return_t kr = semaphore_create(mach_task_self(), &tmp, policy, 0);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);

	if (!os_atomic_cmpxchg(s4, MACH_PORT_NULL, tmp, relaxed)) {
		kr = semaphore_destroy(mach_task_self(), tmp);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
}

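// Replacing the port with MACH_PORT_DEAD (rather than MACH_PORT_NULL) makes
// a later wait/signal on the disposed semaphore fail with KERN_INVALID_NAME,
// which DISPATCH_SEMAPHORE_VERIFY_KR reports as a use-after-free.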
void
_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy)
{
	semaphore_t sema_port = *sema;
	*sema = MACH_PORT_DEAD;
#if DISPATCH_USE_OS_SEMAPHORE_CACHE
	if (policy == _DSEMA4_POLICY_FIFO) {
		return os_put_cached_semaphore((os_semaphore_t)sema_port);
	}
#endif
	kern_return_t kr = semaphore_destroy(mach_task_self(), sema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
}

void
_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count)
{
	do {
		kern_return_t kr = semaphore_signal(*sema);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	} while (--count);
}

void
_dispatch_sema4_wait(_dispatch_sema4_t *sema)
{
	kern_return_t kr;
	do {
		kr = semaphore_wait(*sema);
	} while (kr == KERN_ABORTED);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
}

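// semaphore_timedwait() takes a relative mach_timespec_t, so the remaining
// timeout is recomputed on every KERN_ABORTED wakeup; returns true on
// timeout, false once the semaphore has been acquired.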
bool
_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout)
{
	mach_timespec_t _timeout;
	kern_return_t kr;

	do {
		uint64_t nsec = _dispatch_timeout(timeout);
		_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
		_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
		kr = slowpath(semaphore_timedwait(*sema, _timeout));
	} while (kr == KERN_ABORTED);

	if (kr == KERN_OPERATION_TIMED_OUT) {
		return true;
	}
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	return false;
}
#elif USE_POSIX_SEM
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (unlikely((x) == -1)) { \
			DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \
		} \
	} while (0)

void
_dispatch_sema4_init(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED)
{
	int rc = sem_init(sema, 0, 0);
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
}

void
_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED)
{
	int rc = sem_destroy(sema);
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
}

void
_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count)
{
	do {
		int ret = sem_post(sema);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
	} while (--count);
}

void
_dispatch_sema4_wait(_dispatch_sema4_t *sema)
{
	int ret = sem_wait(sema);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
}

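// Unlike the Mach variant, sem_timedwait() takes an absolute CLOCK_REALTIME
// deadline, hence the conversion from nanoseconds since the epoch; EINTR is
// retried, and only ETIMEDOUT maps to a true return.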
bool
_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout)
{
	struct timespec _timeout;
	int ret;

	do {
		uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout);
		_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
		_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
		ret = slowpath(sem_timedwait(sema, &_timeout));
	} while (ret == -1 && errno == EINTR);

	if (ret == -1 && errno == ETIMEDOUT) {
		return true;
	}
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
	return false;
}
#elif USE_WIN32_SEM
// rdar://problem/8428132
static DWORD best_resolution = 1; // 1ms

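// The default Windows timer granularity (~15.6ms) is far too coarse for
// short timed waits, so _push_timer_resolution() temporarily raises the
// system timer resolution via timeBeginPeriod(); every successful push must
// be paired with a _pop_timer_resolution() of the returned value.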
static DWORD
_push_timer_resolution(DWORD ms)
{
	MMRESULT res;
	static dispatch_once_t once;

	if (ms > 16) {
		// only update timer resolution if smaller than default 15.6ms
		// zero means not updated
		return 0;
	}

	// aim for the best resolution we can accomplish
	dispatch_once(&once, ^{
		TIMECAPS tc;
		MMRESULT res;
		res = timeGetDevCaps(&tc, sizeof(tc));
		if (res == MMSYSERR_NOERROR) {
			best_resolution = min(max(tc.wPeriodMin, best_resolution),
					tc.wPeriodMax);
		}
	});

	res = timeBeginPeriod(best_resolution);
	if (res == TIMERR_NOERROR) {
		return best_resolution;
	}
	// zero means not updated
	return 0;
}

// match ms parameter to result from _push_timer_resolution
DISPATCH_ALWAYS_INLINE
static inline void
_pop_timer_resolution(DWORD ms)
{
	if (ms) timeEndPeriod(ms);
}

void
_dispatch_sema4_create_slow(_dispatch_sema4_t *s4, int policy DISPATCH_UNUSED)
{
	HANDLE tmp;

	// lazily allocate the semaphore port

	while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) {
		_dispatch_temporary_resource_shortage();
	}

	if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
		CloseHandle(tmp);
	}
}

void
_dispatch_sema4_dispose_slow(_dispatch_sema4_t *sema, int policy DISPATCH_UNUSED)
{
	HANDLE sema_handle = *sema;
	CloseHandle(sema_handle);
	*sema = 0;
}

void
_dispatch_sema4_signal(_dispatch_sema4_t *sema, long count)
{
	int ret = ReleaseSemaphore(*sema, count, NULL);
	dispatch_assume(ret);
}

void
_dispatch_sema4_wait(_dispatch_sema4_t *sema)
{
	WaitForSingleObject(*sema, INFINITE);
}

bool
_dispatch_sema4_timedwait(_dispatch_sema4_t *sema, dispatch_time_t timeout)
{
	uint64_t nsec;
	DWORD msec;
	DWORD resolution;
	DWORD wait_result;

	nsec = _dispatch_timeout(timeout);
	msec = (DWORD)(nsec / (uint64_t)1000000);
	resolution = _push_timer_resolution(msec);
	wait_result = WaitForSingleObject(*sema, msec);
	_pop_timer_resolution(resolution);
	return wait_result == WAIT_TIMEOUT;
}
#else
#error "port has to implement _dispatch_sema4_t"
#endif

#pragma mark - ulock wrappers
#if HAVE_UL_COMPARE_AND_WAIT

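// Thin wrappers around the private Darwin __ulock_wait()/__ulock_wake()
// syscalls. A positive return value from __ulock_wait() signals that other
// threads are still waiting on the address; it is surfaced here as
// ENOTEMPTY.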
static int
_dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
		uint32_t flags)
{
	int rc;
	_dlock_syscall_switch(err,
		rc = __ulock_wait(UL_COMPARE_AND_WAIT | flags, uaddr, val, timeout),
		case 0: return rc > 0 ? ENOTEMPTY : 0;
		case ETIMEDOUT: case EFAULT: return err;
		case EOWNERDEAD: DISPATCH_CLIENT_CRASH(*uaddr,
				"corruption of lock owner");
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed");
	);
}

static void
_dispatch_ulock_wake(uint32_t *uaddr, uint32_t flags)
{
	_dlock_syscall_switch(err,
		__ulock_wake(UL_COMPARE_AND_WAIT | flags, uaddr, 0),
		case 0: case ENOENT: break;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed");
	);
}

#endif
#if HAVE_UL_UNFAIR_LOCK

// returns 0, ETIMEDOUT, ENOTEMPTY, EFAULT
static int
_dispatch_unfair_lock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
		dispatch_lock_options_t flags)
{
	int rc;
	_dlock_syscall_switch(err,
		rc = __ulock_wait(UL_UNFAIR_LOCK | flags, uaddr, val, timeout),
		case 0: return rc > 0 ? ENOTEMPTY : 0;
		case ETIMEDOUT: case EFAULT: return err;
		case EOWNERDEAD: DISPATCH_CLIENT_CRASH(*uaddr,
				"corruption of lock owner");
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed");
	);
}

static void
_dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags)
{
	_dlock_syscall_switch(err, __ulock_wake(UL_UNFAIR_LOCK | flags, uaddr, 0),
		case 0: case ENOENT: break;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed");
	);
}

#endif
#pragma mark - futex wrappers
#if HAVE_FUTEX
#include <sys/time.h>
#ifdef __ANDROID__
#include <sys/syscall.h>
#else
#include <syscall.h>
#endif /* __ANDROID__ */

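// Generic six-argument futex(2) wrapper; glibc exposes no futex() function,
// so the call goes through syscall(SYS_futex, ...) directly.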
DISPATCH_ALWAYS_INLINE
static inline int
_dispatch_futex(uint32_t *uaddr, int op, uint32_t val,
		const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3,
		int opflags)
{
	return (int)syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3);
}

static int
_dispatch_futex_wait(uint32_t *uaddr, uint32_t val,
		const struct timespec *timeout, int opflags)
{
	_dlock_syscall_switch(err,
		_dispatch_futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags),
		case 0: case EWOULDBLOCK: case ETIMEDOUT: return err;
		default: DISPATCH_CLIENT_CRASH(err, "futex_wait() failed");
	);
}

static void
_dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags)
{
	int rc;
	_dlock_syscall_switch(err,
		rc = _dispatch_futex(uaddr, FUTEX_WAKE, (uint32_t)wake, NULL, NULL, 0, opflags),
		case 0: return;
		default: DISPATCH_CLIENT_CRASH(err, "futex_wake() failed");
	);
}

static void
_dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect,
		int opflags)
{
	_dlock_syscall_switch(err,
		_dispatch_futex(uaddr, FUTEX_LOCK_PI, (uint32_t)detect, timeout,
				NULL, 0, opflags),
		case 0: return;
		default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed");
	);
}

static void
_dispatch_futex_unlock_pi(uint32_t *uaddr, int opflags)
{
	_dlock_syscall_switch(err,
		_dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags),
		case 0: return;
		default: DISPATCH_CLIENT_CRASH(errno, "futex_unlock_pi() failed");
	);
}

#endif
#pragma mark - wait for address

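// Portable park/unpark on a 32-bit word: ulocks on Darwin, futexes on Linux,
// otherwise a polite spin that yields with growing thread_switch() depression
// times. Either wait primitive can wake spuriously, so a caller is expected
// to re-check the word in a loop, along these lines (a sketch; `word` and
// `busy_value` are hypothetical):
//
//	while (os_atomic_load(&word, acquire) == busy_value) {
//		_dispatch_wait_on_address(&word, busy_value, DLOCK_LOCK_NONE);
//	}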
void
_dispatch_wait_on_address(uint32_t volatile *address, uint32_t value,
		dispatch_lock_options_t flags)
{
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wait((uint32_t *)address, value, 0, flags);
#elif HAVE_FUTEX
	_dispatch_futex_wait((uint32_t *)address, value, NULL, FUTEX_PRIVATE_FLAG);
#else
	mach_msg_timeout_t timeout = 1;
	while (os_atomic_load(address, relaxed) == value) {
		thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, timeout++);
	}
#endif
	(void)flags;
}

void
_dispatch_wake_by_address(uint32_t volatile *address)
{
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wake((uint32_t *)address, ULF_WAKE_ALL);
#elif HAVE_FUTEX
	_dispatch_futex_wake((uint32_t *)address, INT_MAX, FUTEX_PRIVATE_FLAG);
#else
	(void)address;
#endif
}

#pragma mark - thread event

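// A thread event is a one-shot binary rendezvous. On ulock/futex platforms
// dte_value holds 0 (signaled) or UINT32_MAX (a waiter is parked); any other
// value indicates memory corruption. Other platforms fall back to a
// semaphore.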
void
_dispatch_thread_event_signal_slow(dispatch_thread_event_t dte)
{
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wake(&dte->dte_value, 0);
#elif HAVE_FUTEX
	_dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG);
#else
	_dispatch_sema4_signal(&dte->dte_sema, 1);
#endif
}

void
_dispatch_thread_event_wait_slow(dispatch_thread_event_t dte)
{
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	for (;;) {
		uint32_t value = os_atomic_load(&dte->dte_value, acquire);
		if (likely(value == 0)) return;
		if (unlikely(value != UINT32_MAX)) {
			DISPATCH_CLIENT_CRASH(value, "Corrupt thread event value");
		}
#if HAVE_UL_COMPARE_AND_WAIT
		int rc = _dispatch_ulock_wait(&dte->dte_value, UINT32_MAX, 0, 0);
		dispatch_assert(rc == 0 || rc == EFAULT);
#elif HAVE_FUTEX
		_dispatch_futex_wait(&dte->dte_value, UINT32_MAX,
				NULL, FUTEX_PRIVATE_FLAG);
#endif
	}
#else
	_dispatch_sema4_wait(&dte->dte_sema);
#endif
}

#pragma mark - unfair lock

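// Contended acquisition path. The lock word stores the owner's thread ID
// plus DLOCK_WAITERS_BIT; a waiter first sets that bit so the unlocking
// thread knows to issue a wake, then parks in the kernel. A wait that
// returns ENOTEMPTY means other waiters remain parked, so the lock must be
// (re)acquired with the waiters bit preserved.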
#if HAVE_UL_UNFAIR_LOCK
void
_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock_options_t flags)
{
	dispatch_lock value_self = _dispatch_lock_value_for_self();
	dispatch_lock old_value, new_value, next = value_self;
	int rc;

	for (;;) {
		os_atomic_rmw_loop(&dul->dul_lock, old_value, new_value, acquire, {
			if (likely(!_dispatch_lock_is_locked(old_value))) {
				new_value = next;
			} else {
				new_value = old_value | DLOCK_WAITERS_BIT;
				if (new_value == old_value) os_atomic_rmw_loop_give_up(break);
			}
		});
		if (unlikely(_dispatch_lock_is_locked_by(old_value, value_self))) {
			DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
		}
		if (new_value == next) {
			return;
		}
		rc = _dispatch_unfair_lock_wait(&dul->dul_lock, new_value, 0, flags);
		if (rc == ENOTEMPTY) {
			next = value_self | DLOCK_WAITERS_BIT;
		} else {
			next = value_self;
		}
	}
}
#elif HAVE_FUTEX
void
_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock_options_t flags)
{
	(void)flags;
	_dispatch_futex_lock_pi(&dul->dul_lock, NULL, 1, FUTEX_PRIVATE_FLAG);
}
#else
void
_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock_options_t flags)
{
	dispatch_lock cur, value_self = _dispatch_lock_value_for_self();
	uint32_t timeout = 1;

	while (unlikely(!os_atomic_cmpxchgv(&dul->dul_lock,
			DLOCK_OWNER_NULL, value_self, &cur, acquire))) {
		if (unlikely(_dispatch_lock_is_locked_by(cur, value_self))) {
			DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
		}
		_dispatch_thread_switch(cur, flags, timeout++);
	}
}
#endif

void
_dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul, dispatch_lock cur)
{
	if (unlikely(!_dispatch_lock_is_locked_by_self(cur))) {
		DISPATCH_CLIENT_CRASH(cur, "lock not owned by current thread");
	}

#if HAVE_UL_UNFAIR_LOCK
	if (_dispatch_lock_has_waiters(cur)) {
		_dispatch_unfair_lock_wake(&dul->dul_lock, 0);
	}
#elif HAVE_FUTEX
	// futex_unlock_pi() handles both FUTEX_OWNER_DIED (which we abuse)
	// and FUTEX_WAITERS
	_dispatch_futex_unlock_pi(&dul->dul_lock, FUTEX_PRIVATE_FLAG);
#else
	(void)dul;
#endif
}

#pragma mark - gate lock

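// A gate is, in effect, a one-shot lock used for one-time initialization and
// similar hand-offs: the owner eventually publishes a final lock-word value
// and broadcasts, and _dispatch_gate_wait_slow() only returns once the word
// equals the expected `value`.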
void
_dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value,
		dispatch_lock_options_t flags)
{
	dispatch_lock self = _dispatch_lock_value_for_self();
	dispatch_lock old_value, new_value;
	uint32_t timeout = 1;

	for (;;) {
		os_atomic_rmw_loop(&dgl->dgl_lock, old_value, new_value, acquire, {
			if (likely(old_value == value)) {
				os_atomic_rmw_loop_give_up_with_fence(acquire, return);
			}
			new_value = old_value | DLOCK_WAITERS_BIT;
			if (new_value == old_value) os_atomic_rmw_loop_give_up(break);
		});
		if (unlikely(_dispatch_lock_is_locked_by(old_value, self))) {
			DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
		}
#if HAVE_UL_UNFAIR_LOCK
		_dispatch_unfair_lock_wait(&dgl->dgl_lock, new_value, 0, flags);
#elif HAVE_FUTEX
		_dispatch_futex_wait(&dgl->dgl_lock, new_value, NULL, FUTEX_PRIVATE_FLAG);
#else
		_dispatch_thread_switch(new_value, flags, timeout++);
#endif
		(void)timeout;
		(void)flags;
	}
}

void
_dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock cur)
{
	if (unlikely(!_dispatch_lock_is_locked_by_self(cur))) {
		DISPATCH_CLIENT_CRASH(cur, "lock not owned by current thread");
	}

#if HAVE_UL_UNFAIR_LOCK
	_dispatch_unfair_lock_wake(&dgl->dgl_lock, ULF_WAKE_ALL);
#elif HAVE_FUTEX
	_dispatch_futex_wake(&dgl->dgl_lock, INT_MAX, FUTEX_PRIVATE_FLAG);
#else
	(void)dgl;
#endif
}