2 * Copyright (c) 2016 Apple Inc. All rights reserved.
4 * @APPLE_APACHE_LICENSE_HEADER_START@
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 * @APPLE_APACHE_LICENSE_HEADER_END@
// Run a lock-related syscall, retrying on EINTR, and dispatch on the
// resulting error code. `err` is declared inside the loop; the caller's
// `case`/`default` labels are injected via __VA_ARGS__.
// NOTE(review): macro was truncated in this source (missing retry loop and
// closing braces); reconstructed — verify against upstream libdispatch.
#define _dlock_syscall_switch(err, syscall, ...) \
	for (;;) { \
		int err; \
		switch ((err = ((syscall) < 0 ? errno : 0))) { \
		case EINTR: continue; \
		__VA_ARGS__ \
		} \
		break; \
	}
34 _Static_assert(DLOCK_LOCK_DATA_CONTENTION
== ULF_WAIT_WORKQ_DATA_CONTENTION
,
35 "values should be the same");
37 #if !HAVE_UL_UNFAIR_LOCK
38 DISPATCH_ALWAYS_INLINE
40 _dispatch_thread_switch(dispatch_lock value
, dispatch_lock_options_t flags
,
44 if (flags
& DLOCK_LOCK_DATA_CONTENTION
) {
45 option
= SWITCH_OPTION_OSLOCK_DEPRESS
;
47 option
= SWITCH_OPTION_DEPRESS
;
49 thread_switch(_dispatch_lock_owner(value
), option
, timeout
);
51 #endif // HAVE_UL_UNFAIR_LOCK
54 #pragma mark - semaphores
// Use the per-thread semaphore cache when the private header is available.
// The two #defines are alternatives: without the #else/#endif (missing in
// this source) both would be active and the second would be a redefinition.
#if __has_include(<os/semaphore_private.h>)
#include <os/semaphore_private.h>
#define DISPATCH_USE_OS_SEMAPHORE_CACHE 1
#else
#define DISPATCH_USE_OS_SEMAPHORE_CACHE 0
#endif
// Crash deliberately on Mach semaphore errors: KERN_INVALID_NAME means the
// port was already destroyed (client use-after-free); anything else nonzero
// is an internal failure. Truncated closing braces restored.
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		DISPATCH_VERIFY_MIG(x); \
		if (unlikely((x) == KERN_INVALID_NAME)) { \
			DISPATCH_CLIENT_CRASH((x), \
				"Use-after-free of dispatch_semaphore_t or dispatch_group_t"); \
		} else if (unlikely(x)) { \
			DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
		} \
	} while (0)
75 _dispatch_sema4_create_slow(_dispatch_sema4_t
*s4
, int policy
)
77 semaphore_t tmp
= MACH_PORT_NULL
;
79 _dispatch_fork_becomes_unsafe();
81 // lazily allocate the semaphore port
84 // 1) Switch to a doubly-linked FIFO in user-space.
85 // 2) User-space timers for the timeout.
87 #if DISPATCH_USE_OS_SEMAPHORE_CACHE
88 if (policy
== _DSEMA4_POLICY_FIFO
) {
89 tmp
= (_dispatch_sema4_t
)os_get_cached_semaphore();
90 if (!os_atomic_cmpxchg(s4
, MACH_PORT_NULL
, tmp
, relaxed
)) {
91 os_put_cached_semaphore((os_semaphore_t
)tmp
);
97 kern_return_t kr
= semaphore_create(mach_task_self(), &tmp
, policy
, 0);
98 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
100 if (!os_atomic_cmpxchg(s4
, MACH_PORT_NULL
, tmp
, relaxed
)) {
101 kr
= semaphore_destroy(mach_task_self(), tmp
);
102 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
107 _dispatch_sema4_dispose_slow(_dispatch_sema4_t
*sema
, int policy
)
109 semaphore_t sema_port
= *sema
;
110 *sema
= MACH_PORT_DEAD
;
111 #if DISPATCH_USE_OS_SEMAPHORE_CACHE
112 if (policy
== _DSEMA4_POLICY_FIFO
) {
113 return os_put_cached_semaphore((os_semaphore_t
)sema_port
);
116 kern_return_t kr
= semaphore_destroy(mach_task_self(), sema_port
);
117 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
121 _dispatch_sema4_signal(_dispatch_sema4_t
*sema
, long count
)
124 kern_return_t kr
= semaphore_signal(*sema
);
125 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
130 _dispatch_sema4_wait(_dispatch_sema4_t
*sema
)
134 kr
= semaphore_wait(*sema
);
135 } while (kr
== KERN_ABORTED
);
136 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
140 _dispatch_sema4_timedwait(_dispatch_sema4_t
*sema
, dispatch_time_t timeout
)
142 mach_timespec_t _timeout
;
146 uint64_t nsec
= _dispatch_timeout(timeout
);
147 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
148 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
149 kr
= slowpath(semaphore_timedwait(*sema
, _timeout
));
150 } while (kr
== KERN_ABORTED
);
152 if (kr
== KERN_OPERATION_TIMED_OUT
) {
155 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
// POSIX semaphore calls return -1/errno on failure; any failure here is an
// internal bug, so crash with errno. Truncated closing braces restored.
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (unlikely((x) == -1)) { \
			DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \
		} \
	} while (0)
166 _dispatch_sema4_init(_dispatch_sema4_t
*sema
, int policy DISPATCH_UNUSED
)
168 int rc
= sem_init(sema
, 0, 0);
169 DISPATCH_SEMAPHORE_VERIFY_RET(rc
);
173 _dispatch_sema4_dispose_slow(_dispatch_sema4_t
*sema
, int policy DISPATCH_UNUSED
)
175 int rc
= sem_destroy(sema
);
176 DISPATCH_SEMAPHORE_VERIFY_RET(rc
);
180 _dispatch_sema4_signal(_dispatch_sema4_t
*sema
, long count
)
183 int ret
= sem_post(sema
);
184 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
189 _dispatch_sema4_wait(_dispatch_sema4_t
*sema
)
191 int ret
= sem_wait(sema
);
192 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
196 _dispatch_sema4_timedwait(_dispatch_sema4_t
*sema
, dispatch_time_t timeout
)
198 struct timespec _timeout
;
202 uint64_t nsec
= _dispatch_time_nanoseconds_since_epoch(timeout
);
203 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
204 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
205 ret
= slowpath(sem_timedwait(sema
, &_timeout
));
206 } while (ret
== -1 && errno
== EINTR
);
208 if (ret
== -1 && errno
== ETIMEDOUT
) {
211 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
215 // rdar://problem/8428132
216 static DWORD best_resolution
= 1; // 1ms
219 _push_timer_resolution(DWORD ms
)
222 static dispatch_once_t once
;
225 // only update timer resolution if smaller than default 15.6ms
226 // zero means not updated
230 // aim for the best resolution we can accomplish
231 dispatch_once(&once
, ^{
234 res
= timeGetDevCaps(&tc
, sizeof(tc
));
235 if (res
== MMSYSERR_NOERROR
) {
236 best_resolution
= min(max(tc
.wPeriodMin
, best_resolution
),
241 res
= timeBeginPeriod(best_resolution
);
242 if (res
== TIMERR_NOERROR
) {
243 return best_resolution
;
245 // zero means not updated
249 // match ms parameter to result from _push_timer_resolution
250 DISPATCH_ALWAYS_INLINE
252 _pop_timer_resolution(DWORD ms
)
254 if (ms
) timeEndPeriod(ms
);
258 _dispatch_sema4_create_slow(_dispatch_sema4_t
*s4
, int policy DISPATCH_UNUSED
)
262 // lazily allocate the semaphore port
264 while (!dispatch_assume(tmp
= CreateSemaphore(NULL
, 0, LONG_MAX
, NULL
))) {
265 _dispatch_temporary_resource_shortage();
268 if (!os_atomic_cmpxchg(s4
, 0, tmp
, relaxed
)) {
274 _dispatch_sema4_dispose_slow(_dispatch_sema4_t
*sema
, int policy DISPATCH_UNUSED
)
276 HANDLE sema_handle
= *sema
;
277 CloseHandle(sema_handle
);
282 _dispatch_sema4_signal(_dispatch_sema4_t
*sema
, long count
)
284 int ret
= ReleaseSemaphore(*sema
, count
, NULL
);
285 dispatch_assume(ret
);
289 _dispatch_sema4_wait(_dispatch_sema4_t
*sema
)
291 WaitForSingleObject(*sema
, INFINITE
);
295 _dispatch_sema4_timedwait(_dispatch_sema4_t
*sema
, dispatch_time_t timeout
)
302 nsec
= _dispatch_timeout(timeout
);
303 msec
= (DWORD
)(nsec
/ (uint64_t)1000000);
304 resolution
= _push_timer_resolution(msec
);
305 wait_result
= WaitForSingleObject(dsema
->dsema_handle
, msec
);
306 _pop_timer_resolution(resolution
);
307 return wait_result
== WAIT_TIMEOUT
;
310 #error "port has to implement _dispatch_sema4_t"
313 #pragma mark - ulock wrappers
314 #if HAVE_UL_COMPARE_AND_WAIT
317 _dispatch_ulock_wait(uint32_t *uaddr
, uint32_t val
, uint32_t timeout
,
321 _dlock_syscall_switch(err
,
322 rc
= __ulock_wait(UL_COMPARE_AND_WAIT
| flags
, uaddr
, val
, timeout
),
323 case 0: return rc
> 0 ? ENOTEMPTY
: 0;
324 case ETIMEDOUT
: case EFAULT
: return err
;
325 case EOWNERDEAD
: DISPATCH_CLIENT_CRASH(*uaddr
,
326 "corruption of lock owner");
327 default: DISPATCH_INTERNAL_CRASH(err
, "ulock_wait() failed");
332 _dispatch_ulock_wake(uint32_t *uaddr
, uint32_t flags
)
334 _dlock_syscall_switch(err
,
335 __ulock_wake(UL_COMPARE_AND_WAIT
| flags
, uaddr
, 0),
336 case 0: case ENOENT
: break;
337 default: DISPATCH_INTERNAL_CRASH(err
, "ulock_wake() failed");
342 #if HAVE_UL_UNFAIR_LOCK
344 // returns 0, ETIMEDOUT, ENOTEMPTY, EFAULT
346 _dispatch_unfair_lock_wait(uint32_t *uaddr
, uint32_t val
, uint32_t timeout
,
347 dispatch_lock_options_t flags
)
350 _dlock_syscall_switch(err
,
351 rc
= __ulock_wait(UL_UNFAIR_LOCK
| flags
, uaddr
, val
, timeout
),
352 case 0: return rc
> 0 ? ENOTEMPTY
: 0;
353 case ETIMEDOUT
: case EFAULT
: return err
;
354 case EOWNERDEAD
: DISPATCH_CLIENT_CRASH(*uaddr
,
355 "corruption of lock owner");
356 default: DISPATCH_INTERNAL_CRASH(err
, "ulock_wait() failed");
361 _dispatch_unfair_lock_wake(uint32_t *uaddr
, uint32_t flags
)
363 _dlock_syscall_switch(err
, __ulock_wake(UL_UNFAIR_LOCK
| flags
, uaddr
, 0),
364 case 0: case ENOENT
: break;
365 default: DISPATCH_INTERNAL_CRASH(err
, "ulock_wake() failed");
370 #pragma mark - futex wrappers
#include <sys/time.h>
// NOTE(review): the #endif below had no matching #ifdef in this source;
// Android exposes the syscall header at a different path — reconstructed.
#ifdef __ANDROID__
#include <sys/syscall.h>
#else
#include <syscall.h>
#endif /* __ANDROID__ */
379 DISPATCH_ALWAYS_INLINE
381 _dispatch_futex(uint32_t *uaddr
, int op
, uint32_t val
,
382 const struct timespec
*timeout
, uint32_t *uaddr2
, uint32_t val3
,
385 return (int)syscall(SYS_futex
, uaddr
, op
| opflags
, val
, timeout
, uaddr2
, val3
);
389 _dispatch_futex_wait(uint32_t *uaddr
, uint32_t val
,
390 const struct timespec
*timeout
, int opflags
)
392 _dlock_syscall_switch(err
,
393 _dispatch_futex(uaddr
, FUTEX_WAIT
, val
, timeout
, NULL
, 0, opflags
),
394 case 0: case EWOULDBLOCK
: case ETIMEDOUT
: return err
;
395 default: DISPATCH_CLIENT_CRASH(err
, "futex_wait() failed");
400 _dispatch_futex_wake(uint32_t *uaddr
, int wake
, int opflags
)
403 _dlock_syscall_switch(err
,
404 rc
= _dispatch_futex(uaddr
, FUTEX_WAKE
, (uint32_t)wake
, NULL
, NULL
, 0, opflags
),
406 default: DISPATCH_CLIENT_CRASH(err
, "futex_wake() failed");
411 _dispatch_futex_lock_pi(uint32_t *uaddr
, struct timespec
*timeout
, int detect
,
414 _dlock_syscall_switch(err
,
415 _dispatch_futex(uaddr
, FUTEX_LOCK_PI
, (uint32_t)detect
, timeout
,
418 default: DISPATCH_CLIENT_CRASH(errno
, "futex_lock_pi() failed");
423 _dispatch_futex_unlock_pi(uint32_t *uaddr
, int opflags
)
425 _dlock_syscall_switch(err
,
426 _dispatch_futex(uaddr
, FUTEX_UNLOCK_PI
, 0, NULL
, NULL
, 0, opflags
),
428 default: DISPATCH_CLIENT_CRASH(errno
, "futex_unlock_pi() failed");
433 #pragma mark - wait for address
436 _dispatch_wait_on_address(uint32_t volatile *address
, uint32_t value
,
437 dispatch_lock_options_t flags
)
439 #if HAVE_UL_COMPARE_AND_WAIT
440 _dispatch_ulock_wait((uint32_t *)address
, value
, 0, flags
);
442 _dispatch_futex_wait((uint32_t *)address
, value
, NULL
, FUTEX_PRIVATE_FLAG
);
444 mach_msg_timeout_t timeout
= 1;
445 while (os_atomic_load(address
, relaxed
) == value
) {
446 thread_switch(MACH_PORT_NULL
, SWITCH_OPTION_WAIT
, timeout
++);
// Wake every thread blocked in _dispatch_wait_on_address on `address`.
// NOTE(review): the #elif/#else/#endif spine and braces were truncated in
// this source; reconstructed.
void
_dispatch_wake_by_address(uint32_t volatile *address)
{
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wake((uint32_t *)address, ULF_WAKE_ALL);
#elif HAVE_FUTEX
	_dispatch_futex_wake((uint32_t *)address, INT_MAX, FUTEX_PRIVATE_FLAG);
#else
	(void)address;
#endif
}
464 #pragma mark - thread event
467 _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte
)
469 #if HAVE_UL_COMPARE_AND_WAIT
470 _dispatch_ulock_wake(&dte
->dte_value
, 0);
472 _dispatch_futex_wake(&dte
->dte_value
, 1, FUTEX_PRIVATE_FLAG
);
474 _dispatch_sema4_signal(&dte
->dte_sema
, 1);
479 _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte
)
481 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
483 uint32_t value
= os_atomic_load(&dte
->dte_value
, acquire
);
484 if (likely(value
== 0)) return;
485 if (unlikely(value
!= UINT32_MAX
)) {
486 DISPATCH_CLIENT_CRASH(value
, "Corrupt thread event value");
488 #if HAVE_UL_COMPARE_AND_WAIT
489 int rc
= _dispatch_ulock_wait(&dte
->dte_value
, UINT32_MAX
, 0, 0);
490 dispatch_assert(rc
== 0 || rc
== EFAULT
);
492 _dispatch_futex_wait(&dte
->dte_value
, UINT32_MAX
,
493 NULL
, FUTEX_PRIVATE_FLAG
);
497 _dispatch_sema4_wait(&dte
->dte_sema
);
501 #pragma mark - unfair lock
503 #if HAVE_UL_UNFAIR_LOCK
505 _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul
,
506 dispatch_lock_options_t flags
)
508 dispatch_lock value_self
= _dispatch_lock_value_for_self();
509 dispatch_lock old_value
, new_value
, next
= value_self
;
513 os_atomic_rmw_loop(&dul
->dul_lock
, old_value
, new_value
, acquire
, {
514 if (likely(!_dispatch_lock_is_locked(old_value
))) {
517 new_value
= old_value
| DLOCK_WAITERS_BIT
;
518 if (new_value
== old_value
) os_atomic_rmw_loop_give_up(break);
521 if (unlikely(_dispatch_lock_is_locked_by(old_value
, value_self
))) {
522 DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
524 if (new_value
== next
) {
527 rc
= _dispatch_unfair_lock_wait(&dul
->dul_lock
, new_value
, 0, flags
);
528 if (rc
== ENOTEMPTY
) {
529 next
= value_self
| DLOCK_WAITERS_BIT
;
537 _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul
,
538 dispatch_lock_options_t flags
)
541 _dispatch_futex_lock_pi(&dul
->dul_lock
, NULL
, 1, FUTEX_PRIVATE_FLAG
);
545 _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul
,
546 dispatch_lock_options_t flags
)
548 dispatch_lock cur
, value_self
= _dispatch_lock_value_for_self();
549 uint32_t timeout
= 1;
551 while (unlikely(!os_atomic_cmpxchgv(&dul
->dul_lock
,
552 DLOCK_OWNER_NULL
, value_self
, &cur
, acquire
))) {
553 if (unlikely(_dispatch_lock_is_locked_by(cur
, self
))) {
554 DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
556 _dispatch_thread_switch(cur
, flags
, timeout
++);
562 _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul
, dispatch_lock cur
)
564 if (unlikely(!_dispatch_lock_is_locked_by_self(cur
))) {
565 DISPATCH_CLIENT_CRASH(cur
, "lock not owned by current thread");
568 #if HAVE_UL_UNFAIR_LOCK
569 if (_dispatch_lock_has_waiters(cur
)) {
570 _dispatch_unfair_lock_wake(&dul
->dul_lock
, 0);
573 // futex_unlock_pi() handles both OWNER_DIED which we abuse & WAITERS
574 _dispatch_futex_unlock_pi(&dul
->dul_lock
, FUTEX_PRIVATE_FLAG
);
580 #pragma mark - gate lock
583 _dispatch_gate_wait_slow(dispatch_gate_t dgl
, dispatch_lock value
,
584 dispatch_lock_options_t flags
)
586 dispatch_lock self
= _dispatch_lock_value_for_self();
587 dispatch_lock old_value
, new_value
;
588 uint32_t timeout
= 1;
591 os_atomic_rmw_loop(&dgl
->dgl_lock
, old_value
, new_value
, acquire
, {
592 if (likely(old_value
== value
)) {
593 os_atomic_rmw_loop_give_up_with_fence(acquire
, return);
595 new_value
= old_value
| DLOCK_WAITERS_BIT
;
596 if (new_value
== old_value
) os_atomic_rmw_loop_give_up(break);
598 if (unlikely(_dispatch_lock_is_locked_by(old_value
, self
))) {
599 DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
601 #if HAVE_UL_UNFAIR_LOCK
602 _dispatch_unfair_lock_wait(&dgl
->dgl_lock
, new_value
, 0, flags
);
604 _dispatch_futex_wait(&dgl
->dgl_lock
, new_value
, NULL
, FUTEX_PRIVATE_FLAG
);
606 _dispatch_thread_switch(new_value
, flags
, timeout
++);
614 _dispatch_gate_broadcast_slow(dispatch_gate_t dgl
, dispatch_lock cur
)
616 if (unlikely(!_dispatch_lock_is_locked_by_self(cur
))) {
617 DISPATCH_CLIENT_CRASH(cur
, "lock not owned by current thread");
620 #if HAVE_UL_UNFAIR_LOCK
621 _dispatch_unfair_lock_wake(&dgl
->dgl_lock
, ULF_WAKE_ALL
);
623 _dispatch_futex_wake(&dgl
->dgl_lock
, INT_MAX
, FUTEX_PRIVATE_FLAG
);