/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
// Retry-on-EINTR wrapper for a raw syscall expression: stores the resulting
// errno (or 0 on success) into `err` and dispatches it to the caller-supplied
// `case` labels. EINTR transparently restarts the syscall.
#define _dlock_syscall_switch(err, syscall, ...) \
	for (;;) { \
		int err; \
		switch ((err = ((syscall) < 0 ? errno : 0))) { \
		case EINTR: continue; \
		__VA_ARGS__ \
		} \
		break; \
	}
34 _Static_assert(DLOCK_LOCK_DATA_CONTENTION
== ULF_WAIT_WORKQ_DATA_CONTENTION
,
35 "values should be the same");
37 DISPATCH_ALWAYS_INLINE
39 _dispatch_thread_switch(dispatch_lock value
, dispatch_lock_options_t flags
,
43 if (flags
& DLOCK_LOCK_DATA_CONTENTION
) {
44 option
= SWITCH_OPTION_OSLOCK_DEPRESS
;
46 option
= SWITCH_OPTION_DEPRESS
;
48 thread_switch(_dispatch_lock_owner(value
), option
, timeout
);
52 #pragma mark - semaphores
// Crash loudly on Mach semaphore API failure. KERN_INVALID_NAME means the
// port was already destroyed, i.e. a client use-after-free.
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		if (unlikely((x) == KERN_INVALID_NAME)) { \
			DISPATCH_CLIENT_CRASH((x), \
					"Use-after-free of dispatch_semaphore_t"); \
		} else if (unlikely(x)) { \
			DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
		} \
	} while (0)
64 _os_semaphore_create_slow(_os_semaphore_t
*s4
, int policy
)
69 _dispatch_fork_becomes_unsafe();
71 // lazily allocate the semaphore port
74 // 1) Switch to a doubly-linked FIFO in user-space.
75 // 2) User-space timers for the timeout.
76 // 3) Use the per-thread semaphore port.
78 while ((kr
= semaphore_create(mach_task_self(), &tmp
, policy
, 0))) {
79 DISPATCH_VERIFY_MIG(kr
);
80 _dispatch_temporary_resource_shortage();
83 if (!os_atomic_cmpxchg(s4
, 0, tmp
, relaxed
)) {
84 kr
= semaphore_destroy(mach_task_self(), tmp
);
85 DISPATCH_VERIFY_MIG(kr
);
86 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
91 _os_semaphore_dispose_slow(_os_semaphore_t
*sema
)
94 semaphore_t sema_port
= *sema
;
95 kr
= semaphore_destroy(mach_task_self(), sema_port
);
96 DISPATCH_VERIFY_MIG(kr
);
97 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
98 *sema
= MACH_PORT_DEAD
;
102 _os_semaphore_signal(_os_semaphore_t
*sema
, long count
)
105 kern_return_t kr
= semaphore_signal(*sema
);
106 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
111 _os_semaphore_wait(_os_semaphore_t
*sema
)
115 kr
= semaphore_wait(*sema
);
116 } while (kr
== KERN_ABORTED
);
117 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
121 _os_semaphore_timedwait(_os_semaphore_t
*sema
, dispatch_time_t timeout
)
123 mach_timespec_t _timeout
;
127 uint64_t nsec
= _dispatch_timeout(timeout
);
128 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
129 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
130 kr
= slowpath(semaphore_timedwait(*sema
, _timeout
));
131 } while (kr
== KERN_ABORTED
);
133 if (kr
== KERN_OPERATION_TIMED_OUT
) {
136 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
// Crash with errno when a POSIX sem_* call reports failure (-1).
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (unlikely((x) == -1)) { \
			DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \
		} \
	} while (0)
147 _os_semaphore_init(_os_semaphore_t
*sema
, int policy DISPATCH_UNUSED
)
149 int rc
= sem_init(sema
, 0, 0);
150 DISPATCH_SEMAPHORE_VERIFY_RET(rc
);
154 _os_semaphore_dispose_slow(_os_semaphore_t
*sema
)
156 int rc
= sem_destroy(sema
);
157 DISPATCH_SEMAPHORE_VERIFY_RET(rc
);
161 _os_semaphore_signal(_os_semaphore_t
*sema
, long count
)
164 int ret
= sem_post(sema
);
165 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
170 _os_semaphore_wait(_os_semaphore_t
*sema
)
172 int ret
= sem_wait(sema
);
173 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
177 _os_semaphore_timedwait(_os_semaphore_t
*sema
, dispatch_time_t timeout
)
179 struct timespec _timeout
;
183 uint64_t nsec
= _dispatch_time_nanoseconds_since_epoch(timeout
);
184 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
185 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
186 ret
= slowpath(sem_timedwait(sema
, &_timeout
));
187 } while (ret
== -1 && errno
== EINTR
);
189 if (ret
== -1 && errno
== ETIMEDOUT
) {
192 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
196 // rdar://problem/8428132
197 static DWORD best_resolution
= 1; // 1ms
200 _push_timer_resolution(DWORD ms
)
203 static dispatch_once_t once
;
206 // only update timer resolution if smaller than default 15.6ms
207 // zero means not updated
211 // aim for the best resolution we can accomplish
212 dispatch_once(&once
, ^{
215 res
= timeGetDevCaps(&tc
, sizeof(tc
));
216 if (res
== MMSYSERR_NOERROR
) {
217 best_resolution
= min(max(tc
.wPeriodMin
, best_resolution
),
222 res
= timeBeginPeriod(best_resolution
);
223 if (res
== TIMERR_NOERROR
) {
224 return best_resolution
;
226 // zero means not updated
230 // match ms parameter to result from _push_timer_resolution
231 DISPATCH_ALWAYS_INLINE
233 _pop_timer_resolution(DWORD ms
)
235 if (ms
) timeEndPeriod(ms
);
239 _os_semaphore_create_slow(_os_semaphore_t
*s4
, int policy DISPATCH_UNUSED
)
243 // lazily allocate the semaphore port
245 while (!dispatch_assume(tmp
= CreateSemaphore(NULL
, 0, LONG_MAX
, NULL
))) {
246 _dispatch_temporary_resource_shortage();
249 if (!os_atomic_cmpxchg(s4
, 0, tmp
, relaxed
)) {
255 _os_semaphore_dispose_slow(_os_semaphore_t
*sema
)
257 HANDLE sema_handle
= *sema
;
258 CloseHandle(sema_handle
);
263 _os_semaphore_signal(_os_semaphore_t
*sema
, long count
)
265 int ret
= ReleaseSemaphore(*sema
, count
, NULL
);
266 dispatch_assume(ret
);
270 _os_semaphore_wait(_os_semaphore_t
*sema
)
272 WaitForSingleObject(*sema
, INFINITE
);
276 _os_semaphore_timedwait(_os_semaphore_t
*sema
, dispatch_time_t timeout
)
283 nsec
= _dispatch_timeout(timeout
);
284 msec
= (DWORD
)(nsec
/ (uint64_t)1000000);
285 resolution
= _push_timer_resolution(msec
);
286 wait_result
= WaitForSingleObject(dsema
->dsema_handle
, msec
);
287 _pop_timer_resolution(resolution
);
288 return wait_result
== WAIT_TIMEOUT
;
291 #error "port has to implement _os_semaphore_t"
294 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
296 _dispatch_thread_semaphore_create(void)
300 while (unlikely(kr
= semaphore_create(mach_task_self(), &s4
,
301 SYNC_POLICY_FIFO
, 0))) {
302 DISPATCH_VERIFY_MIG(kr
);
303 _dispatch_temporary_resource_shortage();
309 _dispatch_thread_semaphore_dispose(void *ctxt
)
311 semaphore_t s4
= (semaphore_t
)(uintptr_t)ctxt
;
312 kern_return_t kr
= semaphore_destroy(mach_task_self(), s4
);
313 DISPATCH_VERIFY_MIG(kr
);
314 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
318 #pragma mark - ulock wrappers
319 #if HAVE_UL_COMPARE_AND_WAIT
322 _dispatch_ulock_wait(uint32_t *uaddr
, uint32_t val
, uint32_t timeout
,
325 dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
);
327 _dlock_syscall_switch(err
,
328 rc
= __ulock_wait(UL_COMPARE_AND_WAIT
| flags
, uaddr
, val
, timeout
),
329 case 0: return rc
> 0 ? ENOTEMPTY
: 0;
330 case ETIMEDOUT
: case EFAULT
: return err
;
331 default: DISPATCH_INTERNAL_CRASH(err
, "ulock_wait() failed");
336 _dispatch_ulock_wake(uint32_t *uaddr
, uint32_t flags
)
338 dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
);
339 _dlock_syscall_switch(err
,
340 __ulock_wake(UL_COMPARE_AND_WAIT
| flags
, uaddr
, 0),
341 case 0: case ENOENT
: break;
342 default: DISPATCH_INTERNAL_CRASH(err
, "ulock_wake() failed");
347 #if HAVE_UL_UNFAIR_LOCK
349 // returns 0, ETIMEDOUT, ENOTEMPTY, EFAULT
351 _dispatch_unfair_lock_wait(uint32_t *uaddr
, uint32_t val
, uint32_t timeout
,
352 dispatch_lock_options_t flags
)
354 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
355 // <rdar://problem/25075359>
356 timeout
= timeout
< 1000 ? 1 : timeout
/ 1000;
357 _dispatch_thread_switch(val
, flags
, timeout
);
361 _dlock_syscall_switch(err
,
362 rc
= __ulock_wait(UL_UNFAIR_LOCK
| flags
, uaddr
, val
, timeout
),
363 case 0: return rc
> 0 ? ENOTEMPTY
: 0;
364 case ETIMEDOUT
: case EFAULT
: return err
;
365 default: DISPATCH_INTERNAL_CRASH(err
, "ulock_wait() failed");
370 _dispatch_unfair_lock_wake(uint32_t *uaddr
, uint32_t flags
)
372 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
373 // <rdar://problem/25075359>
376 _dlock_syscall_switch(err
, __ulock_wake(UL_UNFAIR_LOCK
| flags
, uaddr
, 0),
377 case 0: case ENOENT
: break;
378 default: DISPATCH_INTERNAL_CRASH(err
, "ulock_wake() failed");
383 #pragma mark - futex wrappers
385 #include <sys/time.h>
388 DISPATCH_ALWAYS_INLINE
390 _dispatch_futex(uint32_t *uaddr
, int op
, uint32_t val
,
391 const struct timespec
*timeout
, uint32_t *uaddr2
, uint32_t val3
,
394 return syscall(SYS_futex
, uaddr
, op
| opflags
, val
, timeout
, uaddr2
, val3
);
398 _dispatch_futex_wait(uint32_t *uaddr
, uint32_t val
,
399 const struct timespec
*timeout
, int opflags
)
401 _dlock_syscall_switch(err
,
402 _dispatch_futex(uaddr
, FUTEX_WAIT
, val
, timeout
, NULL
, 0, opflags
),
403 case 0: case EWOULDBLOCK
: case ETIMEDOUT
: return err
;
404 default: DISPATCH_CLIENT_CRASH(err
, "futex_wait() failed");
409 _dispatch_futex_wake(uint32_t *uaddr
, int wake
, int opflags
)
412 _dlock_syscall_switch(err
,
413 rc
= _dispatch_futex(uaddr
, FUTEX_WAKE
, wake
, NULL
, NULL
, 0, opflags
),
415 default: DISPATCH_CLIENT_CRASH(err
, "futex_wake() failed");
420 _dispatch_futex_lock_pi(uint32_t *uaddr
, struct timespec
*timeout
, int detect
,
423 _dlock_syscall_switch(err
,
424 _dispatch_futex(uaddr
, FUTEX_LOCK_PI
, detect
, timeout
,
427 default: DISPATCH_CLIENT_CRASH(errno
, "futex_lock_pi() failed");
432 _dispatch_futex_unlock_pi(uint32_t *uaddr
, int opflags
)
434 _dlock_syscall_switch(err
,
435 _dispatch_futex(uaddr
, FUTEX_UNLOCK_PI
, 0, NULL
, NULL
, 0, opflags
),
437 default: DISPATCH_CLIENT_CRASH(errno
, "futex_unlock_pi() failed");
442 #pragma mark - wait for address
445 _dispatch_wait_on_address(uint32_t volatile *address
, uint32_t value
,
446 dispatch_lock_options_t flags
)
448 #if HAVE_UL_COMPARE_AND_WAIT
449 _dispatch_ulock_wait((uint32_t *)address
, value
, 0, flags
);
451 _dispatch_futex_wait((uint32_t *)address
, value
, NULL
, FUTEX_PRIVATE_FLAG
);
453 mach_msg_timeout_t timeout
= 1;
454 while (os_atomic_load(address
, relaxed
) == value
) {
455 thread_switch(MACH_PORT_NULL
, SWITCH_OPTION_WAIT
, timeout
++);
// Wake every thread parked on `address` (counterpart of
// _dispatch_wait_on_address); no-op on the polling fallback.
void
_dispatch_wake_by_address(uint32_t volatile *address)
{
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wake((uint32_t *)address, ULF_WAKE_ALL);
#elif HAVE_FUTEX
	_dispatch_futex_wake((uint32_t *)address, INT_MAX, FUTEX_PRIVATE_FLAG);
#else
	(void)address;
#endif
}
473 #pragma mark - thread event
476 _dispatch_thread_event_signal_slow(dispatch_thread_event_t dte
)
478 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
479 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
480 kern_return_t kr
= semaphore_signal(dte
->dte_sema
);
481 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
485 #if HAVE_UL_COMPARE_AND_WAIT
486 _dispatch_ulock_wake(&dte
->dte_value
, 0);
488 _dispatch_futex_wake(&dte
->dte_value
, 1, FUTEX_PRIVATE_FLAG
);
490 _os_semaphore_signal(&dte
->dte_sema
, 1);
495 _dispatch_thread_event_wait_slow(dispatch_thread_event_t dte
)
497 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
498 if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
) {
501 kr
= semaphore_wait(dte
->dte_sema
);
502 } while (unlikely(kr
== KERN_ABORTED
));
503 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
507 #if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
509 uint32_t value
= os_atomic_load(&dte
->dte_value
, acquire
);
510 if (likely(value
== 0)) return;
511 if (unlikely(value
!= UINT32_MAX
)) {
512 DISPATCH_CLIENT_CRASH(value
, "Corrupt thread event value");
514 #if HAVE_UL_COMPARE_AND_WAIT
515 int rc
= _dispatch_ulock_wait(&dte
->dte_value
, UINT32_MAX
, 0, 0);
516 dispatch_assert(rc
== 0 || rc
== EFAULT
);
518 _dispatch_futex_wait(&dte
->dte_value
, UINT32_MAX
,
519 NULL
, FUTEX_PRIVATE_FLAG
);
523 _os_semaphore_wait(&dte
->dte_sema
);
527 #pragma mark - unfair lock
529 #if HAVE_UL_UNFAIR_LOCK
531 _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul
,
532 dispatch_lock_options_t flags
)
534 dispatch_lock tid_self
= _dispatch_tid_self(), next
= tid_self
;
535 dispatch_lock tid_old
, tid_new
;
539 os_atomic_rmw_loop(&dul
->dul_lock
, tid_old
, tid_new
, acquire
, {
540 if (likely(!_dispatch_lock_is_locked(tid_old
))) {
543 tid_new
= tid_old
& ~DLOCK_NOWAITERS_BIT
;
544 if (tid_new
== tid_old
) os_atomic_rmw_loop_give_up(break);
547 if (unlikely(_dispatch_lock_is_locked_by(tid_old
, tid_self
))) {
548 DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
550 if (tid_new
== next
) {
553 rc
= _dispatch_unfair_lock_wait(&dul
->dul_lock
, tid_new
, 0, flags
);
554 if (rc
== ENOTEMPTY
) {
555 next
= tid_self
& ~DLOCK_NOWAITERS_BIT
;
563 _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul
,
564 dispatch_lock_options_t flags
)
567 _dispatch_futex_lock_pi(&dul
->dul_lock
, NULL
, 1, FUTEX_PRIVATE_FLAG
);
571 _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul
,
572 dispatch_lock_options_t flags
)
574 dispatch_lock tid_cur
, tid_self
= _dispatch_tid_self();
575 uint32_t timeout
= 1;
577 while (unlikely(!os_atomic_cmpxchgv(&dul
->dul_lock
,
578 DLOCK_OWNER_NULL
, tid_self
, &tid_cur
, acquire
))) {
579 if (unlikely(_dispatch_lock_is_locked_by(tid_cur
, tid_self
))) {
580 DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
582 _dispatch_thread_switch(tid_cur
, flags
, timeout
++);
588 _dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul
,
589 dispatch_lock tid_cur
)
591 dispatch_lock_owner tid_self
= _dispatch_tid_self();
592 if (unlikely(!_dispatch_lock_is_locked_by(tid_cur
, tid_self
))) {
593 DISPATCH_CLIENT_CRASH(tid_cur
, "lock not owned by current thread");
596 #if HAVE_UL_UNFAIR_LOCK
597 if (!(tid_cur
& DLOCK_NOWAITERS_BIT
)) {
598 _dispatch_unfair_lock_wake(&dul
->dul_lock
, 0);
601 // futex_unlock_pi() handles both OWNER_DIED which we abuse & WAITERS
602 _dispatch_futex_unlock_pi(&dul
->dul_lock
, FUTEX_PRIVATE_FLAG
);
608 #pragma mark - gate lock
611 _dispatch_gate_wait_slow(dispatch_gate_t dgl
, dispatch_lock value
,
612 dispatch_lock_options_t flags
)
614 dispatch_lock tid_self
= _dispatch_tid_self(), tid_old
, tid_new
;
615 uint32_t timeout
= 1;
618 os_atomic_rmw_loop(&dgl
->dgl_lock
, tid_old
, tid_new
, acquire
, {
619 if (likely(tid_old
== value
)) {
620 os_atomic_rmw_loop_give_up_with_fence(acquire
, return);
622 #ifdef DLOCK_NOWAITERS_BIT
623 tid_new
= tid_old
& ~DLOCK_NOWAITERS_BIT
;
625 tid_new
= tid_old
| DLOCK_WAITERS_BIT
;
627 if (tid_new
== tid_old
) os_atomic_rmw_loop_give_up(break);
629 if (unlikely(_dispatch_lock_is_locked_by(tid_old
, tid_self
))) {
630 DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
632 #if HAVE_UL_UNFAIR_LOCK
633 _dispatch_unfair_lock_wait(&dgl
->dgl_lock
, tid_new
, 0, flags
);
635 _dispatch_futex_wait(&dgl
->dgl_lock
, tid_new
, NULL
, FUTEX_PRIVATE_FLAG
);
637 _dispatch_thread_switch(tid_new
, flags
, timeout
++);
644 _dispatch_gate_broadcast_slow(dispatch_gate_t dgl
, dispatch_lock tid_cur
)
646 dispatch_lock_owner tid_self
= _dispatch_tid_self();
647 if (unlikely(!_dispatch_lock_is_locked_by(tid_cur
, tid_self
))) {
648 DISPATCH_CLIENT_CRASH(tid_cur
, "lock not owned by current thread");
651 #if HAVE_UL_UNFAIR_LOCK
652 _dispatch_unfair_lock_wake(&dgl
->dgl_lock
, ULF_WAKE_ALL
);
654 _dispatch_futex_wake(&dgl
->dgl_lock
, INT_MAX
, FUTEX_PRIVATE_FLAG
);