/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"

#define _dlock_syscall_switch(err, syscall, ...) \
	for (;;) { \
		int err; \
		switch ((err = ((syscall) < 0 ? errno : 0))) { \
		case EINTR: continue; \
		__VA_ARGS__ \
		} \
		break; \
	}
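
/*
 * Illustrative usage sketch (not part of the original source): the macro
 * retries the wrapped call while it fails with EINTR, normalizes the
 * -1-with-errno convention into `err`, and routes every other outcome to
 * the caller-supplied case labels, e.g.:
 *
 *	_dlock_syscall_switch(err,
 *		__ulock_wake(UL_COMPARE_AND_WAIT, uaddr, 0),
 *		case 0: case ENOENT: break;
 *		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed");
 *	);
 */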

#if TARGET_OS_MAC
_Static_assert(DLOCK_LOCK_DATA_CONTENTION == ULF_WAIT_WORKQ_DATA_CONTENTION,
		"values should be the same");

DISPATCH_ALWAYS_INLINE
static inline void
_dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags,
		uint32_t timeout)
{
	int option;
	if (flags & DLOCK_LOCK_DATA_CONTENTION) {
		option = SWITCH_OPTION_OSLOCK_DEPRESS;
	} else {
		option = SWITCH_OPTION_DEPRESS;
	}
	thread_switch(_dispatch_lock_owner(value), option, timeout);
}
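
/*
 * Note: thread_switch() with SWITCH_OPTION_DEPRESS (or its oslock variant)
 * depresses the caller's priority for up to `timeout` milliseconds while
 * yielding to the lock owner named by `value`; it is a best-effort priority
 * hand-off, not a kernel-managed wait queue.
 */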

#endif

#pragma mark - ulock wrappers
#if HAVE_UL_COMPARE_AND_WAIT

static int
_dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
		uint32_t flags)
{
	dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK);
	int rc;
	_dlock_syscall_switch(err,
		rc = __ulock_wait(UL_COMPARE_AND_WAIT | flags, uaddr, val, timeout),
		case 0: return rc > 0 ? ENOTEMPTY : 0;
		case ETIMEDOUT: case EFAULT: return err;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed");
	);
}

static void
_dispatch_ulock_wake(uint32_t *uaddr, uint32_t flags)
{
	dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK);
	_dlock_syscall_switch(err,
		__ulock_wake(UL_COMPARE_AND_WAIT | flags, uaddr, 0),
		case 0: case ENOENT: break;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed");
	);
}

#endif

#if HAVE_UL_UNFAIR_LOCK

// returns 0, ETIMEDOUT, ENOTEMPTY, EFAULT
static int
_dispatch_unfair_lock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
		dispatch_lock_options_t flags)
{
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		// <rdar://problem/25075359>
		timeout = timeout < 1000 ? 1 : timeout / 1000;
		_dispatch_thread_switch(val, flags, timeout);
		return 0;
	}
	int rc;
	_dlock_syscall_switch(err,
		rc = __ulock_wait(UL_UNFAIR_LOCK | flags, uaddr, val, timeout),
		case 0: return rc > 0 ? ENOTEMPTY : 0;
		case ETIMEDOUT: case EFAULT: return err;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed");
	);
}

static void
_dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags)
{
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		// <rdar://problem/25075359>
		return;
	}
	_dlock_syscall_switch(err, __ulock_wake(UL_UNFAIR_LOCK | flags, uaddr, 0),
		case 0: case ENOENT: break;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed");
	);
}

#endif

#pragma mark - futex wrappers
#if HAVE_FUTEX
#include <sys/time.h>
#include <syscall.h>

DISPATCH_ALWAYS_INLINE
static inline int
_dispatch_futex(uint32_t *uaddr, int op, uint32_t val,
		const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3,
		int opflags)
{
	return (int)syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2,
			val3);
}
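
/*
 * For readers unfamiliar with futex(2), an illustrative direct call
 * equivalent to the FUTEX_WAIT uses below would be:
 *
 *	// block while *uaddr still equals val (NULL timeout: wait forever)
 *	syscall(SYS_futex, uaddr, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, val,
 *			NULL, NULL, 0);
 *
 * FUTEX_PRIVATE_FLAG asserts the word is process-private, letting the
 * kernel skip the shared-mapping lookup.
 */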

static int
_dispatch_futex_wait(uint32_t *uaddr, uint32_t val,
		const struct timespec *timeout, int opflags)
{
	_dlock_syscall_switch(err,
		_dispatch_futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags),
		case 0: case EWOULDBLOCK: case ETIMEDOUT: return err;
		default: DISPATCH_CLIENT_CRASH(err, "futex_wait() failed");
	);
}

static void
_dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags)
{
	int rc;
	_dlock_syscall_switch(err,
		rc = _dispatch_futex(uaddr, FUTEX_WAKE, wake, NULL, NULL, 0, opflags),
		case 0: return;
		default: DISPATCH_CLIENT_CRASH(err, "futex_wake() failed");
	);
}

static void
_dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect,
		int opflags)
{
	_dlock_syscall_switch(err,
		_dispatch_futex(uaddr, FUTEX_LOCK_PI, detect, timeout,
				NULL, 0, opflags),
		case 0: return;
		default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed");
	);
}

static void
_dispatch_futex_unlock_pi(uint32_t *uaddr, int opflags)
{
	_dlock_syscall_switch(err,
		_dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags),
		case 0: return;
		default: DISPATCH_CLIENT_CRASH(errno, "futex_unlock_pi() failed");
	);
}

#endif

#pragma mark - wait for address

void
_dispatch_wait_on_address(uint32_t volatile *address, uint32_t value,
		dispatch_lock_options_t flags)
{
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wait((uint32_t *)address, value, 0, flags);
#elif HAVE_FUTEX
	_dispatch_futex_wait((uint32_t *)address, value, NULL, FUTEX_PRIVATE_FLAG);
#else
	mach_msg_timeout_t timeout = 1;
	while (os_atomic_load(address, relaxed) == value) {
		thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, timeout++);
	}
#endif
	(void)flags;
}
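
/*
 * As with futex(2) and __ulock_wait(), this wait may return spuriously or
 * as soon as *address changes; callers are expected to re-check their
 * predicate in a loop around _dispatch_wait_on_address().
 */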

void
_dispatch_wake_by_address(uint32_t volatile *address)
{
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wake((uint32_t *)address, ULF_WAKE_ALL);
#elif HAVE_FUTEX
	_dispatch_futex_wake((uint32_t *)address, INT_MAX, FUTEX_PRIVATE_FLAG);
#else
	(void)address;
#endif
}

#pragma mark - thread event

#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
semaphore_t
_dispatch_thread_semaphore_create(void)
{
	semaphore_t s4;
	kern_return_t kr;
	while (unlikely(kr = semaphore_create(mach_task_self(), &s4,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		_dispatch_temporary_resource_shortage();
	}
	return s4;
}

void
_dispatch_thread_semaphore_dispose(void *ctxt)
{
	semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt;
	kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
	DISPATCH_VERIFY_MIG(kr);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
}
#endif

void
_dispatch_thread_event_signal_slow(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		kern_return_t kr = semaphore_signal(dte->dte_semaphore);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wake(&dte->dte_value, 0);
#elif HAVE_FUTEX
	_dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG);
#else
	int rc = sem_post(&dte->dte_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
#endif
}

void
_dispatch_thread_event_wait_slow(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		kern_return_t kr;
		do {
			kr = semaphore_wait(dte->dte_semaphore);
		} while (unlikely(kr == KERN_ABORTED));
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	for (;;) {
		uint32_t value = os_atomic_load(&dte->dte_value, acquire);
		if (likely(value == 0)) return;
		if (unlikely(value != UINT32_MAX)) {
			DISPATCH_CLIENT_CRASH(value, "Corrupt thread event value");
		}
#if HAVE_UL_COMPARE_AND_WAIT
		int rc = _dispatch_ulock_wait(&dte->dte_value, UINT32_MAX, 0, 0);
		dispatch_assert(rc == 0 || rc == EFAULT);
#elif HAVE_FUTEX
		_dispatch_futex_wait(&dte->dte_value, UINT32_MAX,
				NULL, FUTEX_PRIVATE_FLAG);
#endif
	}
#else
	int rc;
	do {
		rc = sem_wait(&dte->dte_sem);
	} while (unlikely(rc != 0));
	DISPATCH_SEMAPHORE_VERIFY_RET(rc);
#endif
}
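
/*
 * Protocol note, assuming the inline fast paths (in shims/lock.h) increment
 * dte_value to signal and decrement it to wait: 0 means the signal was
 * already consumed, UINT32_MAX means a waiter decremented first and is
 * parked, and any other value indicates an over-signaled or corrupt event,
 * hence the crash above.
 */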

#pragma mark - unfair lock

#if HAVE_UL_UNFAIR_LOCK
void
_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock_options_t flags)
{
	dispatch_lock tid_self = _dispatch_tid_self(), next = tid_self;
	dispatch_lock tid_old, tid_new;
	int rc;

	for (;;) {
		os_atomic_rmw_loop(&dul->dul_lock, tid_old, tid_new, acquire, {
			if (likely(!_dispatch_lock_is_locked(tid_old))) {
				tid_new = next;
			} else {
				tid_new = tid_old & ~DLOCK_NOWAITERS_BIT;
				if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
			}
		});
		if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) {
			DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
		}
		if (tid_new == next) {
			return;
		}
		rc = _dispatch_unfair_lock_wait(&dul->dul_lock, tid_new, 0, flags);
		if (rc == ENOTEMPTY) {
			next = tid_self & ~DLOCK_NOWAITERS_BIT;
		} else {
			next = tid_self;
		}
	}
}

#elif HAVE_FUTEX
void
_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock_options_t flags)
{
	(void)flags;
	_dispatch_futex_lock_pi(&dul->dul_lock, NULL, 1, FUTEX_PRIVATE_FLAG);
}

#else
void
_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock_options_t flags)
{
	dispatch_lock tid_cur, tid_self = _dispatch_tid_self();
	uint32_t timeout = 1;

	while (unlikely(!os_atomic_cmpxchgv(&dul->dul_lock,
			DLOCK_OWNER_NULL, tid_self, &tid_cur, acquire))) {
		if (unlikely(_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
			DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
		}
		_dispatch_thread_switch(tid_cur, flags, timeout++);
	}
}
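
/*
 * This portable fallback spins on the DLOCK_OWNER_NULL -> tid_self CAS,
 * yielding to the current owner with a growing depress interval
 * (timeout++) between attempts instead of blocking in the kernel.
 */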

#endif

void
_dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock tid_cur)
{
	dispatch_lock_owner tid_self = _dispatch_tid_self();
	if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
		DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread");
	}

#if HAVE_UL_UNFAIR_LOCK
	if (!(tid_cur & DLOCK_NOWAITERS_BIT)) {
		_dispatch_unfair_lock_wake(&dul->dul_lock, 0);
	}
#elif HAVE_FUTEX
	// futex_unlock_pi() handles both OWNER_DIED which we abuse & WAITERS
	_dispatch_futex_unlock_pi(&dul->dul_lock, FUTEX_PRIVATE_FLAG);
#else
	(void)dul;
#endif
}

#pragma mark - gate lock

void
_dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value,
		dispatch_lock_options_t flags)
{
	dispatch_lock tid_self = _dispatch_tid_self(), tid_old, tid_new;
	uint32_t timeout = 1;

	for (;;) {
		os_atomic_rmw_loop(&dgl->dgl_lock, tid_old, tid_new, acquire, {
			if (likely(tid_old == value)) {
				os_atomic_rmw_loop_give_up_with_fence(acquire, return);
			}
#ifdef DLOCK_NOWAITERS_BIT
			tid_new = tid_old & ~DLOCK_NOWAITERS_BIT;
#else
			tid_new = tid_old | DLOCK_WAITERS_BIT;
#endif
			if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
		});
		if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) {
			DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
		}
#if HAVE_UL_UNFAIR_LOCK
		_dispatch_unfair_lock_wait(&dgl->dgl_lock, tid_new, 0, flags);
#elif HAVE_FUTEX
		_dispatch_futex_wait(&dgl->dgl_lock, tid_new, NULL,
				FUTEX_PRIVATE_FLAG);
#else
		_dispatch_thread_switch(tid_new, flags, timeout++);
#endif
		(void)timeout;
	}
}
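
/*
 * A gate is acquired once and then released for every waiter at once via
 * _dispatch_gate_broadcast_slow() below: `value` is the terminal word the
 * caller expects, so waiters park until the word reaches it and return
 * with acquire semantics via os_atomic_rmw_loop_give_up_with_fence().
 */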

void
_dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock tid_cur)
{
	dispatch_lock_owner tid_self = _dispatch_tid_self();
	if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
		DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread");
	}

#if HAVE_UL_UNFAIR_LOCK
	_dispatch_unfair_lock_wake(&dgl->dgl_lock, ULF_WAKE_ALL);
#elif HAVE_FUTEX
	_dispatch_futex_wake(&dgl->dgl_lock, INT_MAX, FUTEX_PRIVATE_FLAG);
#else
	(void)dgl;
#endif
}