]> git.saurik.com Git - apple/libdispatch.git/blob - src/shims/lock.c
libdispatch-703.50.37.tar.gz
[apple/libdispatch.git] / src / shims / lock.c
1 /*
2 * Copyright (c) 2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 #include "internal.h"
22
// Retry-on-EINTR wrapper for raw syscalls: evaluates `syscall`, maps a
// negative return to errno (0 on success), and switches on that code in
// `err`. EINTR restarts the syscall; callers append additional `case`
// labels via __VA_ARGS__. Falling out of the switch exits the loop via
// the trailing `break`.
#define _dlock_syscall_switch(err, syscall, ...) \
	for (;;) { \
		int err; \
		switch ((err = ((syscall) < 0 ? errno : 0))) { \
		case EINTR: continue; \
		__VA_ARGS__ \
		} \
		break; \
	}
32
33 #if TARGET_OS_MAC
34 _Static_assert(DLOCK_LOCK_DATA_CONTENTION == ULF_WAIT_WORKQ_DATA_CONTENTION,
35 "values should be the same");
36
37 DISPATCH_ALWAYS_INLINE
38 static inline void
39 _dispatch_thread_switch(dispatch_lock value, dispatch_lock_options_t flags,
40 uint32_t timeout)
41 {
42 int option;
43 if (flags & DLOCK_LOCK_DATA_CONTENTION) {
44 option = SWITCH_OPTION_OSLOCK_DEPRESS;
45 } else {
46 option = SWITCH_OPTION_DEPRESS;
47 }
48 thread_switch(_dispatch_lock_owner(value), option, timeout);
49 }
50 #endif
51
52 #pragma mark - semaphores
53
54 #if USE_MACH_SEM
// Crash with a diagnosable code on mach semaphore failures:
// KERN_INVALID_NAME means the port is gone, reported as a client
// use-after-free; any other non-zero kr is an internal API failure.
// NOTE: evaluates (x) more than once — pass a variable, not a call.
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
	if (unlikely((x) == KERN_INVALID_NAME)) { \
		DISPATCH_CLIENT_CRASH((x), "Use-after-free of dispatch_semaphore_t"); \
	} else if (unlikely(x)) { \
		DISPATCH_INTERNAL_CRASH((x), "mach semaphore API failure"); \
	} \
} while (0)
62
63 void
64 _os_semaphore_create_slow(_os_semaphore_t *s4, int policy)
65 {
66 kern_return_t kr;
67 semaphore_t tmp;
68
69 _dispatch_fork_becomes_unsafe();
70
71 // lazily allocate the semaphore port
72
73 // Someday:
74 // 1) Switch to a doubly-linked FIFO in user-space.
75 // 2) User-space timers for the timeout.
76 // 3) Use the per-thread semaphore port.
77
78 while ((kr = semaphore_create(mach_task_self(), &tmp, policy, 0))) {
79 DISPATCH_VERIFY_MIG(kr);
80 _dispatch_temporary_resource_shortage();
81 }
82
83 if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
84 kr = semaphore_destroy(mach_task_self(), tmp);
85 DISPATCH_VERIFY_MIG(kr);
86 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
87 }
88 }
89
90 void
91 _os_semaphore_dispose_slow(_os_semaphore_t *sema)
92 {
93 kern_return_t kr;
94 semaphore_t sema_port = *sema;
95 kr = semaphore_destroy(mach_task_self(), sema_port);
96 DISPATCH_VERIFY_MIG(kr);
97 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
98 *sema = MACH_PORT_DEAD;
99 }
100
101 void
102 _os_semaphore_signal(_os_semaphore_t *sema, long count)
103 {
104 do {
105 kern_return_t kr = semaphore_signal(*sema);
106 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
107 } while (--count);
108 }
109
110 void
111 _os_semaphore_wait(_os_semaphore_t *sema)
112 {
113 kern_return_t kr;
114 do {
115 kr = semaphore_wait(*sema);
116 } while (kr == KERN_ABORTED);
117 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
118 }
119
120 bool
121 _os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout)
122 {
123 mach_timespec_t _timeout;
124 kern_return_t kr;
125
126 do {
127 uint64_t nsec = _dispatch_timeout(timeout);
128 _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
129 _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
130 kr = slowpath(semaphore_timedwait(*sema, _timeout));
131 } while (kr == KERN_ABORTED);
132
133 if (kr == KERN_OPERATION_TIMED_OUT) {
134 return true;
135 }
136 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
137 return false;
138 }
139 #elif USE_POSIX_SEM
// Crash (with errno as the diagnostic code) when a POSIX semaphore call
// reports failure via the conventional -1 return.
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
	if (unlikely((x) == -1)) { \
		DISPATCH_INTERNAL_CRASH(errno, "POSIX semaphore API failure"); \
	} \
} while (0)
145
146 void
147 _os_semaphore_init(_os_semaphore_t *sema, int policy DISPATCH_UNUSED)
148 {
149 int rc = sem_init(sema, 0, 0);
150 DISPATCH_SEMAPHORE_VERIFY_RET(rc);
151 }
152
153 void
154 _os_semaphore_dispose_slow(_os_semaphore_t *sema)
155 {
156 int rc = sem_destroy(sema);
157 DISPATCH_SEMAPHORE_VERIFY_RET(rc);
158 }
159
160 void
161 _os_semaphore_signal(_os_semaphore_t *sema, long count)
162 {
163 do {
164 int ret = sem_post(sema);
165 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
166 } while (--count);
167 }
168
169 void
170 _os_semaphore_wait(_os_semaphore_t *sema)
171 {
172 int ret = sem_wait(sema);
173 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
174 }
175
176 bool
177 _os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout)
178 {
179 struct timespec _timeout;
180 int ret;
181
182 do {
183 uint64_t nsec = _dispatch_time_nanoseconds_since_epoch(timeout);
184 _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
185 _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
186 ret = slowpath(sem_timedwait(sema, &_timeout));
187 } while (ret == -1 && errno == EINTR);
188
189 if (ret == -1 && errno == ETIMEDOUT) {
190 return true;
191 }
192 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
193 return false;
194 }
195 #elif USE_WIN32_SEM
196 // rdar://problem/8428132
197 static DWORD best_resolution = 1; // 1ms
198
// Temporarily raise the system timer resolution so short timed waits are
// serviced promptly. Returns the period (ms) actually pushed, or 0 when
// nothing was changed; the caller must pass that value to
// _pop_timer_resolution() to balance the timeBeginPeriod() call.
static DWORD
_push_timer_resolution(DWORD ms)
{
	MMRESULT res;
	static dispatch_once_t once;

	if (ms > 16) {
		// only update timer resolution if smaller than default 15.6ms
		// zero means not updated
		return 0;
	}

	// aim for the best resolution we can accomplish
	// (queried once; clamps our 1ms target into the device's supported range)
	dispatch_once(&once, ^{
		TIMECAPS tc;
		MMRESULT res;
		res = timeGetDevCaps(&tc, sizeof(tc));
		if (res == MMSYSERR_NOERROR) {
			best_resolution = min(max(tc.wPeriodMin, best_resolution),
					tc.wPeriodMax);
		}
	});

	res = timeBeginPeriod(best_resolution);
	if (res == TIMERR_NOERROR) {
		return best_resolution;
	}
	// zero means not updated
	return 0;
}
229
230 // match ms parameter to result from _push_timer_resolution
231 DISPATCH_ALWAYS_INLINE
232 static inline void
233 _pop_timer_resolution(DWORD ms)
234 {
235 if (ms) timeEndPeriod(ms);
236 }
237
238 void
239 _os_semaphore_create_slow(_os_semaphore_t *s4, int policy DISPATCH_UNUSED)
240 {
241 HANDLE tmp;
242
243 // lazily allocate the semaphore port
244
245 while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) {
246 _dispatch_temporary_resource_shortage();
247 }
248
249 if (!os_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
250 CloseHandle(tmp);
251 }
252 }
253
254 void
255 _os_semaphore_dispose_slow(_os_semaphore_t *sema)
256 {
257 HANDLE sema_handle = *sema;
258 CloseHandle(sema_handle);
259 *sema = 0;
260 }
261
262 void
263 _os_semaphore_signal(_os_semaphore_t *sema, long count)
264 {
265 int ret = ReleaseSemaphore(*sema, count, NULL);
266 dispatch_assume(ret);
267 }
268
// Block indefinitely until the semaphore is signaled; the wait result is
// intentionally ignored (no timeout or abandonment handling here).
void
_os_semaphore_wait(_os_semaphore_t *sema)
{
	WaitForSingleObject(*sema, INFINITE);
}
274
275 bool
276 _os_semaphore_timedwait(_os_semaphore_t *sema, dispatch_time_t timeout)
277 {
278 uint64_t nsec;
279 DWORD msec;
280 DWORD resolution;
281 DWORD wait_result;
282
283 nsec = _dispatch_timeout(timeout);
284 msec = (DWORD)(nsec / (uint64_t)1000000);
285 resolution = _push_timer_resolution(msec);
286 wait_result = WaitForSingleObject(dsema->dsema_handle, msec);
287 _pop_timer_resolution(resolution);
288 return wait_result == WAIT_TIMEOUT;
289 }
290 #else
291 #error "port has to implement _os_semaphore_t"
292 #endif
293
294 #if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
295 semaphore_t
296 _dispatch_thread_semaphore_create(void)
297 {
298 semaphore_t s4;
299 kern_return_t kr;
300 while (unlikely(kr = semaphore_create(mach_task_self(), &s4,
301 SYNC_POLICY_FIFO, 0))) {
302 DISPATCH_VERIFY_MIG(kr);
303 _dispatch_temporary_resource_shortage();
304 }
305 return s4;
306 }
307
308 void
309 _dispatch_thread_semaphore_dispose(void *ctxt)
310 {
311 semaphore_t s4 = (semaphore_t)(uintptr_t)ctxt;
312 kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
313 DISPATCH_VERIFY_MIG(kr);
314 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
315 }
316 #endif
317
318 #pragma mark - ulock wrappers
319 #if HAVE_UL_COMPARE_AND_WAIT
320
// Block in __ulock_wait() while *uaddr still holds `val`.
// Returns 0 on wake, ENOTEMPTY when the kernel reports other waiters
// remain (rc > 0), or ETIMEDOUT/EFAULT passed through; any other error
// crashes. EINTR is retried by _dlock_syscall_switch.
static int
_dispatch_ulock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
		uint32_t flags)
{
	// ulocks are only taken when the semaphore fallback is compiled out.
	dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK);
	int rc;
	_dlock_syscall_switch(err,
		rc = __ulock_wait(UL_COMPARE_AND_WAIT | flags, uaddr, val, timeout),
		case 0: return rc > 0 ? ENOTEMPTY : 0;
		case ETIMEDOUT: case EFAULT: return err;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed");
	);
}
334
// Wake waiter(s) blocked in _dispatch_ulock_wait() on uaddr.
// ENOENT (no waiter present) is benign; other errors crash.
static void
_dispatch_ulock_wake(uint32_t *uaddr, uint32_t flags)
{
	dispatch_assert(!DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK);
	_dlock_syscall_switch(err,
		__ulock_wake(UL_COMPARE_AND_WAIT | flags, uaddr, 0),
		case 0: case ENOENT: break;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed");
	);
}
345
346 #endif
347 #if HAVE_UL_UNFAIR_LOCK
348
// Block while *uaddr still holds `val` (unfair-lock flavor).
// Returns 0, ETIMEDOUT, ENOTEMPTY (other waiters remain), or EFAULT.
static int
_dispatch_unfair_lock_wait(uint32_t *uaddr, uint32_t val, uint32_t timeout,
		dispatch_lock_options_t flags)
{
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		// <rdar://problem/25075359>
		// No ulock available: approximate the wait with a directed yield;
		// convert the microsecond timeout to ms, rounding up to at least 1.
		timeout = timeout < 1000 ? 1 : timeout / 1000;
		_dispatch_thread_switch(val, flags, timeout);
		return 0;
	}
	int rc;
	_dlock_syscall_switch(err,
		rc = __ulock_wait(UL_UNFAIR_LOCK | flags, uaddr, val, timeout),
		case 0: return rc > 0 ? ENOTEMPTY : 0;
		case ETIMEDOUT: case EFAULT: return err;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wait() failed");
	);
}
368
// Wake waiter(s) blocked in _dispatch_unfair_lock_wait() on uaddr.
// In the semaphore-fallback configuration waiters merely yielded, so
// there is nothing to wake. ENOENT (no waiter) is benign.
static void
_dispatch_unfair_lock_wake(uint32_t *uaddr, uint32_t flags)
{
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		// <rdar://problem/25075359>
		return;
	}
	_dlock_syscall_switch(err, __ulock_wake(UL_UNFAIR_LOCK | flags, uaddr, 0),
		case 0: case ENOENT: break;
		default: DISPATCH_INTERNAL_CRASH(err, "ulock_wake() failed");
	);
}
381
382 #endif
383 #pragma mark - futex wrappers
384 #if HAVE_FUTEX
385 #include <sys/time.h>
386 #include <syscall.h>
387
388 DISPATCH_ALWAYS_INLINE
389 static inline int
390 _dispatch_futex(uint32_t *uaddr, int op, uint32_t val,
391 const struct timespec *timeout, uint32_t *uaddr2, uint32_t val3,
392 int opflags)
393 {
394 return syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3);
395 }
396
// FUTEX_WAIT on uaddr while it still holds `val`.
// Returns 0, EWOULDBLOCK (value already changed), or ETIMEDOUT; EINTR is
// retried by _dlock_syscall_switch, anything else crashes.
static int
_dispatch_futex_wait(uint32_t *uaddr, uint32_t val,
		const struct timespec *timeout, int opflags)
{
	_dlock_syscall_switch(err,
		_dispatch_futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags),
		case 0: case EWOULDBLOCK: case ETIMEDOUT: return err;
		default: DISPATCH_CLIENT_CRASH(err, "futex_wait() failed");
	);
}
407
// FUTEX_WAKE up to `wake` waiters on uaddr; the count of threads
// actually woken (rc) is not needed by callers.
static void
_dispatch_futex_wake(uint32_t *uaddr, int wake, int opflags)
{
	int rc;
	_dlock_syscall_switch(err,
		rc = _dispatch_futex(uaddr, FUTEX_WAKE, wake, NULL, NULL, 0, opflags),
		case 0: return;
		default: DISPATCH_CLIENT_CRASH(err, "futex_wake() failed");
	);
}
418
// FUTEX_LOCK_PI: take the priority-inheritance futex at uaddr, blocking
// in the kernel until acquired (NULL timeout callers wait forever).
static void
_dispatch_futex_lock_pi(uint32_t *uaddr, struct timespec *timeout, int detect,
		int opflags)
{
	_dlock_syscall_switch(err,
		_dispatch_futex(uaddr, FUTEX_LOCK_PI, detect, timeout,
			NULL, 0, opflags),
		case 0: return;
		default: DISPATCH_CLIENT_CRASH(errno, "futex_lock_pi() failed");
	);
}
430
// FUTEX_UNLOCK_PI: release the priority-inheritance futex at uaddr,
// letting the kernel hand it to the next waiter.
static void
_dispatch_futex_unlock_pi(uint32_t *uaddr, int opflags)
{
	_dlock_syscall_switch(err,
		_dispatch_futex(uaddr, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0, opflags),
		case 0: return;
		default: DISPATCH_CLIENT_CRASH(errno, "futex_unlock_pi() failed");
	);
}
440
441 #endif
442 #pragma mark - wait for address
443
// Park the calling thread while *address holds `value`, using the best
// primitive available: ulock, then futex, then a polling thread_switch
// loop with an increasing yield timeout as a last resort.
void
_dispatch_wait_on_address(uint32_t volatile *address, uint32_t value,
		dispatch_lock_options_t flags)
{
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wait((uint32_t *)address, value, 0, flags);
#elif HAVE_FUTEX
	_dispatch_futex_wait((uint32_t *)address, value, NULL, FUTEX_PRIVATE_FLAG);
#else
	mach_msg_timeout_t timeout = 1;
	while (os_atomic_load(address, relaxed) == value) {
		thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, timeout++);
	}
#endif
	// flags are only consumed by the ulock path
	(void)flags;
}
460
// Wake every thread parked by _dispatch_wait_on_address() on `address`.
// The polling fallback needs no wake: waiters notice the store themselves.
void
_dispatch_wake_by_address(uint32_t volatile *address)
{
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wake((uint32_t *)address, ULF_WAKE_ALL);
#elif HAVE_FUTEX
	_dispatch_futex_wake((uint32_t *)address, INT_MAX, FUTEX_PRIVATE_FLAG);
#else
	(void)address;
#endif
}
472
473 #pragma mark - thread event
474
// Slow path of signaling a (binary) thread event: wake the single waiter
// via the configured primitive — mach semaphore fallback, ulock, futex,
// or the portable semaphore shim.
void
_dispatch_thread_event_signal_slow(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		kern_return_t kr = semaphore_signal(dte->dte_sema);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT
	_dispatch_ulock_wake(&dte->dte_value, 0);
#elif HAVE_FUTEX
	// wake exactly one waiter
	_dispatch_futex_wake(&dte->dte_value, 1, FUTEX_PRIVATE_FLAG);
#else
	_os_semaphore_signal(&dte->dte_sema, 1);
#endif
}
493
// Slow path of waiting on a thread event. For ulock/futex the event word
// must be 0 (signaled) or UINT32_MAX (armed, waiter parked); any other
// value indicates corruption and crashes. Spurious wakeups loop back and
// re-check the value.
void
_dispatch_thread_event_wait_slow(dispatch_thread_event_t dte)
{
#if DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK
	if (DISPATCH_LOCK_USE_SEMAPHORE_FALLBACK) {
		kern_return_t kr;
		do {
			kr = semaphore_wait(dte->dte_sema);
		} while (unlikely(kr == KERN_ABORTED)); // interrupted — retry
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		return;
	}
#endif
#if HAVE_UL_COMPARE_AND_WAIT || HAVE_FUTEX
	for (;;) {
		// acquire pairs with the signaler's store of 0
		uint32_t value = os_atomic_load(&dte->dte_value, acquire);
		if (likely(value == 0)) return;
		if (unlikely(value != UINT32_MAX)) {
			DISPATCH_CLIENT_CRASH(value, "Corrupt thread event value");
		}
#if HAVE_UL_COMPARE_AND_WAIT
		// EFAULT can occur and is tolerated; loop re-validates the value
		int rc = _dispatch_ulock_wait(&dte->dte_value, UINT32_MAX, 0, 0);
		dispatch_assert(rc == 0 || rc == EFAULT);
#elif HAVE_FUTEX
		_dispatch_futex_wait(&dte->dte_value, UINT32_MAX,
				NULL, FUTEX_PRIVATE_FLAG);
#endif
	}
#else
	_os_semaphore_wait(&dte->dte_sema);
#endif
}
526
527 #pragma mark - unfair lock
528
529 #if HAVE_UL_UNFAIR_LOCK
// Contended acquire of an unfair lock via ulock.
// The lock word holds the owner tid plus DLOCK_NOWAITERS_BIT; while the
// lock is held, clearing that bit advertises that waiters exist so the
// unlocker knows to call _dispatch_unfair_lock_wake().
void
_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock_options_t flags)
{
	// `next` is the value we install on acquisition; it starts as our plain
	// tid and drops the NOWAITERS bit once we know other waiters remain.
	dispatch_lock tid_self = _dispatch_tid_self(), next = tid_self;
	dispatch_lock tid_old, tid_new;
	int rc;

	for (;;) {
		os_atomic_rmw_loop(&dul->dul_lock, tid_old, tid_new, acquire, {
			if (likely(!_dispatch_lock_is_locked(tid_old))) {
				tid_new = next; // unlocked: try to take it
			} else {
				// locked: mark that a waiter exists
				tid_new = tid_old & ~DLOCK_NOWAITERS_BIT;
				if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
			}
		});
		if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) {
			DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
		}
		if (tid_new == next) {
			return; // we installed our tid: lock acquired
		}
		// wait while the lock word still reads tid_new
		rc = _dispatch_unfair_lock_wait(&dul->dul_lock, tid_new, 0, flags);
		if (rc == ENOTEMPTY) {
			// other waiters remain: acquire with the NOWAITERS bit cleared
			next = tid_self & ~DLOCK_NOWAITERS_BIT;
		} else {
			next = tid_self;
		}
	}
}
561 #elif HAVE_FUTEX
// Contended acquire via FUTEX_LOCK_PI: the kernel queues us and handles
// ownership handoff; the lock options have no futex equivalent.
void
_dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock_options_t flags)
{
	(void)flags;
	_dispatch_futex_lock_pi(&dul->dul_lock, NULL, 1, FUTEX_PRIVATE_FLAG);
}
569 #else
570 void
571 _dispatch_unfair_lock_lock_slow(dispatch_unfair_lock_t dul,
572 dispatch_lock_options_t flags)
573 {
574 dispatch_lock tid_cur, tid_self = _dispatch_tid_self();
575 uint32_t timeout = 1;
576
577 while (unlikely(!os_atomic_cmpxchgv(&dul->dul_lock,
578 DLOCK_OWNER_NULL, tid_self, &tid_cur, acquire))) {
579 if (unlikely(_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
580 DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
581 }
582 _dispatch_thread_switch(tid_cur, flags, timeout++);
583 }
584 }
585 #endif
586
// Slow path of unfair-lock release, entered with `tid_cur` being the lock
// word observed at unlock time. Crashes if the caller doesn't own the
// lock, then wakes a waiter with the platform primitive.
void
_dispatch_unfair_lock_unlock_slow(dispatch_unfair_lock_t dul,
		dispatch_lock tid_cur)
{
	dispatch_lock_owner tid_self = _dispatch_tid_self();
	if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
		DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread");
	}

#if HAVE_UL_UNFAIR_LOCK
	// NOWAITERS bit cleared means someone is parked — wake one waiter
	if (!(tid_cur & DLOCK_NOWAITERS_BIT)) {
		_dispatch_unfair_lock_wake(&dul->dul_lock, 0);
	}
#elif HAVE_FUTEX
	// futex_unlock_pi() handles both OWNER_DIED which we abuse & WAITERS
	_dispatch_futex_unlock_pi(&dul->dul_lock, FUTEX_PRIVATE_FLAG);
#else
	(void)dul;
#endif
}
607
608 #pragma mark - gate lock
609
// Block until the gate word equals `value` (typically "open"). While it
// doesn't, advertise a waiter in the lock word — clearing
// DLOCK_NOWAITERS_BIT or setting DLOCK_WAITERS_BIT depending on the
// platform's encoding — and park on the platform primitive.
void
_dispatch_gate_wait_slow(dispatch_gate_t dgl, dispatch_lock value,
		dispatch_lock_options_t flags)
{
	dispatch_lock tid_self = _dispatch_tid_self(), tid_old, tid_new;
	uint32_t timeout = 1;

	for (;;) {
		os_atomic_rmw_loop(&dgl->dgl_lock, tid_old, tid_new, acquire, {
			if (likely(tid_old == value)) {
				// gate reached the awaited value — done
				os_atomic_rmw_loop_give_up_with_fence(acquire, return);
			}
#ifdef DLOCK_NOWAITERS_BIT
			tid_new = tid_old & ~DLOCK_NOWAITERS_BIT;
#else
			tid_new = tid_old | DLOCK_WAITERS_BIT;
#endif
			if (tid_new == tid_old) os_atomic_rmw_loop_give_up(break);
		});
		if (unlikely(_dispatch_lock_is_locked_by(tid_old, tid_self))) {
			DISPATCH_CLIENT_CRASH(0, "trying to lock recursively");
		}
#if HAVE_UL_UNFAIR_LOCK
		_dispatch_unfair_lock_wait(&dgl->dgl_lock, tid_new, 0, flags);
#elif HAVE_FUTEX
		_dispatch_futex_wait(&dgl->dgl_lock, tid_new, NULL, FUTEX_PRIVATE_FLAG);
#else
		// last resort: yield with an increasing depression timeout
		_dispatch_thread_switch(tid_new, flags, timeout++);
#endif
		(void)timeout;
	}
}
642
// Open the gate for everyone: verify the caller owns the gate (crash
// otherwise), then wake ALL parked waiters. The thread_switch fallback
// needs no wake — its waiters poll.
void
_dispatch_gate_broadcast_slow(dispatch_gate_t dgl, dispatch_lock tid_cur)
{
	dispatch_lock_owner tid_self = _dispatch_tid_self();
	if (unlikely(!_dispatch_lock_is_locked_by(tid_cur, tid_self))) {
		DISPATCH_CLIENT_CRASH(tid_cur, "lock not owned by current thread");
	}

#if HAVE_UL_UNFAIR_LOCK
	_dispatch_unfair_lock_wake(&dgl->dgl_lock, ULF_WAKE_ALL);
#elif HAVE_FUTEX
	_dispatch_futex_wake(&dgl->dgl_lock, INT_MAX, FUTEX_PRIVATE_FLAG);
#else
	(void)dgl;
#endif
}