/*
 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */

#include "internal.h"
// semaphores are too fundamental to use the dispatch_assume*() macros
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		if (slowpath(x)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (slowpath((x) == -1)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#if USE_WIN32_SEM
// rdar://problem/8428132
static DWORD best_resolution = 1; // 1ms

DWORD
_push_timer_resolution(DWORD ms)
{
	MMRESULT res;
	static dispatch_once_t once;

	if (ms > 16) {
		// only update timer resolution if smaller than default 15.6ms
		// zero means not updated
		return 0;
	}

	// aim for the best resolution we can accomplish
	dispatch_once(&once, ^{
		TIMECAPS tc;
		MMRESULT res;
		res = timeGetDevCaps(&tc, sizeof(tc));
		if (res == MMSYSERR_NOERROR) {
			best_resolution = min(max(tc.wPeriodMin, best_resolution),
					tc.wPeriodMax);
		}
	});

	res = timeBeginPeriod(best_resolution);
	if (res == TIMERR_NOERROR) {
		return best_resolution;
	}
	// zero means not updated
	return 0;
}

// match ms parameter to result from _push_timer_resolution
void
_pop_timer_resolution(DWORD ms)
{
	if (ms) {
		timeEndPeriod(ms);
	}
}
#endif /* USE_WIN32_SEM */
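
// Illustrative sketch (not part of the library, Win32 builds only): the
// push/pop pair above is meant to bracket a single timed wait, raising the
// multimedia timer resolution only while the wait is in flight. The handle
// parameter and the 100ms timeout below are made up for the example.
#if 0
static void
example_push_pop_resolution(HANDLE h)
{
	DWORD msec = 100; // hypothetical timeout
	DWORD resolution = _push_timer_resolution(msec);
	WaitForSingleObject(h, msec);
	_pop_timer_resolution(resolution);
}
#endif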
DISPATCH_WEAK // rdar://problem/8503746
long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema);

static long _dispatch_group_wake(dispatch_semaphore_t dsema);

#pragma mark dispatch_semaphore_t
static void
_dispatch_semaphore_init(long value, dispatch_object_t dou)
{
	dispatch_semaphore_t dsema = dou._dsema;

	dsema->do_next = (dispatch_semaphore_t)DISPATCH_OBJECT_LISTLESS;
	dsema->do_targetq = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
	dsema->dsema_value = value;
	dsema->dsema_orig = value;
#if USE_POSIX_SEM
	int ret = sem_init(&dsema->dsema_sem, 0, 0);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
dispatch_semaphore_t
dispatch_semaphore_create(long value)
{
	dispatch_semaphore_t dsema;

	// If the internal value is negative, then the absolute value is equal
	// to the number of waiting threads. Therefore it is bogus to initialize
	// the semaphore with a negative value.
	if (value < 0) {
		return NULL;
	}

	dsema = (dispatch_semaphore_t)_dispatch_alloc(DISPATCH_VTABLE(semaphore),
			sizeof(struct dispatch_semaphore_s) -
			sizeof(dsema->dsema_notify_head) -
			sizeof(dsema->dsema_notify_tail));
	_dispatch_semaphore_init(value, dsema);
	return dsema;
}
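
// Illustrative sketch (not part of the library): typical client use of the
// public API implemented in this file, using a counting semaphore to bound
// concurrent access to a resource. The initial count of 2 is made up.
#if 0
static void
example_semaphore_usage(void)
{
	// allow at most two concurrent users of a resource
	dispatch_semaphore_t sema = dispatch_semaphore_create(2);

	dispatch_semaphore_wait(sema, DISPATCH_TIME_FOREVER); // acquire
	// ... use the shared resource ...
	dispatch_semaphore_signal(sema);                       // release

	dispatch_release(sema);
}
#endif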
#if USE_MACH_SEM
static void
_dispatch_semaphore_create_port(semaphore_t *s4)
{
	kern_return_t kr;
	semaphore_t tmp;

	if (*s4) {
		return;
	}
	_dispatch_safe_fork = false;

	// lazily allocate the semaphore port

	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.
	// 3) Use the per-thread semaphore port.

	while ((kr = semaphore_create(mach_task_self(), &tmp,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		_dispatch_temporary_resource_shortage();
	}

	if (!dispatch_atomic_cmpxchg(s4, 0, tmp, relaxed)) {
		kr = semaphore_destroy(mach_task_self(), tmp);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
}
#elif USE_WIN32_SEM
static void
_dispatch_semaphore_create_handle(HANDLE *s4)
{
	HANDLE tmp;

	if (*s4) {
		return;
	}

	// lazily allocate the semaphore port

	while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) {
		_dispatch_temporary_resource_shortage();
	}

	if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) {
		CloseHandle(tmp);
	}
}
#endif
void
_dispatch_semaphore_dispose(dispatch_object_t dou)
{
	dispatch_semaphore_t dsema = dou._dsema;

	if (dsema->dsema_value < dsema->dsema_orig) {
		DISPATCH_CLIENT_CRASH(
				"Semaphore/group object deallocated while in use");
	}

#if USE_MACH_SEM
	kern_return_t kr;
	if (dsema->dsema_port) {
		kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
#elif USE_POSIX_SEM
	int ret = sem_destroy(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#elif USE_WIN32_SEM
	if (dsema->dsema_handle) {
		CloseHandle(dsema->dsema_handle);
	}
#endif
}
size_t
_dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz)
{
	dispatch_semaphore_t dsema = dou._dsema;

	size_t offset = 0;
	offset += dsnprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
			dx_kind(dsema), dsema);
	offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset);
#if USE_MACH_SEM
	offset += dsnprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ",
			dsema->dsema_port);
#endif
	offset += dsnprintf(&buf[offset], bufsiz - offset,
			"value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig);
	return offset;
}
DISPATCH_NOINLINE
long
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	// Before dsema_sent_ksignals is incremented we can rely on the reference
	// held by the waiter. However, once this value is incremented the waiter
	// may return between the atomic increment and the semaphore_signal(),
	// therefore an explicit reference must be held in order to safely access
	// dsema after the atomic increment.
	_dispatch_retain(dsema);

#if USE_MACH_SEM || USE_POSIX_SEM
	(void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals, relaxed);
#endif

#if USE_MACH_SEM
	_dispatch_semaphore_create_port(&dsema->dsema_port);
	kern_return_t kr = semaphore_signal(dsema->dsema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret = sem_post(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#elif USE_WIN32_SEM
	_dispatch_semaphore_create_handle(&dsema->dsema_handle);
	int ret = ReleaseSemaphore(dsema->dsema_handle, 1, NULL);
	dispatch_assume(ret);
#endif

	_dispatch_release(dsema);
	return 1;
}
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	long value = dispatch_atomic_inc2o(dsema, dsema_value, release);
	if (fastpath(value > 0)) {
		return 0;
	}
	if (slowpath(value == LONG_MIN)) {
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()");
	}
	return _dispatch_semaphore_signal_slow(dsema);
}
DISPATCH_NOINLINE
static long
_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
		dispatch_time_t timeout)
{
	long orig;

#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;
#elif USE_POSIX_SEM
	struct timespec _timeout;
	int ret;
#elif USE_WIN32_SEM
	uint64_t nsec;
	DWORD msec;
	DWORD resolution;
	DWORD wait_result;
#endif

#if USE_MACH_SEM || USE_POSIX_SEM
again:
	// Mach semaphores appear to sometimes spuriously wake up. Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	orig = dsema->dsema_sent_ksignals;
	while (orig) {
		if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_sent_ksignals, orig,
				orig - 1, &orig, relaxed)) {
			return 0;
		}
	}
#endif

#if USE_MACH_SEM
	_dispatch_semaphore_create_port(&dsema->dsema_port);
#elif USE_WIN32_SEM
	_dispatch_semaphore_create_handle(&dsema->dsema_handle);
#endif

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1; /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	switch (timeout) {
	default:
#if USE_MACH_SEM
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout));
		} while (kr == KERN_ABORTED);

		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
#elif USE_POSIX_SEM
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno != ETIMEDOUT) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
#elif USE_WIN32_SEM
		nsec = _dispatch_timeout(timeout);
		msec = (DWORD)(nsec / (uint64_t)1000000);
		resolution = _push_timer_resolution(msec);
		wait_result = WaitForSingleObject(dsema->dsema_handle, msec);
		_pop_timer_resolution(resolution);
		if (wait_result != WAIT_TIMEOUT) {
			break;
		}
#endif
		// Fall through and try to undo what the fast path did to
		// dsema->dsema_value
	case DISPATCH_TIME_NOW:
		orig = dsema->dsema_value;
		while (orig < 0) {
			if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_value, orig, orig + 1,
					&orig, relaxed)) {
#if USE_MACH_SEM
				return KERN_OPERATION_TIMED_OUT;
#elif USE_POSIX_SEM || USE_WIN32_SEM
				errno = ETIMEDOUT;
				return -1;
#endif
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
#if USE_MACH_SEM
		do {
			kr = semaphore_wait(dsema->dsema_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret == -1 && errno == EINTR);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#elif USE_WIN32_SEM
		WaitForSingleObject(dsema->dsema_handle, INFINITE);
#endif
		break;
	}
#if USE_MACH_SEM || USE_POSIX_SEM
	goto again;
#else
	return 0;
#endif
}
long
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	long value = dispatch_atomic_dec2o(dsema, dsema_value, acquire);
	if (fastpath(value >= 0)) {
		return 0;
	}
	return _dispatch_semaphore_wait_slow(dsema, timeout);
}
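
// Illustrative sketch (not part of the library): waiting with a relative
// timeout built via dispatch_time(). The 500ms figure is made up; a non-zero
// return from dispatch_semaphore_wait() indicates the wait timed out.
#if 0
static long
example_timed_wait(dispatch_semaphore_t sema)
{
	dispatch_time_t when = dispatch_time(DISPATCH_TIME_NOW,
			(int64_t)(500 * NSEC_PER_MSEC));
	return dispatch_semaphore_wait(sema, when); // non-zero means timed out
}
#endif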
#pragma mark dispatch_group_t

dispatch_group_t
dispatch_group_create(void)
{
	dispatch_group_t dg = (dispatch_group_t)_dispatch_alloc(
			DISPATCH_VTABLE(group), sizeof(struct dispatch_semaphore_s));
	_dispatch_semaphore_init(LONG_MAX, dg);
	return dg;
}
void
dispatch_group_enter(dispatch_group_t dg)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	long value = dispatch_atomic_dec2o(dsema, dsema_value, acquire);
	if (slowpath(value < 0)) {
		DISPATCH_CLIENT_CRASH(
				"Too many nested calls to dispatch_group_enter()");
	}
}
static long
_dispatch_group_wake(dispatch_semaphore_t dsema)
{
	dispatch_continuation_t next, head, tail = NULL, dc;
	long rval;

	head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL, relaxed);
	if (head) {
		// snapshot before anything is notified/woken <rdar://problem/8554546>
		tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL, relaxed);
	}
	rval = (long)dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0, relaxed);
	if (rval) {
		// wake group waiters
#if USE_MACH_SEM
		_dispatch_semaphore_create_port(&dsema->dsema_port);
		do {
			kern_return_t kr = semaphore_signal(dsema->dsema_port);
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		} while (--rval);
#elif USE_POSIX_SEM
		do {
			int ret = sem_post(&dsema->dsema_sem);
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		} while (--rval);
#elif USE_WIN32_SEM
		_dispatch_semaphore_create_handle(&dsema->dsema_handle);
		int ret;
		ret = ReleaseSemaphore(dsema->dsema_handle, rval, NULL);
		dispatch_assume(ret);
#else
#error "No supported semaphore type"
#endif
	}
	if (head) {
		// async group notify blocks
		do {
			next = fastpath(head->do_next);
			if (!next && head != tail) {
				while (!(next = fastpath(head->do_next))) {
					dispatch_hardware_pause();
				}
			}
			dispatch_queue_t dsn_queue = (dispatch_queue_t)head->dc_data;
			dc = _dispatch_continuation_free_cacheonly(head);
			dispatch_async_f(dsn_queue, head->dc_ctxt, head->dc_func);
			_dispatch_release(dsn_queue);
			if (slowpath(dc)) {
				_dispatch_continuation_free_to_cache_limit(dc);
			}
		} while ((head = next));
		_dispatch_release(dsema);
	}
	return 0;
}
void
dispatch_group_leave(dispatch_group_t dg)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	long value = dispatch_atomic_inc2o(dsema, dsema_value, release);
	if (slowpath(value < 0)) {
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()");
	}
	if (slowpath(value == LONG_MAX)) {
		(void)_dispatch_group_wake(dsema);
	}
}
DISPATCH_NOINLINE
static long
_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	long orig;

#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;
#elif USE_POSIX_SEM // KVV
	struct timespec _timeout;
	int ret;
#elif USE_WIN32_SEM // KVV
	uint64_t nsec;
	DWORD msec;
	DWORD resolution;
	DWORD wait_result;
#endif

again:
	// check before we cause another signal to be sent by incrementing
	// dsema->dsema_group_waiters
	if (dsema->dsema_value == LONG_MAX) {
		return _dispatch_group_wake(dsema);
	}
	// Mach semaphores appear to sometimes spuriously wake up. Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	(void)dispatch_atomic_inc2o(dsema, dsema_group_waiters, relaxed);
	// check the values again in case we need to wake any threads
	if (dsema->dsema_value == LONG_MAX) {
		return _dispatch_group_wake(dsema);
	}

#if USE_MACH_SEM
	_dispatch_semaphore_create_port(&dsema->dsema_port);
#elif USE_WIN32_SEM
	_dispatch_semaphore_create_handle(&dsema->dsema_handle);
#endif

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1; /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	switch (timeout) {
	default:
#if USE_MACH_SEM
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout));
		} while (kr == KERN_ABORTED);

		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
#elif USE_POSIX_SEM
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
		} while (ret == -1 && errno == EINTR);

		if (!(ret == -1 && errno == ETIMEDOUT)) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
#elif USE_WIN32_SEM
		nsec = _dispatch_timeout(timeout);
		msec = (DWORD)(nsec / (uint64_t)1000000);
		resolution = _push_timer_resolution(msec);
		wait_result = WaitForSingleObject(dsema->dsema_handle, msec);
		_pop_timer_resolution(resolution);
		if (wait_result != WAIT_TIMEOUT) {
			break;
		}
#endif
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		orig = dsema->dsema_group_waiters;
		while (orig) {
			if (dispatch_atomic_cmpxchgvw2o(dsema, dsema_group_waiters, orig,
					orig - 1, &orig, relaxed)) {
#if USE_MACH_SEM
				return KERN_OPERATION_TIMED_OUT;
#elif USE_POSIX_SEM || USE_WIN32_SEM
				errno = ETIMEDOUT;
				return -1;
#endif
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
#if USE_MACH_SEM
		do {
			kr = semaphore_wait(dsema->dsema_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret == -1 && errno == EINTR);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#elif USE_WIN32_SEM
		WaitForSingleObject(dsema->dsema_handle, INFINITE);
#endif
		break;
	}
	goto again;
}
long
dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;

	if (dsema->dsema_value == LONG_MAX) {
		return 0;
	}
	if (timeout == 0) {
#if USE_MACH_SEM
		return KERN_OPERATION_TIMED_OUT;
#elif USE_POSIX_SEM || USE_WIN32_SEM
		errno = ETIMEDOUT;
		return -1;
#endif
	}
	return _dispatch_group_wait_slow(dsema, timeout);
}
void
dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt,
		void (*func)(void *))
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	dispatch_continuation_t prev, dsn = _dispatch_continuation_alloc();
	dsn->do_vtable = (void *)DISPATCH_OBJ_ASYNC_BIT;
	dsn->dc_data = dq;
	dsn->dc_ctxt = ctxt;
	dsn->dc_func = func;
	dsn->do_next = NULL;
	_dispatch_retain(dq);
	prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn, release);
	if (fastpath(prev)) {
		prev->do_next = dsn;
	} else {
		_dispatch_retain(dg);
		dispatch_atomic_store2o(dsema, dsema_notify_head, dsn, seq_cst);
		dispatch_atomic_barrier(seq_cst); // <rdar://problem/11750916>
		if (dispatch_atomic_load2o(dsema, dsema_value, seq_cst) == LONG_MAX) {
			_dispatch_group_wake(dsema);
		}
	}
}

void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_block_t db)
{
	dispatch_group_notify_f(dg, dq, _dispatch_Block_copy(db),
			_dispatch_call_block_and_release);
}
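
// Illustrative sketch (not part of the library): the intended client pairing
// of dispatch_group_enter()/dispatch_group_leave() with a notification once
// the group empties. The queue choice and the work()/ctxt callback below are
// made up for the example.
#if 0
static void
example_group_usage(void (*work)(void *), void *ctxt)
{
	dispatch_group_t group = dispatch_group_create();
	dispatch_queue_t q = dispatch_get_global_queue(
			DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);

	dispatch_group_enter(group);       // track one unit of in-flight work
	dispatch_async(q, ^{
		work(ctxt);
		dispatch_group_leave(group);   // balances the enter above
	});

	// submitted to q only after every enter has been balanced by a leave
	dispatch_group_notify(group, q, ^{
		// all tracked work finished
	});

	dispatch_group_wait(group, DISPATCH_TIME_FOREVER); // or pass a timeout
	dispatch_release(group);
}
#endif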
#pragma mark _dispatch_thread_semaphore_t

_dispatch_thread_semaphore_t
_dispatch_thread_semaphore_create(void)
{
	_dispatch_safe_fork = false;
#if DISPATCH_USE_OS_SEMAPHORE_CACHE
	return _os_semaphore_create();
#elif USE_MACH_SEM
	semaphore_t s4;
	kern_return_t kr;
	while (slowpath(kr = semaphore_create(mach_task_self(), &s4,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		_dispatch_temporary_resource_shortage();
	}
	return (_dispatch_thread_semaphore_t)s4;
#elif USE_POSIX_SEM
	sem_t s4;
	int ret = sem_init(&s4, 0, 0);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
	return (_dispatch_thread_semaphore_t)s4;
#elif USE_WIN32_SEM
	HANDLE tmp;
	while (!dispatch_assume(tmp = CreateSemaphore(NULL, 0, LONG_MAX, NULL))) {
		_dispatch_temporary_resource_shortage();
	}
	return (_dispatch_thread_semaphore_t)tmp;
#else
#error "No supported semaphore type"
#endif
}
void
_dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema)
{
#if DISPATCH_USE_OS_SEMAPHORE_CACHE
	return _os_semaphore_dispose(sema);
#elif USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t s4 = (sem_t)sema;
	int ret = sem_destroy(&s4);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#elif USE_WIN32_SEM
	// XXX: signal the semaphore?
	BOOL success;
	success = CloseHandle((HANDLE)sema);
	dispatch_assume(success);
#else
#error "No supported semaphore type"
#endif
}
void
_dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema)
{
	// assumed to contain a release barrier
#if DISPATCH_USE_OS_SEMAPHORE_CACHE
	return _os_semaphore_signal(sema);
#elif USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr = semaphore_signal(s4);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t s4 = (sem_t)sema;
	int ret = sem_post(&s4);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#elif USE_WIN32_SEM
	BOOL ret;
	ret = ReleaseSemaphore((HANDLE)sema, 1, NULL);
	dispatch_assume(ret);
#else
#error "No supported semaphore type"
#endif
}
void
_dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema)
{
	// assumed to contain an acquire barrier
#if DISPATCH_USE_OS_SEMAPHORE_CACHE
	return _os_semaphore_wait(sema);
#elif USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr;
	do {
		kr = semaphore_wait(s4);
	} while (slowpath(kr == KERN_ABORTED));
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t s4 = (sem_t)sema;
	int ret;
	do {
		ret = sem_wait(&s4);
	} while (slowpath(ret != 0));
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#elif USE_WIN32_SEM
	DWORD wait_result;
	do {
		wait_result = WaitForSingleObject((HANDLE)sema, INFINITE);
	} while (wait_result != WAIT_OBJECT_0);
#else
#error "No supported semaphore type"
#endif
}
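
// Illustrative sketch (not part of the library): the internal per-thread
// semaphore API above is used as create -> signal/wait -> dispose. The
// producer/consumer split described below is made up for the example.
#if 0
static void
example_thread_semaphore(void)
{
	_dispatch_thread_semaphore_t tsema = _dispatch_thread_semaphore_create();
	// ... hand tsema to another thread, which eventually calls:
	//         _dispatch_thread_semaphore_signal(tsema);
	_dispatch_thread_semaphore_wait(tsema);   // blocks until signaled
	_dispatch_thread_semaphore_dispose(tsema);
}
#endif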