/*
 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
// semaphores are too fundamental to use the dispatch_assume*() macros
#if USE_MACH_SEM
// Crash hard on any non-zero kern_return_t from the Mach semaphore calls:
// a failure here means the semaphore/group bookkeeping itself is broken.
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		if (slowpath(x)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#elif USE_POSIX_SEM
// Same idea for the POSIX sem_* API, which reports failure as -1.
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (slowpath((x) == -1)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#endif
38 DISPATCH_WEAK
// rdar://problem/8503746
39 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema
);
41 static long _dispatch_group_wake(dispatch_semaphore_t dsema
);
44 #pragma mark dispatch_semaphore_t
47 _dispatch_semaphore_init(long value
, dispatch_object_t dou
)
49 dispatch_semaphore_t dsema
= dou
._dsema
;
51 dsema
->do_next
= DISPATCH_OBJECT_LISTLESS
;
52 dsema
->do_targetq
= dispatch_get_global_queue(
53 DISPATCH_QUEUE_PRIORITY_DEFAULT
, 0);
54 dsema
->dsema_value
= value
;
55 dsema
->dsema_orig
= value
;
57 int ret
= sem_init(&dsema
->dsema_sem
, 0, 0);
58 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
63 dispatch_semaphore_create(long value
)
65 dispatch_semaphore_t dsema
;
67 // If the internal value is negative, then the absolute of the value is
68 // equal to the number of waiting threads. Therefore it is bogus to
69 // initialize the semaphore with a negative value.
74 dsema
= _dispatch_alloc(DISPATCH_VTABLE(semaphore
),
75 sizeof(struct dispatch_semaphore_s
));
76 _dispatch_semaphore_init(value
, dsema
);
#if USE_MACH_SEM
// Lazily allocate the Mach semaphore port backing *s4. Safe to race: losers
// of the cmpxchg destroy their freshly created port. Clears
// _dispatch_safe_fork because a Mach port does not survive fork().
static void
_dispatch_semaphore_create_port(semaphore_t *s4)
{
	kern_return_t kr;
	semaphore_t tmp;

	if (*s4) {
		return;
	}
	_dispatch_safe_fork = false;

	// lazily allocate the semaphore port

	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.
	// 3) Use the per-thread semaphore port.

	while ((kr = semaphore_create(mach_task_self(), &tmp,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		// NOTE(review): the retry backoff was lost in extraction; upstream
		// used sleep(1) to wait out temporary port-space exhaustion — confirm.
		sleep(1);
	}

	if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) {
		// Another thread won the race; discard our port.
		kr = semaphore_destroy(mach_task_self(), tmp);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
}
#endif
113 _dispatch_semaphore_dispose(dispatch_object_t dou
)
115 dispatch_semaphore_t dsema
= dou
._dsema
;
117 if (dsema
->dsema_value
< dsema
->dsema_orig
) {
118 DISPATCH_CLIENT_CRASH(
119 "Semaphore/group object deallocated while in use");
124 if (dsema
->dsema_port
) {
125 kr
= semaphore_destroy(mach_task_self(), dsema
->dsema_port
);
126 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
128 if (dsema
->dsema_waiter_port
) {
129 kr
= semaphore_destroy(mach_task_self(), dsema
->dsema_waiter_port
);
130 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
133 int ret
= sem_destroy(&dsema
->dsema_sem
);
134 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
139 _dispatch_semaphore_debug(dispatch_object_t dou
, char *buf
, size_t bufsiz
)
141 dispatch_semaphore_t dsema
= dou
._dsema
;
144 offset
+= snprintf(&buf
[offset
], bufsiz
- offset
, "%s[%p] = { ",
145 dx_kind(dsema
), dsema
);
146 offset
+= _dispatch_object_debug_attr(dsema
, &buf
[offset
], bufsiz
- offset
);
148 offset
+= snprintf(&buf
[offset
], bufsiz
- offset
, "port = 0x%u, ",
151 offset
+= snprintf(&buf
[offset
], bufsiz
- offset
,
152 "value = %ld, orig = %ld }", dsema
->dsema_value
, dsema
->dsema_orig
);
158 _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema
)
160 // Before dsema_sent_ksignals is incremented we can rely on the reference
161 // held by the waiter. However, once this value is incremented the waiter
162 // may return between the atomic increment and the semaphore_signal(),
163 // therefore an explicit reference must be held in order to safely access
164 // dsema after the atomic increment.
165 _dispatch_retain(dsema
);
167 (void)dispatch_atomic_inc2o(dsema
, dsema_sent_ksignals
);
170 _dispatch_semaphore_create_port(&dsema
->dsema_port
);
171 kern_return_t kr
= semaphore_signal(dsema
->dsema_port
);
172 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
174 int ret
= sem_post(&dsema
->dsema_sem
);
175 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
178 _dispatch_release(dsema
);
183 dispatch_semaphore_signal(dispatch_semaphore_t dsema
)
185 dispatch_atomic_release_barrier();
186 long value
= dispatch_atomic_inc2o(dsema
, dsema_value
);
187 if (fastpath(value
> 0)) {
190 if (slowpath(value
== LONG_MIN
)) {
191 DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()");
193 return _dispatch_semaphore_signal_slow(dsema
);
198 _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema
,
199 dispatch_time_t timeout
)
204 // Mach semaphores appear to sometimes spuriously wake up. Therefore,
205 // we keep a parallel count of the number of times a Mach semaphore is
206 // signaled (6880961).
207 while ((orig
= dsema
->dsema_sent_ksignals
)) {
208 if (dispatch_atomic_cmpxchg2o(dsema
, dsema_sent_ksignals
, orig
,
215 mach_timespec_t _timeout
;
218 _dispatch_semaphore_create_port(&dsema
->dsema_port
);
220 // From xnu/osfmk/kern/sync_sema.c:
221 // wait_semaphore->count = -1; /* we don't keep an actual count */
223 // The code above does not match the documentation, and that fact is
224 // not surprising. The documented semantics are clumsy to use in any
225 // practical way. The above hack effectively tricks the rest of the
226 // Mach semaphore logic to behave like the libdispatch algorithm.
231 uint64_t nsec
= _dispatch_timeout(timeout
);
232 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
233 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
234 kr
= slowpath(semaphore_timedwait(dsema
->dsema_port
, _timeout
));
235 } while (kr
== KERN_ABORTED
);
237 if (kr
!= KERN_OPERATION_TIMED_OUT
) {
238 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
241 // Fall through and try to undo what the fast path did to
242 // dsema->dsema_value
243 case DISPATCH_TIME_NOW
:
244 while ((orig
= dsema
->dsema_value
) < 0) {
245 if (dispatch_atomic_cmpxchg2o(dsema
, dsema_value
, orig
, orig
+ 1)) {
246 return KERN_OPERATION_TIMED_OUT
;
249 // Another thread called semaphore_signal().
250 // Fall through and drain the wakeup.
251 case DISPATCH_TIME_FOREVER
:
253 kr
= semaphore_wait(dsema
->dsema_port
);
254 } while (kr
== KERN_ABORTED
);
255 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
259 struct timespec _timeout
;
265 uint64_t nsec
= _dispatch_timeout(timeout
);
266 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
267 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
268 ret
= slowpath(sem_timedwait(&dsema
->dsema_sem
, &_timeout
));
269 } while (ret
== -1 && errno
== EINTR
);
271 if (ret
== -1 && errno
!= ETIMEDOUT
) {
272 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
275 // Fall through and try to undo what the fast path did to
276 // dsema->dsema_value
277 case DISPATCH_TIME_NOW
:
278 while ((orig
= dsema
->dsema_value
) < 0) {
279 if (dispatch_atomic_cmpxchg2o(dsema
, dsema_value
, orig
, orig
+ 1)) {
284 // Another thread called semaphore_signal().
285 // Fall through and drain the wakeup.
286 case DISPATCH_TIME_FOREVER
:
288 ret
= sem_wait(&dsema
->dsema_sem
);
290 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
299 dispatch_semaphore_wait(dispatch_semaphore_t dsema
, dispatch_time_t timeout
)
301 long value
= dispatch_atomic_dec2o(dsema
, dsema_value
);
302 dispatch_atomic_acquire_barrier();
303 if (fastpath(value
>= 0)) {
306 return _dispatch_semaphore_wait_slow(dsema
, timeout
);
310 #pragma mark dispatch_group_t
313 dispatch_group_create(void)
315 dispatch_group_t dg
= _dispatch_alloc(DISPATCH_VTABLE(group
),
316 sizeof(struct dispatch_semaphore_s
));
317 _dispatch_semaphore_init(LONG_MAX
, dg
);
322 dispatch_group_enter(dispatch_group_t dg
)
324 dispatch_semaphore_t dsema
= (dispatch_semaphore_t
)dg
;
326 (void)dispatch_semaphore_wait(dsema
, DISPATCH_TIME_FOREVER
);
331 _dispatch_group_wake(dispatch_semaphore_t dsema
)
333 struct dispatch_sema_notify_s
*next
, *head
, *tail
= NULL
;
336 head
= dispatch_atomic_xchg2o(dsema
, dsema_notify_head
, NULL
);
338 // snapshot before anything is notified/woken <rdar://problem/8554546>
339 tail
= dispatch_atomic_xchg2o(dsema
, dsema_notify_tail
, NULL
);
341 rval
= dispatch_atomic_xchg2o(dsema
, dsema_group_waiters
, 0);
343 // wake group waiters
345 _dispatch_semaphore_create_port(&dsema
->dsema_waiter_port
);
347 kern_return_t kr
= semaphore_signal(dsema
->dsema_waiter_port
);
348 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
352 int ret
= sem_post(&dsema
->dsema_sem
);
353 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
358 // async group notify blocks
360 dispatch_async_f(head
->dsn_queue
, head
->dsn_ctxt
, head
->dsn_func
);
361 _dispatch_release(head
->dsn_queue
);
362 next
= fastpath(head
->dsn_next
);
363 if (!next
&& head
!= tail
) {
364 while (!(next
= fastpath(head
->dsn_next
))) {
365 _dispatch_hardware_pause();
369 } while ((head
= next
));
370 _dispatch_release(dsema
);
376 dispatch_group_leave(dispatch_group_t dg
)
378 dispatch_semaphore_t dsema
= (dispatch_semaphore_t
)dg
;
379 dispatch_atomic_release_barrier();
380 long value
= dispatch_atomic_inc2o(dsema
, dsema_value
);
381 if (slowpath(value
== LONG_MIN
)) {
382 DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()");
384 if (slowpath(value
== dsema
->dsema_orig
)) {
385 (void)_dispatch_group_wake(dsema
);
391 _dispatch_group_wait_slow(dispatch_semaphore_t dsema
, dispatch_time_t timeout
)
396 // check before we cause another signal to be sent by incrementing
397 // dsema->dsema_group_waiters
398 if (dsema
->dsema_value
== dsema
->dsema_orig
) {
399 return _dispatch_group_wake(dsema
);
401 // Mach semaphores appear to sometimes spuriously wake up. Therefore,
402 // we keep a parallel count of the number of times a Mach semaphore is
403 // signaled (6880961).
404 (void)dispatch_atomic_inc2o(dsema
, dsema_group_waiters
);
405 // check the values again in case we need to wake any threads
406 if (dsema
->dsema_value
== dsema
->dsema_orig
) {
407 return _dispatch_group_wake(dsema
);
411 mach_timespec_t _timeout
;
414 _dispatch_semaphore_create_port(&dsema
->dsema_waiter_port
);
416 // From xnu/osfmk/kern/sync_sema.c:
417 // wait_semaphore->count = -1; /* we don't keep an actual count */
419 // The code above does not match the documentation, and that fact is
420 // not surprising. The documented semantics are clumsy to use in any
421 // practical way. The above hack effectively tricks the rest of the
422 // Mach semaphore logic to behave like the libdispatch algorithm.
427 uint64_t nsec
= _dispatch_timeout(timeout
);
428 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
429 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
430 kr
= slowpath(semaphore_timedwait(dsema
->dsema_waiter_port
,
432 } while (kr
== KERN_ABORTED
);
434 if (kr
!= KERN_OPERATION_TIMED_OUT
) {
435 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
438 // Fall through and try to undo the earlier change to
439 // dsema->dsema_group_waiters
440 case DISPATCH_TIME_NOW
:
441 while ((orig
= dsema
->dsema_group_waiters
)) {
442 if (dispatch_atomic_cmpxchg2o(dsema
, dsema_group_waiters
, orig
,
444 return KERN_OPERATION_TIMED_OUT
;
447 // Another thread called semaphore_signal().
448 // Fall through and drain the wakeup.
449 case DISPATCH_TIME_FOREVER
:
451 kr
= semaphore_wait(dsema
->dsema_waiter_port
);
452 } while (kr
== KERN_ABORTED
);
453 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
457 struct timespec _timeout
;
463 uint64_t nsec
= _dispatch_timeout(timeout
);
464 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
465 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
466 ret
= slowpath(sem_timedwait(&dsema
->dsema_sem
, &_timeout
));
467 } while (ret
== -1 && errno
== EINTR
);
469 if (!(ret
== -1 && errno
== ETIMEDOUT
)) {
470 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
473 // Fall through and try to undo the earlier change to
474 // dsema->dsema_group_waiters
475 case DISPATCH_TIME_NOW
:
476 while ((orig
= dsema
->dsema_group_waiters
)) {
477 if (dispatch_atomic_cmpxchg2o(dsema
, dsema_group_waiters
, orig
,
483 // Another thread called semaphore_signal().
484 // Fall through and drain the wakeup.
485 case DISPATCH_TIME_FOREVER
:
487 ret
= sem_wait(&dsema
->dsema_sem
);
488 } while (ret
== -1 && errno
== EINTR
);
489 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
498 dispatch_group_wait(dispatch_group_t dg
, dispatch_time_t timeout
)
500 dispatch_semaphore_t dsema
= (dispatch_semaphore_t
)dg
;
502 if (dsema
->dsema_value
== dsema
->dsema_orig
) {
507 return KERN_OPERATION_TIMED_OUT
;
513 return _dispatch_group_wait_slow(dsema
, timeout
);
518 dispatch_group_notify_f(dispatch_group_t dg
, dispatch_queue_t dq
, void *ctxt
,
519 void (*func
)(void *))
521 dispatch_semaphore_t dsema
= (dispatch_semaphore_t
)dg
;
522 struct dispatch_sema_notify_s
*dsn
, *prev
;
524 // FIXME -- this should be updated to use the continuation cache
525 while (!(dsn
= calloc(1, sizeof(*dsn
)))) {
530 dsn
->dsn_ctxt
= ctxt
;
531 dsn
->dsn_func
= func
;
532 _dispatch_retain(dq
);
533 dispatch_atomic_store_barrier();
534 prev
= dispatch_atomic_xchg2o(dsema
, dsema_notify_tail
, dsn
);
535 if (fastpath(prev
)) {
536 prev
->dsn_next
= dsn
;
538 _dispatch_retain(dg
);
539 (void)dispatch_atomic_xchg2o(dsema
, dsema_notify_head
, dsn
);
540 if (dsema
->dsema_value
== dsema
->dsema_orig
) {
541 _dispatch_group_wake(dsema
);
#ifdef __BLOCKS__
// Block-based wrapper over dispatch_group_notify_f: copy the block to the
// heap and release it after it runs.
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_block_t db)
{
	dispatch_group_notify_f(dg, dq, _dispatch_Block_copy(db),
			_dispatch_call_block_and_release);
}
#endif
557 #pragma mark _dispatch_thread_semaphore_t
560 static _dispatch_thread_semaphore_t
561 _dispatch_thread_semaphore_create(void)
563 _dispatch_safe_fork
= false;
567 while (slowpath(kr
= semaphore_create(mach_task_self(), &s4
,
568 SYNC_POLICY_FIFO
, 0))) {
569 DISPATCH_VERIFY_MIG(kr
);
575 int ret
= sem_init(&s4
, 0, 0);
576 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
583 _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema
)
586 semaphore_t s4
= (semaphore_t
)sema
;
587 kern_return_t kr
= semaphore_destroy(mach_task_self(), s4
);
588 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
590 sem_t s4
= (sem_t
)sema
;
591 int ret
= sem_destroy(&s4
);
592 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
597 _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema
)
600 semaphore_t s4
= (semaphore_t
)sema
;
601 kern_return_t kr
= semaphore_signal(s4
);
602 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
604 sem_t s4
= (sem_t
)sema
;
605 int ret
= sem_post(&s4
);
606 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
611 _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema
)
614 semaphore_t s4
= (semaphore_t
)sema
;
617 kr
= semaphore_wait(s4
);
618 } while (slowpath(kr
== KERN_ABORTED
));
619 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
621 sem_t s4
= (sem_t
)sema
;
625 } while (slowpath(ret
!= 0));
626 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
630 _dispatch_thread_semaphore_t
631 _dispatch_get_thread_semaphore(void)
633 _dispatch_thread_semaphore_t sema
= (_dispatch_thread_semaphore_t
)
634 _dispatch_thread_getspecific(dispatch_sema4_key
);
635 if (slowpath(!sema
)) {
636 return _dispatch_thread_semaphore_create();
638 _dispatch_thread_setspecific(dispatch_sema4_key
, NULL
);
643 _dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema
)
645 _dispatch_thread_semaphore_t old_sema
= (_dispatch_thread_semaphore_t
)
646 _dispatch_thread_getspecific(dispatch_sema4_key
);
647 _dispatch_thread_setspecific(dispatch_sema4_key
, (void*)sema
);
648 if (slowpath(old_sema
)) {
649 return _dispatch_thread_semaphore_dispose(old_sema
);