/*
 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_APACHE_LICENSE_HEADER_START@
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * @APPLE_APACHE_LICENSE_HEADER_END@
 */
// semaphores are too fundamental to use the dispatch_assume*() macros
// NOTE(review): the #if USE_MACH_SEM / USE_POSIX_SEM guards and the
// do/while(0) closers were lost in extraction; reconstructed — verify
// against upstream libdispatch.
#if USE_MACH_SEM
// Crash the process on any unexpected kern_return_t from a Mach
// semaphore call; a failure here indicates internal logic corruption.
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		if (slowpath(x)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#endif
#if USE_POSIX_SEM
// Crash the process when a POSIX sem_* call reports failure (-1).
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (slowpath((x) == -1)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#endif
38 DISPATCH_WEAK
// rdar://problem/8503746
39 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema
);
41 static void _dispatch_semaphore_dispose(dispatch_semaphore_t dsema
);
42 static size_t _dispatch_semaphore_debug(dispatch_semaphore_t dsema
, char *buf
,
44 static long _dispatch_group_wake(dispatch_semaphore_t dsema
);
47 #pragma mark dispatch_semaphore_t
49 struct dispatch_semaphore_vtable_s
{
50 DISPATCH_VTABLE_HEADER(dispatch_semaphore_s
);
53 const struct dispatch_semaphore_vtable_s _dispatch_semaphore_vtable
= {
54 .do_type
= DISPATCH_SEMAPHORE_TYPE
,
55 .do_kind
= "semaphore",
56 .do_dispose
= _dispatch_semaphore_dispose
,
57 .do_debug
= _dispatch_semaphore_debug
,
61 dispatch_semaphore_create(long value
)
63 dispatch_semaphore_t dsema
;
65 // If the internal value is negative, then the absolute of the value is
66 // equal to the number of waiting threads. Therefore it is bogus to
67 // initialize the semaphore with a negative value.
72 dsema
= calloc(1, sizeof(struct dispatch_semaphore_s
));
74 if (fastpath(dsema
)) {
75 dsema
->do_vtable
= &_dispatch_semaphore_vtable
;
76 dsema
->do_next
= DISPATCH_OBJECT_LISTLESS
;
77 dsema
->do_ref_cnt
= 1;
78 dsema
->do_xref_cnt
= 1;
79 dsema
->do_targetq
= dispatch_get_global_queue(
80 DISPATCH_QUEUE_PRIORITY_DEFAULT
, 0);
81 dsema
->dsema_value
= value
;
82 dsema
->dsema_orig
= value
;
84 int ret
= sem_init(&dsema
->dsema_sem
, 0, 0);
85 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
#if USE_MACH_SEM
// Lazily allocate the Mach semaphore port backing *s4, exactly once.
// Racing creators both make a port; the CAS loser destroys its extra one.
// NOTE(review): the local declarations, the early-return on an already
// created port, and the retry sleep were lost in extraction;
// reconstructed — verify against upstream libdispatch.
static void
_dispatch_semaphore_create_port(semaphore_t *s4)
{
	kern_return_t kr;
	semaphore_t tmp;

	if (*s4) {
		return;
	}

	// lazily allocate the semaphore port

	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.
	// 3) Use the per-thread semaphore port.

	while ((kr = semaphore_create(mach_task_self(), &tmp,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		sleep(1);
	}

	if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) {
		// Lost the race; another thread installed a port first.
		kr = semaphore_destroy(mach_task_self(), tmp);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}

	// A Mach port now exists, so fork() is no longer safe.
	_dispatch_safe_fork = false;
}
#endif
126 _dispatch_semaphore_dispose(dispatch_semaphore_t dsema
)
128 if (dsema
->dsema_value
< dsema
->dsema_orig
) {
129 DISPATCH_CLIENT_CRASH(
130 "Semaphore/group object deallocated while in use");
135 if (dsema
->dsema_port
) {
136 kr
= semaphore_destroy(mach_task_self(), dsema
->dsema_port
);
137 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
139 if (dsema
->dsema_waiter_port
) {
140 kr
= semaphore_destroy(mach_task_self(), dsema
->dsema_waiter_port
);
141 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
144 int ret
= sem_destroy(&dsema
->dsema_sem
);
145 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
148 _dispatch_dispose(dsema
);
152 _dispatch_semaphore_debug(dispatch_semaphore_t dsema
, char *buf
, size_t bufsiz
)
155 offset
+= snprintf(&buf
[offset
], bufsiz
- offset
, "%s[%p] = { ",
156 dx_kind(dsema
), dsema
);
157 offset
+= _dispatch_object_debug_attr(dsema
, &buf
[offset
], bufsiz
- offset
);
159 offset
+= snprintf(&buf
[offset
], bufsiz
- offset
, "port = 0x%u, ",
162 offset
+= snprintf(&buf
[offset
], bufsiz
- offset
,
163 "value = %ld, orig = %ld }", dsema
->dsema_value
, dsema
->dsema_orig
);
169 _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema
)
171 // Before dsema_sent_ksignals is incremented we can rely on the reference
172 // held by the waiter. However, once this value is incremented the waiter
173 // may return between the atomic increment and the semaphore_signal(),
174 // therefore an explicit reference must be held in order to safely access
175 // dsema after the atomic increment.
176 _dispatch_retain(dsema
);
178 (void)dispatch_atomic_inc2o(dsema
, dsema_sent_ksignals
);
181 _dispatch_semaphore_create_port(&dsema
->dsema_port
);
182 kern_return_t kr
= semaphore_signal(dsema
->dsema_port
);
183 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
185 int ret
= sem_post(&dsema
->dsema_sem
);
186 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
189 _dispatch_release(dsema
);
194 dispatch_semaphore_signal(dispatch_semaphore_t dsema
)
196 dispatch_atomic_release_barrier();
197 long value
= dispatch_atomic_inc2o(dsema
, dsema_value
);
198 if (fastpath(value
> 0)) {
201 if (slowpath(value
== LONG_MIN
)) {
202 DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()");
204 return _dispatch_semaphore_signal_slow(dsema
);
209 _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema
,
210 dispatch_time_t timeout
)
215 // Mach semaphores appear to sometimes spuriously wake up. Therefore,
216 // we keep a parallel count of the number of times a Mach semaphore is
217 // signaled (6880961).
218 while ((orig
= dsema
->dsema_sent_ksignals
)) {
219 if (dispatch_atomic_cmpxchg2o(dsema
, dsema_sent_ksignals
, orig
,
226 mach_timespec_t _timeout
;
229 _dispatch_semaphore_create_port(&dsema
->dsema_port
);
231 // From xnu/osfmk/kern/sync_sema.c:
232 // wait_semaphore->count = -1; /* we don't keep an actual count */
234 // The code above does not match the documentation, and that fact is
235 // not surprising. The documented semantics are clumsy to use in any
236 // practical way. The above hack effectively tricks the rest of the
237 // Mach semaphore logic to behave like the libdispatch algorithm.
242 uint64_t nsec
= _dispatch_timeout(timeout
);
243 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
244 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
245 kr
= slowpath(semaphore_timedwait(dsema
->dsema_port
, _timeout
));
246 } while (kr
== KERN_ABORTED
);
248 if (kr
!= KERN_OPERATION_TIMED_OUT
) {
249 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
252 // Fall through and try to undo what the fast path did to
253 // dsema->dsema_value
254 case DISPATCH_TIME_NOW
:
255 while ((orig
= dsema
->dsema_value
) < 0) {
256 if (dispatch_atomic_cmpxchg2o(dsema
, dsema_value
, orig
, orig
+ 1)) {
257 return KERN_OPERATION_TIMED_OUT
;
260 // Another thread called semaphore_signal().
261 // Fall through and drain the wakeup.
262 case DISPATCH_TIME_FOREVER
:
264 kr
= semaphore_wait(dsema
->dsema_port
);
265 } while (kr
== KERN_ABORTED
);
266 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
270 struct timespec _timeout
;
276 uint64_t nsec
= _dispatch_timeout(timeout
);
277 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
278 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
279 ret
= slowpath(sem_timedwait(&dsema
->dsema_sem
, &_timeout
));
280 } while (ret
== -1 && errno
== EINTR
);
282 if (ret
== -1 && errno
!= ETIMEDOUT
) {
283 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
286 // Fall through and try to undo what the fast path did to
287 // dsema->dsema_value
288 case DISPATCH_TIME_NOW
:
289 while ((orig
= dsema
->dsema_value
) < 0) {
290 if (dispatch_atomic_cmpxchg2o(dsema
, dsema_value
, orig
, orig
+ 1)) {
295 // Another thread called semaphore_signal().
296 // Fall through and drain the wakeup.
297 case DISPATCH_TIME_FOREVER
:
299 ret
= sem_wait(&dsema
->dsema_sem
);
301 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
310 dispatch_semaphore_wait(dispatch_semaphore_t dsema
, dispatch_time_t timeout
)
312 long value
= dispatch_atomic_dec2o(dsema
, dsema_value
);
313 dispatch_atomic_acquire_barrier();
314 if (fastpath(value
>= 0)) {
317 return _dispatch_semaphore_wait_slow(dsema
, timeout
);
321 #pragma mark dispatch_group_t
324 dispatch_group_create(void)
326 return (dispatch_group_t
)dispatch_semaphore_create(LONG_MAX
);
330 dispatch_group_enter(dispatch_group_t dg
)
332 dispatch_semaphore_t dsema
= (dispatch_semaphore_t
)dg
;
334 (void)dispatch_semaphore_wait(dsema
, DISPATCH_TIME_FOREVER
);
339 _dispatch_group_wake(dispatch_semaphore_t dsema
)
341 struct dispatch_sema_notify_s
*next
, *head
, *tail
= NULL
;
344 head
= dispatch_atomic_xchg2o(dsema
, dsema_notify_head
, NULL
);
346 // snapshot before anything is notified/woken <rdar://problem/8554546>
347 tail
= dispatch_atomic_xchg2o(dsema
, dsema_notify_tail
, NULL
);
349 rval
= dispatch_atomic_xchg2o(dsema
, dsema_group_waiters
, 0);
351 // wake group waiters
353 _dispatch_semaphore_create_port(&dsema
->dsema_waiter_port
);
355 kern_return_t kr
= semaphore_signal(dsema
->dsema_waiter_port
);
356 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
360 int ret
= sem_post(&dsema
->dsema_sem
);
361 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
366 // async group notify blocks
368 dispatch_async_f(head
->dsn_queue
, head
->dsn_ctxt
, head
->dsn_func
);
369 _dispatch_release(head
->dsn_queue
);
370 next
= fastpath(head
->dsn_next
);
371 if (!next
&& head
!= tail
) {
372 while (!(next
= fastpath(head
->dsn_next
))) {
373 _dispatch_hardware_pause();
377 } while ((head
= next
));
378 _dispatch_release(dsema
);
384 dispatch_group_leave(dispatch_group_t dg
)
386 dispatch_semaphore_t dsema
= (dispatch_semaphore_t
)dg
;
387 dispatch_atomic_release_barrier();
388 long value
= dispatch_atomic_inc2o(dsema
, dsema_value
);
389 if (slowpath(value
== LONG_MIN
)) {
390 DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()");
392 if (slowpath(value
== dsema
->dsema_orig
)) {
393 (void)_dispatch_group_wake(dsema
);
399 _dispatch_group_wait_slow(dispatch_semaphore_t dsema
, dispatch_time_t timeout
)
404 // check before we cause another signal to be sent by incrementing
405 // dsema->dsema_group_waiters
406 if (dsema
->dsema_value
== dsema
->dsema_orig
) {
407 return _dispatch_group_wake(dsema
);
409 // Mach semaphores appear to sometimes spuriously wake up. Therefore,
410 // we keep a parallel count of the number of times a Mach semaphore is
411 // signaled (6880961).
412 (void)dispatch_atomic_inc2o(dsema
, dsema_group_waiters
);
413 // check the values again in case we need to wake any threads
414 if (dsema
->dsema_value
== dsema
->dsema_orig
) {
415 return _dispatch_group_wake(dsema
);
419 mach_timespec_t _timeout
;
422 _dispatch_semaphore_create_port(&dsema
->dsema_waiter_port
);
424 // From xnu/osfmk/kern/sync_sema.c:
425 // wait_semaphore->count = -1; /* we don't keep an actual count */
427 // The code above does not match the documentation, and that fact is
428 // not surprising. The documented semantics are clumsy to use in any
429 // practical way. The above hack effectively tricks the rest of the
430 // Mach semaphore logic to behave like the libdispatch algorithm.
435 uint64_t nsec
= _dispatch_timeout(timeout
);
436 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
437 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
438 kr
= slowpath(semaphore_timedwait(dsema
->dsema_waiter_port
,
440 } while (kr
== KERN_ABORTED
);
442 if (kr
!= KERN_OPERATION_TIMED_OUT
) {
443 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
446 // Fall through and try to undo the earlier change to
447 // dsema->dsema_group_waiters
448 case DISPATCH_TIME_NOW
:
449 while ((orig
= dsema
->dsema_group_waiters
)) {
450 if (dispatch_atomic_cmpxchg2o(dsema
, dsema_group_waiters
, orig
,
452 return KERN_OPERATION_TIMED_OUT
;
455 // Another thread called semaphore_signal().
456 // Fall through and drain the wakeup.
457 case DISPATCH_TIME_FOREVER
:
459 kr
= semaphore_wait(dsema
->dsema_waiter_port
);
460 } while (kr
== KERN_ABORTED
);
461 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
465 struct timespec _timeout
;
471 uint64_t nsec
= _dispatch_timeout(timeout
);
472 _timeout
.tv_sec
= (typeof(_timeout
.tv_sec
))(nsec
/ NSEC_PER_SEC
);
473 _timeout
.tv_nsec
= (typeof(_timeout
.tv_nsec
))(nsec
% NSEC_PER_SEC
);
474 ret
= slowpath(sem_timedwait(&dsema
->dsema_sem
, &_timeout
));
475 } while (ret
== -1 && errno
== EINTR
);
477 if (!(ret
== -1 && errno
== ETIMEDOUT
)) {
478 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
481 // Fall through and try to undo the earlier change to
482 // dsema->dsema_group_waiters
483 case DISPATCH_TIME_NOW
:
484 while ((orig
= dsema
->dsema_group_waiters
)) {
485 if (dispatch_atomic_cmpxchg2o(dsema
, dsema_group_waiters
, orig
,
491 // Another thread called semaphore_signal().
492 // Fall through and drain the wakeup.
493 case DISPATCH_TIME_FOREVER
:
495 ret
= sem_wait(&dsema
->dsema_sem
);
496 } while (ret
== -1 && errno
== EINTR
);
497 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
506 dispatch_group_wait(dispatch_group_t dg
, dispatch_time_t timeout
)
508 dispatch_semaphore_t dsema
= (dispatch_semaphore_t
)dg
;
510 if (dsema
->dsema_value
== dsema
->dsema_orig
) {
515 return KERN_OPERATION_TIMED_OUT
;
521 return _dispatch_group_wait_slow(dsema
, timeout
);
526 dispatch_group_notify_f(dispatch_group_t dg
, dispatch_queue_t dq
, void *ctxt
,
527 void (*func
)(void *))
529 dispatch_semaphore_t dsema
= (dispatch_semaphore_t
)dg
;
530 struct dispatch_sema_notify_s
*dsn
, *prev
;
532 // FIXME -- this should be updated to use the continuation cache
533 while (!(dsn
= calloc(1, sizeof(*dsn
)))) {
538 dsn
->dsn_ctxt
= ctxt
;
539 dsn
->dsn_func
= func
;
540 _dispatch_retain(dq
);
541 dispatch_atomic_store_barrier();
542 prev
= dispatch_atomic_xchg2o(dsema
, dsema_notify_tail
, dsn
);
543 if (fastpath(prev
)) {
544 prev
->dsn_next
= dsn
;
546 _dispatch_retain(dg
);
547 (void)dispatch_atomic_xchg2o(dsema
, dsema_notify_head
, dsn
);
548 if (dsema
->dsema_value
== dsema
->dsema_orig
) {
549 _dispatch_group_wake(dsema
);
#ifdef __BLOCKS__
// Block-based wrapper: copy the block to the heap and forward to the
// function-pointer variant with the standard run-and-release thunk.
// NOTE(review): the __BLOCKS__ guard and the dispatch_block_t parameter
// line were lost in extraction; reconstructed — verify.
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_block_t db)
{
	dispatch_group_notify_f(dg, dq, _dispatch_Block_copy(db),
			_dispatch_call_block_and_release);
}
#endif
565 #pragma mark _dispatch_thread_semaphore_t
568 static _dispatch_thread_semaphore_t
569 _dispatch_thread_semaphore_create(void)
574 while (slowpath(kr
= semaphore_create(mach_task_self(), &s4
,
575 SYNC_POLICY_FIFO
, 0))) {
576 DISPATCH_VERIFY_MIG(kr
);
582 int ret
= sem_init(&s4
, 0, 0);
583 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
590 _dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema
)
593 semaphore_t s4
= (semaphore_t
)sema
;
594 kern_return_t kr
= semaphore_destroy(mach_task_self(), s4
);
595 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
597 sem_t s4
= (sem_t
)sema
;
598 int ret
= sem_destroy(&s4
);
599 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
604 _dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema
)
607 semaphore_t s4
= (semaphore_t
)sema
;
608 kern_return_t kr
= semaphore_signal(s4
);
609 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
611 sem_t s4
= (sem_t
)sema
;
612 int ret
= sem_post(&s4
);
613 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
618 _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema
)
621 semaphore_t s4
= (semaphore_t
)sema
;
624 kr
= semaphore_wait(s4
);
625 } while (slowpath(kr
== KERN_ABORTED
));
626 DISPATCH_SEMAPHORE_VERIFY_KR(kr
);
628 sem_t s4
= (sem_t
)sema
;
632 } while (slowpath(ret
!= 0));
633 DISPATCH_SEMAPHORE_VERIFY_RET(ret
);
637 _dispatch_thread_semaphore_t
638 _dispatch_get_thread_semaphore(void)
640 _dispatch_thread_semaphore_t sema
= (_dispatch_thread_semaphore_t
)
641 _dispatch_thread_getspecific(dispatch_sema4_key
);
642 if (slowpath(!sema
)) {
643 return _dispatch_thread_semaphore_create();
645 _dispatch_thread_setspecific(dispatch_sema4_key
, NULL
);
650 _dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema
)
652 _dispatch_thread_semaphore_t old_sema
= (_dispatch_thread_semaphore_t
)
653 _dispatch_thread_getspecific(dispatch_sema4_key
);
654 _dispatch_thread_setspecific(dispatch_sema4_key
, (void*)sema
);
655 if (slowpath(old_sema
)) {
656 return _dispatch_thread_semaphore_dispose(old_sema
);