2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
4 * @APPLE_APACHE_LICENSE_HEADER_START@
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
18 * @APPLE_APACHE_LICENSE_HEADER_END@
23 DISPATCH_WEAK
// rdar://problem/8503746
24 long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema
);
27 #pragma mark dispatch_semaphore_class_t
30 _dispatch_semaphore_class_init(long value
, dispatch_semaphore_class_t dsemau
)
32 struct dispatch_semaphore_header_s
*dsema
= dsemau
._dsema_hdr
;
34 dsema
->do_next
= DISPATCH_OBJECT_LISTLESS
;
35 dsema
->do_targetq
= _dispatch_get_root_queue(DISPATCH_QOS_DEFAULT
, false);
36 dsema
->dsema_value
= value
;
37 _dispatch_sema4_init(&dsema
->dsema_sema
, _DSEMA4_POLICY_FIFO
);
41 #pragma mark dispatch_semaphore_t
44 dispatch_semaphore_create(long value
)
46 dispatch_semaphore_t dsema
;
48 // If the internal value is negative, then the absolute of the value is
49 // equal to the number of waiting threads. Therefore it is bogus to
50 // initialize the semaphore with a negative value.
52 return DISPATCH_BAD_INPUT
;
55 dsema
= (dispatch_semaphore_t
)_dispatch_object_alloc(
56 DISPATCH_VTABLE(semaphore
), sizeof(struct dispatch_semaphore_s
));
57 _dispatch_semaphore_class_init(value
, dsema
);
58 dsema
->dsema_orig
= value
;
63 _dispatch_semaphore_dispose(dispatch_object_t dou
,
64 DISPATCH_UNUSED
bool *allow_free
)
66 dispatch_semaphore_t dsema
= dou
._dsema
;
68 if (dsema
->dsema_value
< dsema
->dsema_orig
) {
69 DISPATCH_CLIENT_CRASH(dsema
->dsema_orig
- dsema
->dsema_value
,
70 "Semaphore object deallocated while in use");
73 _dispatch_sema4_dispose(&dsema
->dsema_sema
, _DSEMA4_POLICY_FIFO
);
77 _dispatch_semaphore_debug(dispatch_object_t dou
, char *buf
, size_t bufsiz
)
79 dispatch_semaphore_t dsema
= dou
._dsema
;
82 offset
+= dsnprintf(&buf
[offset
], bufsiz
- offset
, "%s[%p] = { ",
83 dx_kind(dsema
), dsema
);
84 offset
+= _dispatch_object_debug_attr(dsema
, &buf
[offset
], bufsiz
- offset
);
86 offset
+= dsnprintf(&buf
[offset
], bufsiz
- offset
, "port = 0x%u, ",
89 offset
+= dsnprintf(&buf
[offset
], bufsiz
- offset
,
90 "value = %ld, orig = %ld }", dsema
->dsema_value
, dsema
->dsema_orig
);
96 _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema
)
98 _dispatch_sema4_create(&dsema
->dsema_sema
, _DSEMA4_POLICY_FIFO
);
99 _dispatch_sema4_signal(&dsema
->dsema_sema
, 1);
104 dispatch_semaphore_signal(dispatch_semaphore_t dsema
)
106 long value
= os_atomic_inc2o(dsema
, dsema_value
, release
);
107 if (fastpath(value
> 0)) {
110 if (slowpath(value
== LONG_MIN
)) {
111 DISPATCH_CLIENT_CRASH(value
,
112 "Unbalanced call to dispatch_semaphore_signal()");
114 return _dispatch_semaphore_signal_slow(dsema
);
119 _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema
,
120 dispatch_time_t timeout
)
124 _dispatch_sema4_create(&dsema
->dsema_sema
, _DSEMA4_POLICY_FIFO
);
127 if (!_dispatch_sema4_timedwait(&dsema
->dsema_sema
, timeout
)) {
130 // Fall through and try to undo what the fast path did to
131 // dsema->dsema_value
132 case DISPATCH_TIME_NOW
:
133 orig
= dsema
->dsema_value
;
135 if (os_atomic_cmpxchgvw2o(dsema
, dsema_value
, orig
, orig
+ 1,
137 return _DSEMA4_TIMEOUT();
140 // Another thread called semaphore_signal().
141 // Fall through and drain the wakeup.
142 case DISPATCH_TIME_FOREVER
:
143 _dispatch_sema4_wait(&dsema
->dsema_sema
);
150 dispatch_semaphore_wait(dispatch_semaphore_t dsema
, dispatch_time_t timeout
)
152 long value
= os_atomic_dec2o(dsema
, dsema_value
, acquire
);
153 if (fastpath(value
>= 0)) {
156 return _dispatch_semaphore_wait_slow(dsema
, timeout
);
160 #pragma mark dispatch_group_t
162 DISPATCH_ALWAYS_INLINE
163 static inline dispatch_group_t
164 _dispatch_group_create_with_count(long count
)
166 dispatch_group_t dg
= (dispatch_group_t
)_dispatch_object_alloc(
167 DISPATCH_VTABLE(group
), sizeof(struct dispatch_group_s
));
168 _dispatch_semaphore_class_init(count
, dg
);
170 os_atomic_store2o(dg
, do_ref_cnt
, 1, relaxed
); // <rdar://problem/22318411>
176 dispatch_group_create(void)
178 return _dispatch_group_create_with_count(0);
182 _dispatch_group_create_and_enter(void)
184 return _dispatch_group_create_with_count(1);
188 dispatch_group_enter(dispatch_group_t dg
)
190 long value
= os_atomic_inc_orig2o(dg
, dg_value
, acquire
);
191 if (slowpath((unsigned long)value
>= (unsigned long)LONG_MAX
)) {
192 DISPATCH_CLIENT_CRASH(value
,
193 "Too many nested calls to dispatch_group_enter()");
196 _dispatch_retain(dg
); // <rdar://problem/22318411>
202 _dispatch_group_wake(dispatch_group_t dg
, bool needs_release
)
204 dispatch_continuation_t next
, head
, tail
= NULL
;
207 // cannot use os_mpsc_capture_snapshot() because we can have concurrent
208 // _dispatch_group_wake() calls
209 head
= os_atomic_xchg2o(dg
, dg_notify_head
, NULL
, relaxed
);
211 // snapshot before anything is notified/woken <rdar://problem/8554546>
212 tail
= os_atomic_xchg2o(dg
, dg_notify_tail
, NULL
, release
);
214 rval
= (long)os_atomic_xchg2o(dg
, dg_waiters
, 0, relaxed
);
216 // wake group waiters
217 _dispatch_sema4_create(&dg
->dg_sema
, _DSEMA4_POLICY_FIFO
);
218 _dispatch_sema4_signal(&dg
->dg_sema
, rval
);
220 uint16_t refs
= needs_release
? 1 : 0; // <rdar://problem/22318411>
222 // async group notify blocks
224 next
= os_mpsc_pop_snapshot_head(head
, tail
, do_next
);
225 dispatch_queue_t dsn_queue
= (dispatch_queue_t
)head
->dc_data
;
226 _dispatch_continuation_async(dsn_queue
, head
);
227 _dispatch_release(dsn_queue
);
228 } while ((head
= next
));
231 if (refs
) _dispatch_release_n(dg
, refs
);
236 dispatch_group_leave(dispatch_group_t dg
)
238 long value
= os_atomic_dec2o(dg
, dg_value
, release
);
239 if (slowpath(value
== 0)) {
240 return (void)_dispatch_group_wake(dg
, true);
242 if (slowpath(value
< 0)) {
243 DISPATCH_CLIENT_CRASH(value
,
244 "Unbalanced call to dispatch_group_leave()");
249 _dispatch_group_dispose(dispatch_object_t dou
, DISPATCH_UNUSED
bool *allow_free
)
251 dispatch_group_t dg
= dou
._dg
;
254 DISPATCH_CLIENT_CRASH(dg
->dg_value
,
255 "Group object deallocated while in use");
258 _dispatch_sema4_dispose(&dg
->dg_sema
, _DSEMA4_POLICY_FIFO
);
262 _dispatch_group_debug(dispatch_object_t dou
, char *buf
, size_t bufsiz
)
264 dispatch_group_t dg
= dou
._dg
;
267 offset
+= dsnprintf(&buf
[offset
], bufsiz
- offset
, "%s[%p] = { ",
269 offset
+= _dispatch_object_debug_attr(dg
, &buf
[offset
], bufsiz
- offset
);
271 offset
+= dsnprintf(&buf
[offset
], bufsiz
- offset
, "port = 0x%u, ",
274 offset
+= dsnprintf(&buf
[offset
], bufsiz
- offset
,
275 "count = %ld, waiters = %d }", dg
->dg_value
, dg
->dg_waiters
);
281 _dispatch_group_wait_slow(dispatch_group_t dg
, dispatch_time_t timeout
)
286 // check before we cause another signal to be sent by incrementing
288 value
= os_atomic_load2o(dg
, dg_value
, ordered
); // 19296565
290 return _dispatch_group_wake(dg
, false);
293 (void)os_atomic_inc2o(dg
, dg_waiters
, relaxed
);
294 // check the values again in case we need to wake any threads
295 value
= os_atomic_load2o(dg
, dg_value
, ordered
); // 19296565
297 _dispatch_group_wake(dg
, false);
298 // Fall through to consume the extra signal, forcing timeout to avoid
299 // useless setups as it won't block
300 timeout
= DISPATCH_TIME_FOREVER
;
303 _dispatch_sema4_create(&dg
->dg_sema
, _DSEMA4_POLICY_FIFO
);
306 if (!_dispatch_sema4_timedwait(&dg
->dg_sema
, timeout
)) {
309 // Fall through and try to undo the earlier change to
311 case DISPATCH_TIME_NOW
:
312 orig_waiters
= dg
->dg_waiters
;
313 while (orig_waiters
) {
314 if (os_atomic_cmpxchgvw2o(dg
, dg_waiters
, orig_waiters
,
315 orig_waiters
- 1, &orig_waiters
, relaxed
)) {
316 return _DSEMA4_TIMEOUT();
319 // Another thread is running _dispatch_group_wake()
320 // Fall through and drain the wakeup.
321 case DISPATCH_TIME_FOREVER
:
322 _dispatch_sema4_wait(&dg
->dg_sema
);
329 dispatch_group_wait(dispatch_group_t dg
, dispatch_time_t timeout
)
331 if (dg
->dg_value
== 0) {
335 return _DSEMA4_TIMEOUT();
337 return _dispatch_group_wait_slow(dg
, timeout
);
340 DISPATCH_ALWAYS_INLINE
342 _dispatch_group_notify(dispatch_group_t dg
, dispatch_queue_t dq
,
343 dispatch_continuation_t dsn
)
347 _dispatch_retain(dq
);
348 if (os_mpsc_push_update_tail(dg
, dg_notify
, dsn
, do_next
)) {
349 _dispatch_retain(dg
);
350 os_atomic_store2o(dg
, dg_notify_head
, dsn
, ordered
);
351 // seq_cst with atomic store to notify_head <rdar://problem/11750916>
352 if (os_atomic_load2o(dg
, dg_value
, ordered
) == 0) {
353 _dispatch_group_wake(dg
, false);
360 dispatch_group_notify_f(dispatch_group_t dg
, dispatch_queue_t dq
, void *ctxt
,
361 dispatch_function_t func
)
363 dispatch_continuation_t dsn
= _dispatch_continuation_alloc();
364 _dispatch_continuation_init_f(dsn
, dq
, ctxt
, func
, 0, 0,
365 DISPATCH_OBJ_CONSUME_BIT
);
366 _dispatch_group_notify(dg
, dq
, dsn
);
#ifdef __BLOCKS__
/*
 * Block variant of dispatch_group_notify().
 *
 * @param dg  Group to observe.
 * @param dq  Queue on which the block will run.
 * @param db  Block to invoke when the group empties.
 *
 * NOTE(review): the `db` parameter declaration and the __BLOCKS__ guard
 * were lost in extraction; reconstructed (db is referenced in the body) —
 * verify against upstream.
 */
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_block_t db)
{
	dispatch_continuation_t dsn = _dispatch_continuation_alloc();
	_dispatch_continuation_init(dsn, dq, db, 0, 0, DISPATCH_OBJ_CONSUME_BIT);
	_dispatch_group_notify(dg, dq, dsn);
}
#endif