]> git.saurik.com Git - apple/libdispatch.git/blob - src/semaphore.c
d3fd431174cf6d385ac6831cfaea9da846bde467
[apple/libdispatch.git] / src / semaphore.c
1 /*
2 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
#include "internal.h"

// semaphores are too fundamental to use the dispatch_assume*() macros
#if USE_MACH_SEM
// Any nonzero kern_return_t from a Mach semaphore call means the
// semaphore/group state is corrupt; crash rather than continue.
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		if (slowpath(x)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#elif USE_POSIX_SEM
// POSIX sem_* calls report failure by returning -1 (cause in errno);
// treat that as corrupt state and crash rather than continue.
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (slowpath((x) == -1)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#endif

DISPATCH_WEAK // rdar://problem/8503746
long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema);

static long _dispatch_group_wake(dispatch_semaphore_t dsema);
42
43 #pragma mark -
44 #pragma mark dispatch_semaphore_t
45
46 static void
47 _dispatch_semaphore_init(long value, dispatch_object_t dou)
48 {
49 dispatch_semaphore_t dsema = dou._dsema;
50
51 dsema->do_next = DISPATCH_OBJECT_LISTLESS;
52 dsema->do_targetq = dispatch_get_global_queue(
53 DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
54 dsema->dsema_value = value;
55 dsema->dsema_orig = value;
56 #if USE_POSIX_SEM
57 int ret = sem_init(&dsema->dsema_sem, 0, 0);
58 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
59 #endif
60 }
61
62 dispatch_semaphore_t
63 dispatch_semaphore_create(long value)
64 {
65 dispatch_semaphore_t dsema;
66
67 // If the internal value is negative, then the absolute of the value is
68 // equal to the number of waiting threads. Therefore it is bogus to
69 // initialize the semaphore with a negative value.
70 if (value < 0) {
71 return NULL;
72 }
73
74 dsema = _dispatch_alloc(DISPATCH_VTABLE(semaphore),
75 sizeof(struct dispatch_semaphore_s));
76 _dispatch_semaphore_init(value, dsema);
77 return dsema;
78 }
79
#if USE_MACH_SEM
// Lazily allocate the Mach semaphore port backing *s4, exactly once.
// Safe to race: losers of the cmpxchg destroy their redundant port.
static void
_dispatch_semaphore_create_port(semaphore_t *s4)
{
	kern_return_t kr;
	semaphore_t tmp;

	if (*s4) {
		return; // already allocated
	}
	// Mach ports do not survive fork(); record that fork is unsafe now.
	_dispatch_safe_fork = false;

	// lazily allocate the semaphore port

	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.
	// 3) Use the per-thread semaphore port.

	// semaphore_create() failures are treated as transient resource
	// shortage: retry forever rather than surface an error.
	while ((kr = semaphore_create(mach_task_self(), &tmp,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		sleep(1);
	}

	// Publish the port; if another thread won the race, destroy ours.
	if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) {
		kr = semaphore_destroy(mach_task_self(), tmp);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
}
#endif
111
112 void
113 _dispatch_semaphore_dispose(dispatch_object_t dou)
114 {
115 dispatch_semaphore_t dsema = dou._dsema;
116
117 if (dsema->dsema_value < dsema->dsema_orig) {
118 DISPATCH_CLIENT_CRASH(
119 "Semaphore/group object deallocated while in use");
120 }
121
122 #if USE_MACH_SEM
123 kern_return_t kr;
124 if (dsema->dsema_port) {
125 kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
126 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
127 }
128 if (dsema->dsema_waiter_port) {
129 kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port);
130 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
131 }
132 #elif USE_POSIX_SEM
133 int ret = sem_destroy(&dsema->dsema_sem);
134 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
135 #endif
136 }
137
138 size_t
139 _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz)
140 {
141 dispatch_semaphore_t dsema = dou._dsema;
142
143 size_t offset = 0;
144 offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
145 dx_kind(dsema), dsema);
146 offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset);
147 #if USE_MACH_SEM
148 offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ",
149 dsema->dsema_port);
150 #endif
151 offset += snprintf(&buf[offset], bufsiz - offset,
152 "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig);
153 return offset;
154 }
155
DISPATCH_NOINLINE
long
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	// Slow path of dispatch_semaphore_signal(): at least one thread is
	// waiting, so wake one via the kernel semaphore. Always returns 1.
	//
	// Before dsema_sent_ksignals is incremented we can rely on the reference
	// held by the waiter. However, once this value is incremented the waiter
	// may return between the atomic increment and the semaphore_signal(),
	// therefore an explicit reference must be held in order to safely access
	// dsema after the atomic increment.
	_dispatch_retain(dsema);

	// Parallel count of kernel signals sent; waiters consume it to filter
	// out spurious kernel wakeups (see _dispatch_semaphore_wait_slow()).
	(void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals);

#if USE_MACH_SEM
	_dispatch_semaphore_create_port(&dsema->dsema_port);
	kern_return_t kr = semaphore_signal(dsema->dsema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret = sem_post(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif

	_dispatch_release(dsema);
	return 1;
}
181
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	// Release barrier: pairs with the acquire barrier in
	// dispatch_semaphore_wait() so prior writes are visible to the waker.
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (fastpath(value > 0)) {
		// Pre-increment value was >= 0: no thread was waiting.
		return 0;
	}
	if (slowpath(value == LONG_MIN)) {
		// Incrementing LONG_MAX wrapped around: more signals/leaves than
		// waits/enters.
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave() or "
				"dispatch_semaphore_signal()");
	}
	// value <= 0: at least one waiter must be woken via the kernel.
	return _dispatch_semaphore_signal_slow(dsema);
}
196
DISPATCH_NOINLINE
static long
_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
		dispatch_time_t timeout)
{
	// Slow path of dispatch_semaphore_wait(): the fast-path decrement went
	// negative, so block in the kernel until signaled or timed out.
	// Returns 0 on success; KERN_OPERATION_TIMED_OUT (Mach) or -1 with
	// errno == ETIMEDOUT (POSIX) on timeout.
	long orig;

again:
	// Mach semaphores appear to sometimes spuriously wake up. Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	// Consume one pending kernel signal, if available, and succeed.
	while ((orig = dsema->dsema_sent_ksignals)) {
		if (dispatch_atomic_cmpxchg2o(dsema, dsema_sent_ksignals, orig,
				orig - 1)) {
			return 0;
		}
	}

#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;

	_dispatch_semaphore_create_port(&dsema->dsema_port);

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1; /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	switch (timeout) {
	default:
		// Finite deadline: recompute the remaining time and retry whenever
		// the wait is interrupted (KERN_ABORTED).
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout));
		} while (kr == KERN_ABORTED);

		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
		// Fall through and try to undo what the fast path did to
		// dsema->dsema_value
	case DISPATCH_TIME_NOW:
		// Cancel this thread's reservation by re-incrementing the value.
		// If the value is no longer negative, a signal already arrived
		// and must be drained below instead.
		while ((orig = dsema->dsema_value) < 0) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) {
				return KERN_OPERATION_TIMED_OUT;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			kr = semaphore_wait(dsema->dsema_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		break;
	}
#elif USE_POSIX_SEM
	struct timespec _timeout;
	int ret;

	switch (timeout) {
	default:
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			// NOTE(review): POSIX sem_timedwait() expects an *absolute*
			// CLOCK_REALTIME timespec, but _timeout appears to be filled
			// with a relative duration here (same pattern as the relative
			// mach_timespec_t above) -- confirm _dispatch_timeout()'s
			// semantics on POSIX platforms.
			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno != ETIMEDOUT) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
		// Fall through and try to undo what the fast path did to
		// dsema->dsema_value
	case DISPATCH_TIME_NOW:
		// Cancel this thread's reservation; if the value has gone
		// non-negative, a post already arrived and is drained below.
		while ((orig = dsema->dsema_value) < 0) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) {
				errno = ETIMEDOUT;
				return -1;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret != 0);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		break;
	}
#endif

	// The kernel wait returned: loop back to consume the matching ksignal
	// (guards against spurious wakeups).
	goto again;
}
298
long
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	// Decrement first; a non-negative result means a resource was
	// available and no kernel round-trip is needed.
	long value = dispatch_atomic_dec2o(dsema, dsema_value);
	// Acquire barrier: pairs with the release barrier in
	// dispatch_semaphore_signal().
	dispatch_atomic_acquire_barrier();
	if (fastpath(value >= 0)) {
		return 0;
	}
	// Negative value: this thread must block (or time out) in the kernel.
	return _dispatch_semaphore_wait_slow(dsema, timeout);
}
309
310 #pragma mark -
311 #pragma mark dispatch_group_t
312
313 dispatch_group_t
314 dispatch_group_create(void)
315 {
316 dispatch_group_t dg = _dispatch_alloc(DISPATCH_VTABLE(group),
317 sizeof(struct dispatch_semaphore_s));
318 _dispatch_semaphore_init(LONG_MAX, dg);
319 return dg;
320 }
321
322 void
323 dispatch_group_enter(dispatch_group_t dg)
324 {
325 dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
326
327 (void)dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER);
328 }
329
DISPATCH_NOINLINE
static long
_dispatch_group_wake(dispatch_semaphore_t dsema)
{
	// Wake everything attached to an empty group: threads blocked in
	// dispatch_group_wait() and callbacks queued by
	// dispatch_group_notify(). Always returns 0.
	struct dispatch_sema_notify_s *next, *head, *tail = NULL;
	long rval;

	// Atomically take ownership of the whole notify list.
	head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL);
	if (head) {
		// snapshot before anything is notified/woken <rdar://problem/8554546>
		tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL);
	}
	// Claim all currently-registered waiters in one shot.
	rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0);
	if (rval) {
		// wake group waiters
#if USE_MACH_SEM
		_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
		do {
			kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port);
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		} while (--rval);
#elif USE_POSIX_SEM
		do {
			int ret = sem_post(&dsema->dsema_sem);
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		} while (--rval);
#endif
	}
	if (head) {
		// async group notify blocks
		do {
			dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func);
			// Drop the queue reference taken by dispatch_group_notify_f().
			_dispatch_release(head->dsn_queue);
			next = fastpath(head->dsn_next);
			if (!next && head != tail) {
				// An enqueuer swapped the tail but has not yet stored
				// dsn_next; spin until the link becomes visible.
				while (!(next = fastpath(head->dsn_next))) {
					_dispatch_hardware_pause();
				}
			}
			free(head);
		} while ((head = next));
		// Drop the group reference taken by dispatch_group_notify_f()
		// when the list transitioned from empty to non-empty.
		_dispatch_release(dsema);
	}
	return 0;
}
375
void
dispatch_group_leave(dispatch_group_t dg)
{
	// Balance a prior dispatch_group_enter(): signal the underlying
	// semaphore (increments dsema_value).
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;

	dispatch_semaphore_signal(dsema);
	// Value back at its original (LONG_MAX) means the group is now empty:
	// wake waiters and fire queued notifications.
	// NOTE(review): this read is not atomic with the signal above, so a
	// concurrent enter may race it; correctness relies on
	// _dispatch_group_wake() being tolerant of extra calls -- confirm.
	if (dsema->dsema_value == dsema->dsema_orig) {
		(void)_dispatch_group_wake(dsema);
	}
}
386
DISPATCH_NOINLINE
static long
_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	// Slow path of dispatch_group_wait(): block until the group empties or
	// the timeout elapses. Returns 0 on success; KERN_OPERATION_TIMED_OUT
	// (Mach) or -1 with errno == ETIMEDOUT (POSIX) on timeout.
	long orig;

again:
	// check before we cause another signal to be sent by incrementing
	// dsema->dsema_group_waiters
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}
	// Mach semaphores appear to sometimes spuriously wake up. Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	(void)dispatch_atomic_inc2o(dsema, dsema_group_waiters);
	// check the values again in case we need to wake any threads
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}

#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;

	_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1; /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	switch (timeout) {
	default:
		// Finite deadline: recompute the remaining time and retry when
		// interrupted (KERN_ABORTED).
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port,
					_timeout));
		} while (kr == KERN_ABORTED);

		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		// Deregister this waiter; if the count already went to zero, a
		// wake is in flight and must be drained below instead.
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				return KERN_OPERATION_TIMED_OUT;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			kr = semaphore_wait(dsema->dsema_waiter_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		break;
	}
#elif USE_POSIX_SEM
	struct timespec _timeout;
	int ret;

	switch (timeout) {
	default:
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			// NOTE(review): sem_timedwait() expects an *absolute*
			// CLOCK_REALTIME timespec but _timeout appears relative here;
			// confirm _dispatch_timeout()'s semantics (same concern as in
			// _dispatch_semaphore_wait_slow()).
			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
		} while (ret == -1 && errno == EINTR);

		if (!(ret == -1 && errno == ETIMEDOUT)) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		// Deregister this waiter; if the count already went to zero, a
		// wake is in flight and must be drained below instead.
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				errno = ETIMEDOUT;
				return -1;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret == -1 && errno == EINTR);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		break;
	}
#endif

	// Woken: loop back to re-check whether the group is actually empty
	// (guards against spurious wakeups).
	goto again;
}
494
495 long
496 dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout)
497 {
498 dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
499
500 if (dsema->dsema_value == dsema->dsema_orig) {
501 return 0;
502 }
503 if (timeout == 0) {
504 #if USE_MACH_SEM
505 return KERN_OPERATION_TIMED_OUT;
506 #elif USE_POSIX_SEM
507 errno = ETIMEDOUT;
508 return (-1);
509 #endif
510 }
511 return _dispatch_group_wait_slow(dsema, timeout);
512 }
513
DISPATCH_NOINLINE
void
dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt,
		void (*func)(void *))
{
	// Queue func(ctxt) for async submission to dq once the group empties.
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	struct dispatch_sema_notify_s *dsn, *prev;

	// FIXME -- this should be updated to use the continuation cache
	// Allocation is not allowed to fail; retry until it succeeds.
	while (!(dsn = calloc(1, sizeof(*dsn)))) {
		sleep(1);
	}

	dsn->dsn_queue = dq;
	dsn->dsn_ctxt = ctxt;
	dsn->dsn_func = func;
	// Hold the target queue until the notification is submitted
	// (released in _dispatch_group_wake()).
	_dispatch_retain(dq);
	// Make the node fully visible before it becomes reachable via the tail.
	dispatch_atomic_store_barrier();
	prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn);
	if (fastpath(prev)) {
		// List was non-empty: link after the previous tail. A concurrent
		// _dispatch_group_wake() spins until this store is visible.
		prev->dsn_next = dsn;
	} else {
		// List was empty: take a group reference (dropped by
		// _dispatch_group_wake()) and publish the head.
		_dispatch_retain(dg);
		dsema->dsema_notify_head = dsn;
		// If the group is already empty, fire the notification now.
		if (dsema->dsema_value == dsema->dsema_orig) {
			_dispatch_group_wake(dsema);
		}
	}
}
543
#ifdef __BLOCKS__
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_block_t db)
{
	// Block-based wrapper: heap-copy the block and funnel through the
	// function-based variant; the copy is released after invocation.
	dispatch_group_notify_f(dg, dq, _dispatch_Block_copy(db),
			_dispatch_call_block_and_release);
}
#endif
553
554 #pragma mark -
555 #pragma mark _dispatch_thread_semaphore_t
556
DISPATCH_NOINLINE
static _dispatch_thread_semaphore_t
_dispatch_thread_semaphore_create(void)
{
	// Create a bare kernel semaphore for per-thread use (no dispatch
	// object wrapper; see _dispatch_get_thread_semaphore()).
	// Kernel semaphores do not survive fork(); record that fork is unsafe.
	_dispatch_safe_fork = false;
#if USE_MACH_SEM
	semaphore_t s4;
	kern_return_t kr;
	// semaphore_create() failures are treated as transient; retry forever.
	while (slowpath(kr = semaphore_create(mach_task_self(), &s4,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		sleep(1);
	}
	return s4;
#elif USE_POSIX_SEM
	// NOTE(review): sem_t is returned *by value* here and copied back in
	// the signal/wait/dispose helpers; this only works where sem_t fits in
	// _dispatch_thread_semaphore_t and tolerates copying -- confirm per
	// supported platform.
	sem_t s4;
	int ret = sem_init(&s4, 0, 0);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
	return s4;
#endif
}
578
DISPATCH_NOINLINE
void
_dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema)
{
	// Destroy a per-thread kernel semaphore created by
	// _dispatch_thread_semaphore_create().
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	// NOTE(review): sem_t round-trips by value through
	// _dispatch_thread_semaphore_t here -- see the note in
	// _dispatch_thread_semaphore_create().
	sem_t s4 = (sem_t)sema;
	int ret = sem_destroy(&s4);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
593
void
_dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema)
{
	// Wake the (single) thread blocked in _dispatch_thread_semaphore_wait().
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr = semaphore_signal(s4);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t s4 = (sem_t)sema;
	int ret = sem_post(&s4);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
607
void
_dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema)
{
	// Block until the per-thread semaphore is signaled, retrying through
	// interrupted waits.
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr;
	do {
		kr = semaphore_wait(s4);
	} while (slowpath(kr == KERN_ABORTED));
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t s4 = (sem_t)sema;
	int ret;
	do {
		ret = sem_wait(&s4);
	} while (slowpath(ret != 0));
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
627
628 _dispatch_thread_semaphore_t
629 _dispatch_get_thread_semaphore(void)
630 {
631 _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t)
632 _dispatch_thread_getspecific(dispatch_sema4_key);
633 if (slowpath(!sema)) {
634 return _dispatch_thread_semaphore_create();
635 }
636 _dispatch_thread_setspecific(dispatch_sema4_key, NULL);
637 return sema;
638 }
639
640 void
641 _dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema)
642 {
643 _dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t)
644 _dispatch_thread_getspecific(dispatch_sema4_key);
645 _dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema);
646 if (slowpath(old_sema)) {
647 return _dispatch_thread_semaphore_dispose(old_sema);
648 }
649 }