/*
 * (web-view residue from the original scrape, preserved as a comment)
 * git.saurik.com Git - apple/libdispatch.git/blob - src/semaphore.c
 * libdispatch-228.23.tar.gz
 * [apple/libdispatch.git] / src / semaphore.c
 */
1 /*
2 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 #include "internal.h"
22
// semaphores are too fundamental to use the dispatch_assume*() macros
#if USE_MACH_SEM
// Abort the process if a Mach semaphore call returned a non-zero
// kern_return_t: any kernel-level failure here means the user-space
// accounting and the kernel state have diverged.
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		if (slowpath(x)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#elif USE_POSIX_SEM
// Same idea for POSIX semaphores, which report failure as -1 (errno set).
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (slowpath((x) == -1)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#endif

DISPATCH_WEAK // rdar://problem/8503746
long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema);

// Forward declaration: groups are implemented on top of
// struct dispatch_semaphore_s, so the wake routine takes a semaphore.
static long _dispatch_group_wake(dispatch_semaphore_t dsema);
42
43 #pragma mark -
44 #pragma mark dispatch_semaphore_t
45
46 static void
47 _dispatch_semaphore_init(long value, dispatch_object_t dou)
48 {
49 dispatch_semaphore_t dsema = dou._dsema;
50
51 dsema->do_next = DISPATCH_OBJECT_LISTLESS;
52 dsema->do_targetq = dispatch_get_global_queue(
53 DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
54 dsema->dsema_value = value;
55 dsema->dsema_orig = value;
56 #if USE_POSIX_SEM
57 int ret = sem_init(&dsema->dsema_sem, 0, 0);
58 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
59 #endif
60 }
61
62 dispatch_semaphore_t
63 dispatch_semaphore_create(long value)
64 {
65 dispatch_semaphore_t dsema;
66
67 // If the internal value is negative, then the absolute of the value is
68 // equal to the number of waiting threads. Therefore it is bogus to
69 // initialize the semaphore with a negative value.
70 if (value < 0) {
71 return NULL;
72 }
73
74 dsema = _dispatch_alloc(DISPATCH_VTABLE(semaphore),
75 sizeof(struct dispatch_semaphore_s));
76 _dispatch_semaphore_init(value, dsema);
77 return dsema;
78 }
79
#if USE_MACH_SEM
// Lazily allocate the Mach semaphore port backing *s4.  Safe to call
// concurrently: the loser of the cmpxchg publish race destroys its
// extra port.  Retries allocation forever (sleeping) rather than failing.
static void
_dispatch_semaphore_create_port(semaphore_t *s4)
{
	kern_return_t kr;
	semaphore_t tmp;

	if (*s4) {
		// Already created by us or another thread.
		return;
	}
	// Mach ports do not survive fork(); mark the process fork-unsafe.
	_dispatch_safe_fork = false;

	// lazily allocate the semaphore port

	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.
	// 3) Use the per-thread semaphore port.

	while ((kr = semaphore_create(mach_task_self(), &tmp,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		sleep(1);
	}

	// Publish the new port; if another thread won the race, destroy ours.
	if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) {
		kr = semaphore_destroy(mach_task_self(), tmp);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
}
#endif
111
// Destructor: release the kernel resources backing a semaphore or group.
// Deliberately crashes the client if the object is still in use
// (dsema_value below dsema_orig means waiters/outstanding group members).
void
_dispatch_semaphore_dispose(dispatch_object_t dou)
{
	dispatch_semaphore_t dsema = dou._dsema;

	if (dsema->dsema_value < dsema->dsema_orig) {
		DISPATCH_CLIENT_CRASH(
				"Semaphore/group object deallocated while in use");
	}

#if USE_MACH_SEM
	kern_return_t kr;
	// Both ports are lazily created, so either may still be zero.
	if (dsema->dsema_port) {
		kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
	if (dsema->dsema_waiter_port) {
		kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
#elif USE_POSIX_SEM
	int ret = sem_destroy(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
137
138 size_t
139 _dispatch_semaphore_debug(dispatch_object_t dou, char *buf, size_t bufsiz)
140 {
141 dispatch_semaphore_t dsema = dou._dsema;
142
143 size_t offset = 0;
144 offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
145 dx_kind(dsema), dsema);
146 offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset);
147 #if USE_MACH_SEM
148 offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ",
149 dsema->dsema_port);
150 #endif
151 offset += snprintf(&buf[offset], bufsiz - offset,
152 "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig);
153 return offset;
154 }
155
DISPATCH_NOINLINE
long
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	// Slow path of dispatch_semaphore_signal(): the fast path saw a
	// non-positive value, so a blocked waiter must be woken in the kernel.
	//
	// Before dsema_sent_ksignals is incremented we can rely on the reference
	// held by the waiter. However, once this value is incremented the waiter
	// may return between the atomic increment and the semaphore_signal(),
	// therefore an explicit reference must be held in order to safely access
	// dsema after the atomic increment.
	_dispatch_retain(dsema);

	// Parallel signal count, consumed by the wait slow path to absorb
	// spurious kernel wakeups.
	(void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals);

#if USE_MACH_SEM
	_dispatch_semaphore_create_port(&dsema->dsema_port);
	kern_return_t kr = semaphore_signal(dsema->dsema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret = sem_post(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif

	_dispatch_release(dsema);
	// Non-zero result: a waiter was (or will be) woken.
	return 1;
}
181
// Signal (increment) the semaphore.  Returns non-zero when a waiting
// thread is woken, zero otherwise.
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	// Release barrier: pairs with the acquire barrier in
	// dispatch_semaphore_wait() so prior writes are visible to the waiter.
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (fastpath(value > 0)) {
		// No threads were waiting; nothing to wake.
		return 0;
	}
	if (slowpath(value == LONG_MIN)) {
		// The increment overflowed LONG_MAX: far more signals than waits.
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()");
	}
	// value <= 0: at least one waiter is blocked (or about to block).
	return _dispatch_semaphore_signal_slow(dsema);
}
195
196 DISPATCH_NOINLINE
197 static long
198 _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
199 dispatch_time_t timeout)
200 {
201 long orig;
202
203 again:
204 // Mach semaphores appear to sometimes spuriously wake up. Therefore,
205 // we keep a parallel count of the number of times a Mach semaphore is
206 // signaled (6880961).
207 while ((orig = dsema->dsema_sent_ksignals)) {
208 if (dispatch_atomic_cmpxchg2o(dsema, dsema_sent_ksignals, orig,
209 orig - 1)) {
210 return 0;
211 }
212 }
213
214 #if USE_MACH_SEM
215 mach_timespec_t _timeout;
216 kern_return_t kr;
217
218 _dispatch_semaphore_create_port(&dsema->dsema_port);
219
220 // From xnu/osfmk/kern/sync_sema.c:
221 // wait_semaphore->count = -1; /* we don't keep an actual count */
222 //
223 // The code above does not match the documentation, and that fact is
224 // not surprising. The documented semantics are clumsy to use in any
225 // practical way. The above hack effectively tricks the rest of the
226 // Mach semaphore logic to behave like the libdispatch algorithm.
227
228 switch (timeout) {
229 default:
230 do {
231 uint64_t nsec = _dispatch_timeout(timeout);
232 _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
233 _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
234 kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout));
235 } while (kr == KERN_ABORTED);
236
237 if (kr != KERN_OPERATION_TIMED_OUT) {
238 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
239 break;
240 }
241 // Fall through and try to undo what the fast path did to
242 // dsema->dsema_value
243 case DISPATCH_TIME_NOW:
244 while ((orig = dsema->dsema_value) < 0) {
245 if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) {
246 return KERN_OPERATION_TIMED_OUT;
247 }
248 }
249 // Another thread called semaphore_signal().
250 // Fall through and drain the wakeup.
251 case DISPATCH_TIME_FOREVER:
252 do {
253 kr = semaphore_wait(dsema->dsema_port);
254 } while (kr == KERN_ABORTED);
255 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
256 break;
257 }
258 #elif USE_POSIX_SEM
259 struct timespec _timeout;
260 int ret;
261
262 switch (timeout) {
263 default:
264 do {
265 uint64_t nsec = _dispatch_timeout(timeout);
266 _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
267 _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
268 ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
269 } while (ret == -1 && errno == EINTR);
270
271 if (ret == -1 && errno != ETIMEDOUT) {
272 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
273 break;
274 }
275 // Fall through and try to undo what the fast path did to
276 // dsema->dsema_value
277 case DISPATCH_TIME_NOW:
278 while ((orig = dsema->dsema_value) < 0) {
279 if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) {
280 errno = ETIMEDOUT;
281 return -1;
282 }
283 }
284 // Another thread called semaphore_signal().
285 // Fall through and drain the wakeup.
286 case DISPATCH_TIME_FOREVER:
287 do {
288 ret = sem_wait(&dsema->dsema_sem);
289 } while (ret != 0);
290 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
291 break;
292 }
293 #endif
294
295 goto again;
296 }
297
// Decrement the semaphore, blocking (up to timeout) when the resulting
// value is negative.  Returns zero on success, non-zero on timeout.
long
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	long value = dispatch_atomic_dec2o(dsema, dsema_value);
	// Acquire barrier: pairs with the release barrier in
	// dispatch_semaphore_signal().
	dispatch_atomic_acquire_barrier();
	if (fastpath(value >= 0)) {
		// The semaphore had capacity; no need to block.
		return 0;
	}
	return _dispatch_semaphore_wait_slow(dsema, timeout);
}
308
309 #pragma mark -
310 #pragma mark dispatch_group_t
311
312 dispatch_group_t
313 dispatch_group_create(void)
314 {
315 dispatch_group_t dg = _dispatch_alloc(DISPATCH_VTABLE(group),
316 sizeof(struct dispatch_semaphore_s));
317 _dispatch_semaphore_init(LONG_MAX, dg);
318 return dg;
319 }
320
321 void
322 dispatch_group_enter(dispatch_group_t dg)
323 {
324 dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
325
326 (void)dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER);
327 }
328
// Wake everything waiting on an (now empty) group: kernel-blocked
// dispatch_group_wait() callers and queued dispatch_group_notify()
// handlers.  Returns 0 (used directly as the group-wait result).
DISPATCH_NOINLINE
static long
_dispatch_group_wake(dispatch_semaphore_t dsema)
{
	struct dispatch_sema_notify_s *next, *head, *tail = NULL;
	long rval;

	// Atomically take ownership of the notify list.
	head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL);
	if (head) {
		// snapshot before anything is notified/woken <rdar://problem/8554546>
		tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL);
	}
	// Claim all pending waiter wakeups in one shot.
	rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0);
	if (rval) {
		// wake group waiters
#if USE_MACH_SEM
		_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
		do {
			kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port);
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		} while (--rval);
#elif USE_POSIX_SEM
		do {
			int ret = sem_post(&dsema->dsema_sem);
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		} while (--rval);
#endif
	}
	if (head) {
		// async group notify blocks
		do {
			dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func);
			// Balance the retain taken in dispatch_group_notify_f().
			_dispatch_release(head->dsn_queue);
			next = fastpath(head->dsn_next);
			if (!next && head != tail) {
				// A concurrent notify_f swapped the tail but has not yet
				// stored dsn_next; spin until the link becomes visible.
				while (!(next = fastpath(head->dsn_next))) {
					_dispatch_hardware_pause();
				}
			}
			free(head);
		} while ((head = next));
		// Balance the group retain taken when the first notify was queued.
		_dispatch_release(dsema);
	}
	return 0;
}
374
// Leave the group.  When this leave balances the last outstanding enter
// (the value returns to dsema_orig), wake all waiters and notifies.
void
dispatch_group_leave(dispatch_group_t dg)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	// Release barrier: pairs with the acquire in the wait paths.
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (slowpath(value == LONG_MIN)) {
		// Overflow past LONG_MAX: more leaves than enters.
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()");
	}
	if (slowpath(value == dsema->dsema_orig)) {
		(void)_dispatch_group_wake(dsema);
	}
}
388
// Kernel-blocking slow path for dispatch_group_wait().  Registers as a
// waiter (dsema_group_waiters) and blocks on the dedicated waiter
// semaphore until _dispatch_group_wake() signals it or the timeout fires.
DISPATCH_NOINLINE
static long
_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	long orig;

again:
	// check before we cause another signal to be sent by incrementing
	// dsema->dsema_group_waiters
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}
	// Mach semaphores appear to sometimes spuriously wake up. Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	(void)dispatch_atomic_inc2o(dsema, dsema_group_waiters);
	// check the values again in case we need to wake any threads
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}

#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;

	// Groups block on a separate port from plain semaphore waits.
	_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1; /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	switch (timeout) {
	default:
		// Finite deadline: relative timed wait, retried on KERN_ABORTED.
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port,
					_timeout));
		} while (kr == KERN_ABORTED);

		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				return KERN_OPERATION_TIMED_OUT;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			kr = semaphore_wait(dsema->dsema_waiter_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		break;
	}
#elif USE_POSIX_SEM
	struct timespec _timeout;
	int ret;

	switch (timeout) {
	default:
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
		} while (ret == -1 && errno == EINTR);

		// Break on success or hard failure; only a timeout falls through.
		if (!(ret == -1 && errno == ETIMEDOUT)) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				errno = ETIMEDOUT;
				return -1;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret == -1 && errno == EINTR);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		break;
	}
#endif

	// Re-check: the group may have emptied while we were blocked.
	goto again;
}
496
// Wait until the group becomes empty or the timeout elapses.  Returns
// zero on success; on timeout, KERN_OPERATION_TIMED_OUT (Mach) or -1
// with errno = ETIMEDOUT (POSIX).
long
dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;

	if (dsema->dsema_value == dsema->dsema_orig) {
		// Group is already empty.
		return 0;
	}
	if (timeout == 0) {
		// DISPATCH_TIME_NOW: pure poll, never block.
#if USE_MACH_SEM
		return KERN_OPERATION_TIMED_OUT;
#elif USE_POSIX_SEM
		errno = ETIMEDOUT;
		return (-1);
#endif
	}
	return _dispatch_group_wait_slow(dsema, timeout);
}
515
// Enqueue (func, ctxt) to be submitted to dq once the group is empty.
// Notify nodes form a lock-free singly linked list published by swapping
// the tail; _dispatch_group_wake() drains it.
DISPATCH_NOINLINE
void
dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt,
		void (*func)(void *))
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	struct dispatch_sema_notify_s *dsn, *prev;

	// FIXME -- this should be updated to use the continuation cache
	while (!(dsn = calloc(1, sizeof(*dsn)))) {
		sleep(1);
	}

	dsn->dsn_queue = dq;
	dsn->dsn_ctxt = ctxt;
	dsn->dsn_func = func;
	// The queue must survive until the notify fires; released in wake.
	_dispatch_retain(dq);
	// Ensure the node is fully initialized before it becomes visible.
	dispatch_atomic_store_barrier();
	prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn);
	if (fastpath(prev)) {
		// Link behind the previous tail; the wake path spins on dsn_next
		// if it observes the new tail before this store lands.
		prev->dsn_next = dsn;
	} else {
		// First node on the list: retain the group (released in wake)
		// and publish the head.
		_dispatch_retain(dg);
		(void)dispatch_atomic_xchg2o(dsema, dsema_notify_head, dsn);
		if (dsema->dsema_value == dsema->dsema_orig) {
			// Group already empty: fire the notify immediately.
			_dispatch_group_wake(dsema);
		}
	}
}
545
#ifdef __BLOCKS__
// Block-based wrapper: copies the block to the heap and arranges for it
// to be released after it runs.
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_block_t db)
{
	dispatch_group_notify_f(dg, dq, _dispatch_Block_copy(db),
			_dispatch_call_block_and_release);
}
#endif
555
556 #pragma mark -
557 #pragma mark _dispatch_thread_semaphore_t
558
// Create a kernel semaphore for single-thread (uncontended handoff) use.
DISPATCH_NOINLINE
static _dispatch_thread_semaphore_t
_dispatch_thread_semaphore_create(void)
{
	// Kernel semaphores don't survive fork(); mark the process unsafe.
	_dispatch_safe_fork = false;
#if USE_MACH_SEM
	semaphore_t s4;
	kern_return_t kr;
	// Retry forever on failure (e.g. port-space exhaustion).
	while (slowpath(kr = semaphore_create(mach_task_self(), &s4,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		sleep(1);
	}
	return s4;
#elif USE_POSIX_SEM
	sem_t s4;
	int ret = sem_init(&s4, 0, 0);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
	// NOTE(review): sem_t is returned by value and later operated on via
	// the address of a copy (see _dispatch_thread_semaphore_wait); this is
	// only correct if sem_t is an integral handle on the target platform
	// -- verify for each USE_POSIX_SEM target.
	return s4;
#endif
}
580
// Destroy a semaphore made by _dispatch_thread_semaphore_create().
DISPATCH_NOINLINE
void
_dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t s4 = (sem_t)sema;
	int ret = sem_destroy(&s4);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
595
// Signal a per-thread semaphore; any failure indicates corrupted state
// and crashes via the VERIFY macros.
void
_dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr = semaphore_signal(s4);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t s4 = (sem_t)sema;
	int ret = sem_post(&s4);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
609
// Block on a per-thread semaphore until signaled, absorbing interrupted
// waits.
void
_dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr;
	do {
		kr = semaphore_wait(s4);
	} while (slowpath(kr == KERN_ABORTED));
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t s4 = (sem_t)sema;
	int ret;
	// NOTE(review): this loop retries on ANY nonzero return, not just
	// EINTR, so a persistent error (e.g. EINVAL) spins forever and the
	// VERIFY below can never fire -- confirm this is intentional.
	do {
		ret = sem_wait(&s4);
	} while (slowpath(ret != 0));
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
629
630 _dispatch_thread_semaphore_t
631 _dispatch_get_thread_semaphore(void)
632 {
633 _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t)
634 _dispatch_thread_getspecific(dispatch_sema4_key);
635 if (slowpath(!sema)) {
636 return _dispatch_thread_semaphore_create();
637 }
638 _dispatch_thread_setspecific(dispatch_sema4_key, NULL);
639 return sema;
640 }
641
642 void
643 _dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema)
644 {
645 _dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t)
646 _dispatch_thread_getspecific(dispatch_sema4_key);
647 _dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema);
648 if (slowpath(old_sema)) {
649 return _dispatch_thread_semaphore_dispose(old_sema);
650 }
651 }