/* src/semaphore.c — Apple libdispatch (release libdispatch-187.10) */
1 /*
2 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
21 #include "internal.h"
22
// semaphores are too fundamental to use the dispatch_assume*() macros
#if USE_MACH_SEM
// Crash the process if a Mach semaphore call returned a non-zero
// kern_return_t. Any failure here can only come from internal misuse of
// the semaphore/group primitives, not from a recoverable runtime error.
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		if (slowpath(x)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#elif USE_POSIX_SEM
// Crash the process if a POSIX sem_*() call failed (returned -1); as
// above, a failure here indicates flawed internal logic.
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (slowpath((x) == -1)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#endif
37
// Weak definition so the symbol can be interposed/overridden
// (rdar://problem/8503746).
DISPATCH_WEAK // rdar://problem/8503746
long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema);

static void _dispatch_semaphore_dispose(dispatch_semaphore_t dsema);
static size_t _dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf,
		size_t bufsiz);
// Groups are implemented on top of struct dispatch_semaphore_s, hence the
// dispatch_semaphore_t parameter.
static long _dispatch_group_wake(dispatch_semaphore_t dsema);
45
#pragma mark -
#pragma mark dispatch_semaphore_t

// Vtable type for semaphore objects; carries the standard dispatch object
// header (type, kind, dispose, debug, ...).
struct dispatch_semaphore_vtable_s {
	DISPATCH_VTABLE_HEADER(dispatch_semaphore_s);
};

// Shared vtable instance for all dispatch semaphores (and for groups,
// which reuse the semaphore implementation).
const struct dispatch_semaphore_vtable_s _dispatch_semaphore_vtable = {
	.do_type = DISPATCH_SEMAPHORE_TYPE,
	.do_kind = "semaphore",
	.do_dispose = _dispatch_semaphore_dispose,
	.do_debug = _dispatch_semaphore_debug,
};
59
60 dispatch_semaphore_t
61 dispatch_semaphore_create(long value)
62 {
63 dispatch_semaphore_t dsema;
64
65 // If the internal value is negative, then the absolute of the value is
66 // equal to the number of waiting threads. Therefore it is bogus to
67 // initialize the semaphore with a negative value.
68 if (value < 0) {
69 return NULL;
70 }
71
72 dsema = calloc(1, sizeof(struct dispatch_semaphore_s));
73
74 if (fastpath(dsema)) {
75 dsema->do_vtable = &_dispatch_semaphore_vtable;
76 dsema->do_next = DISPATCH_OBJECT_LISTLESS;
77 dsema->do_ref_cnt = 1;
78 dsema->do_xref_cnt = 1;
79 dsema->do_targetq = dispatch_get_global_queue(
80 DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
81 dsema->dsema_value = value;
82 dsema->dsema_orig = value;
83 #if USE_POSIX_SEM
84 int ret = sem_init(&dsema->dsema_sem, 0, 0);
85 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
86 #endif
87 }
88
89 return dsema;
90 }
91
#if USE_MACH_SEM
// Lazily allocate the Mach semaphore port pointed to by *s4, exactly once.
// Safe under concurrent callers: losers of the publish race destroy the
// port they created.
static void
_dispatch_semaphore_create_port(semaphore_t *s4)
{
	kern_return_t kr;
	semaphore_t tmp;

	// Already allocated (possibly by another thread) — nothing to do.
	if (*s4) {
		return;
	}

	// lazily allocate the semaphore port

	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.
	// 3) Use the per-thread semaphore port.

	// Retry until semaphore_create() succeeds (returns zero); failures are
	// treated as transient resource exhaustion.
	while ((kr = semaphore_create(mach_task_self(), &tmp,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		sleep(1);
	}

	// Publish the new port; if another thread already published one,
	// destroy ours instead of leaking it.
	if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) {
		kr = semaphore_destroy(mach_task_self(), tmp);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}

	// Mach ports do not survive fork(); record that fork is no longer safe.
	_dispatch_safe_fork = false;
}
#endif
124
// Final teardown of a semaphore/group object once all references are gone.
static void
_dispatch_semaphore_dispose(dispatch_semaphore_t dsema)
{
	// A value below the original means threads are still waiting (or group
	// members have not left); deallocating now would strand them.
	if (dsema->dsema_value < dsema->dsema_orig) {
		DISPATCH_CLIENT_CRASH(
				"Semaphore/group object deallocated while in use");
	}

#if USE_MACH_SEM
	// Destroy whichever kernel ports were lazily created.
	kern_return_t kr;
	if (dsema->dsema_port) {
		kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
	if (dsema->dsema_waiter_port) {
		kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
#elif USE_POSIX_SEM
	int ret = sem_destroy(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif

	_dispatch_dispose(dsema);
}
150
151 static size_t
152 _dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, size_t bufsiz)
153 {
154 size_t offset = 0;
155 offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
156 dx_kind(dsema), dsema);
157 offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset);
158 #if USE_MACH_SEM
159 offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ",
160 dsema->dsema_port);
161 #endif
162 offset += snprintf(&buf[offset], bufsiz - offset,
163 "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig);
164 return offset;
165 }
166
// Slow path of dispatch_semaphore_signal(): at least one thread is (or was)
// waiting, so wake one via the kernel semaphore. Always returns 1.
DISPATCH_NOINLINE
long
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	// Before dsema_sent_ksignals is incremented we can rely on the reference
	// held by the waiter. However, once this value is incremented the waiter
	// may return between the atomic increment and the semaphore_signal(),
	// therefore an explicit reference must be held in order to safely access
	// dsema after the atomic increment.
	_dispatch_retain(dsema);

	// Parallel signal count; the waiter consumes it to filter out spurious
	// kernel wakeups.
	(void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals);

#if USE_MACH_SEM
	_dispatch_semaphore_create_port(&dsema->dsema_port);
	kern_return_t kr = semaphore_signal(dsema->dsema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret = sem_post(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif

	_dispatch_release(dsema);
	return 1;
}
192
// Increment the semaphore; wake a waiter if any thread is blocked.
// Returns non-zero when a thread was woken, zero otherwise.
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	// Release barrier before the increment so the woken thread observes
	// all writes made prior to the signal.
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (fastpath(value > 0)) {
		// No threads were waiting; nothing to wake.
		return 0;
	}
	if (slowpath(value == LONG_MIN)) {
		// Increment wrapped past LONG_MAX: more signals than waits.
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_semaphore_signal()");
	}
	// value <= 0: at least one waiter must be woken via the kernel.
	return _dispatch_semaphore_signal_slow(dsema);
}
206
// Slow path of dispatch_semaphore_wait(): the fast-path decrement went
// negative, so block on the kernel semaphore until signaled or timed out.
// Returns 0 on success, or the platform timeout indication
// (KERN_OPERATION_TIMED_OUT, or -1 with errno == ETIMEDOUT).
DISPATCH_NOINLINE
static long
_dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
		dispatch_time_t timeout)
{
	long orig;

again:
	// Mach semaphores appear to sometimes spuriously wake up. Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	while ((orig = dsema->dsema_sent_ksignals)) {
		if (dispatch_atomic_cmpxchg2o(dsema, dsema_sent_ksignals, orig,
				orig - 1)) {
			// Consumed one genuine signal — done.
			return 0;
		}
	}

#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;

	_dispatch_semaphore_create_port(&dsema->dsema_port);

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1; /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	switch (timeout) {
	default:
		// Finite deadline: recompute the remaining time on each retry,
		// since KERN_ABORTED (e.g. a Unix signal) restarts the wait.
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout));
		} while (kr == KERN_ABORTED);

		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
		// Fall through and try to undo what the fast path did to
		// dsema->dsema_value
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_value) < 0) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) {
				// Successfully backed out this waiter's decrement.
				return KERN_OPERATION_TIMED_OUT;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			kr = semaphore_wait(dsema->dsema_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		break;
	}
#elif USE_POSIX_SEM
	struct timespec _timeout;
	int ret;

	switch (timeout) {
	default:
		// Finite deadline: recompute remaining time; EINTR restarts the wait.
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
		} while (ret == -1 && errno == EINTR);

		if (ret == -1 && errno != ETIMEDOUT) {
			// Any failure other than timeout is fatal (crashes in VERIFY).
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
		// Fall through and try to undo what the fast path did to
		// dsema->dsema_value
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_value) < 0) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) {
				// Successfully backed out this waiter's decrement.
				errno = ETIMEDOUT;
				return -1;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret != 0);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		break;
	}
#endif

	// Kernel wakeup occurred; loop to consume the matching ksignal (or
	// detect that it was spurious).
	goto again;
}
308
// Decrement the semaphore, blocking (up to timeout) if the count is
// exhausted. Returns 0 on success, non-zero on timeout.
long
dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	long value = dispatch_atomic_dec2o(dsema, dsema_value);
	// Acquire barrier pairs with the release barrier in
	// dispatch_semaphore_signal().
	dispatch_atomic_acquire_barrier();
	if (fastpath(value >= 0)) {
		// A signal was available; no need to block.
		return 0;
	}
	return _dispatch_semaphore_wait_slow(dsema, timeout);
}
319
#pragma mark -
#pragma mark dispatch_group_t

// A dispatch group is a dispatch semaphore in disguise: it starts at
// LONG_MAX, and enter/leave map onto wait/signal.
dispatch_group_t
dispatch_group_create(void)
{
	return (dispatch_group_t)dispatch_semaphore_create(LONG_MAX);
}
328
// Mark one more outstanding block/task in the group.
void
dispatch_group_enter(dispatch_group_t dg)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;

	// Entering is a semaphore wait: it moves dsema_value away from
	// dsema_orig (LONG_MAX), marking the group non-empty.
	(void)dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER);
}
336
// Wake everything waiting on an (now empty) group: kernel-level waiters in
// dispatch_group_wait() and queued dispatch_group_notify() callbacks.
// Always returns 0.
DISPATCH_NOINLINE
static long
_dispatch_group_wake(dispatch_semaphore_t dsema)
{
	struct dispatch_sema_notify_s *next, *head, *tail = NULL;
	long rval;

	// Atomically take ownership of the notify list.
	head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL);
	if (head) {
		// snapshot before anything is notified/woken <rdar://problem/8554546>
		tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL);
	}
	// Claim all pending group waiters; signal once per waiter.
	rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0);
	if (rval) {
		// wake group waiters
#if USE_MACH_SEM
		_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
		do {
			kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port);
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		} while (--rval);
#elif USE_POSIX_SEM
		do {
			int ret = sem_post(&dsema->dsema_sem);
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		} while (--rval);
#endif
	}
	if (head) {
		// async group notify blocks
		do {
			dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func);
			_dispatch_release(head->dsn_queue);
			next = fastpath(head->dsn_next);
			if (!next && head != tail) {
				// An enqueuer swapped the tail but has not yet stored
				// dsn_next; spin until the link becomes visible.
				while (!(next = fastpath(head->dsn_next))) {
					_dispatch_hardware_pause();
				}
			}
			free(head);
		} while ((head = next));
		// Balance the retain taken when the first notify node was enqueued
		// (see dispatch_group_notify_f()).
		_dispatch_release(dsema);
	}
	return 0;
}
382
// Mark one outstanding block/task in the group as finished; if this was
// the last one, wake waiters and fire notify callbacks.
void
dispatch_group_leave(dispatch_group_t dg)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	// Mirrors dispatch_semaphore_signal(): release barrier, then increment
	// the value back toward dsema_orig.
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (slowpath(value == LONG_MIN)) {
		// Increment wrapped past LONG_MAX: more leaves than enters.
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave()");
	}
	if (slowpath(value == dsema->dsema_orig)) {
		// Last leave: the group is now empty.
		(void)_dispatch_group_wake(dsema);
	}
}
396
// Slow path of dispatch_group_wait(): block until the group empties or the
// timeout expires. Returns 0 on success, or the platform timeout
// indication (KERN_OPERATION_TIMED_OUT, or -1 with errno == ETIMEDOUT).
DISPATCH_NOINLINE
static long
_dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
{
	long orig;

again:
	// check before we cause another signal to be sent by incrementing
	// dsema->dsema_group_waiters
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}
	// Mach semaphores appear to sometimes spuriously wake up. Therefore,
	// we keep a parallel count of the number of times a Mach semaphore is
	// signaled (6880961).
	(void)dispatch_atomic_inc2o(dsema, dsema_group_waiters);
	// check the values again in case we need to wake any threads
	if (dsema->dsema_value == dsema->dsema_orig) {
		return _dispatch_group_wake(dsema);
	}

#if USE_MACH_SEM
	mach_timespec_t _timeout;
	kern_return_t kr;

	// Group waiters use a dedicated port, separate from dsema_port.
	_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);

	// From xnu/osfmk/kern/sync_sema.c:
	// wait_semaphore->count = -1; /* we don't keep an actual count */
	//
	// The code above does not match the documentation, and that fact is
	// not surprising. The documented semantics are clumsy to use in any
	// practical way. The above hack effectively tricks the rest of the
	// Mach semaphore logic to behave like the libdispatch algorithm.

	switch (timeout) {
	default:
		// Finite deadline: recompute remaining time on each retry, since
		// KERN_ABORTED restarts the wait.
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port,
					_timeout));
		} while (kr == KERN_ABORTED);

		if (kr != KERN_OPERATION_TIMED_OUT) {
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				// Successfully un-registered this waiter.
				return KERN_OPERATION_TIMED_OUT;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			kr = semaphore_wait(dsema->dsema_waiter_port);
		} while (kr == KERN_ABORTED);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		break;
	}
#elif USE_POSIX_SEM
	struct timespec _timeout;
	int ret;

	switch (timeout) {
	default:
		// Finite deadline: recompute remaining time; EINTR restarts the wait.
		do {
			uint64_t nsec = _dispatch_timeout(timeout);
			_timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
			_timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
			ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
		} while (ret == -1 && errno == EINTR);

		if (!(ret == -1 && errno == ETIMEDOUT)) {
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
			break;
		}
		// Fall through and try to undo the earlier change to
		// dsema->dsema_group_waiters
	case DISPATCH_TIME_NOW:
		while ((orig = dsema->dsema_group_waiters)) {
			if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
					orig - 1)) {
				// Successfully un-registered this waiter.
				errno = ETIMEDOUT;
				return -1;
			}
		}
		// Another thread called semaphore_signal().
		// Fall through and drain the wakeup.
	case DISPATCH_TIME_FOREVER:
		do {
			ret = sem_wait(&dsema->dsema_sem);
		} while (ret == -1 && errno == EINTR);
		DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		break;
	}
#endif

	// Woken by _dispatch_group_wake(); re-check the group state.
	goto again;
}
504
505 long
506 dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout)
507 {
508 dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
509
510 if (dsema->dsema_value == dsema->dsema_orig) {
511 return 0;
512 }
513 if (timeout == 0) {
514 #if USE_MACH_SEM
515 return KERN_OPERATION_TIMED_OUT;
516 #elif USE_POSIX_SEM
517 errno = ETIMEDOUT;
518 return (-1);
519 #endif
520 }
521 return _dispatch_group_wait_slow(dsema, timeout);
522 }
523
// Enqueue func(ctxt) to be submitted to dq once the group becomes empty.
DISPATCH_NOINLINE
void
dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt,
		void (*func)(void *))
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	struct dispatch_sema_notify_s *dsn, *prev;

	// FIXME -- this should be updated to use the continuation cache
	while (!(dsn = calloc(1, sizeof(*dsn)))) {
		sleep(1);
	}

	dsn->dsn_queue = dq;
	dsn->dsn_ctxt = ctxt;
	dsn->dsn_func = func;
	// The queue must stay alive until _dispatch_group_wake() submits the
	// callback and releases it.
	_dispatch_retain(dq);
	// Ensure the node's fields are visible before it is published.
	dispatch_atomic_store_barrier();
	// Publish the node as the new tail; prev is the old tail (NULL when the
	// list was empty).
	prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn);
	if (fastpath(prev)) {
		// Link into the list; _dispatch_group_wake() spins on dsn_next
		// until this store becomes visible.
		prev->dsn_next = dsn;
	} else {
		// First node: retain the group (released by _dispatch_group_wake())
		// and publish the head.
		_dispatch_retain(dg);
		(void)dispatch_atomic_xchg2o(dsema, dsema_notify_head, dsn);
		if (dsema->dsema_value == dsema->dsema_orig) {
			// Group is already empty; fire immediately.
			_dispatch_group_wake(dsema);
		}
	}
}
553
#ifdef __BLOCKS__
// Block-based wrapper: copy the block to the heap and route through the
// function-based implementation, which releases the copy after invoking it.
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_block_t db)
{
	dispatch_group_notify_f(dg, dq, _dispatch_Block_copy(db),
			_dispatch_call_block_and_release);
}
#endif
563
#pragma mark -
#pragma mark _dispatch_thread_semaphore_t

// Create a kernel-level semaphore for per-thread use, initial count zero.
DISPATCH_NOINLINE
static _dispatch_thread_semaphore_t
_dispatch_thread_semaphore_create(void)
{
#if USE_MACH_SEM
	semaphore_t s4;
	kern_return_t kr;
	// Retry on (presumably transient) failure, e.g. port exhaustion.
	while (slowpath(kr = semaphore_create(mach_task_self(), &s4,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		sleep(1);
	}
	return s4;
#elif USE_POSIX_SEM
	sem_t s4;
	int ret = sem_init(&s4, 0, 0);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
	// NOTE(review): this returns a stack-local sem_t converted to
	// _dispatch_thread_semaphore_t, which presumably only works on
	// platforms where sem_t is a scalar handle — confirm for the
	// supported targets.
	return s4;
#endif
}
587
// Destroy a semaphore obtained from _dispatch_thread_semaphore_create().
DISPATCH_NOINLINE
void
_dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t s4 = (sem_t)sema;
	int ret = sem_destroy(&s4);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
602
// Wake a thread blocked in _dispatch_thread_semaphore_wait().
void
_dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr = semaphore_signal(s4);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t s4 = (sem_t)sema;
	int ret = sem_post(&s4);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
616
// Block until the per-thread semaphore is signaled, retrying if the wait
// is interrupted.
void
_dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr;
	do {
		kr = semaphore_wait(s4);
	} while (slowpath(kr == KERN_ABORTED));
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	sem_t s4 = (sem_t)sema;
	int ret;
	do {
		ret = sem_wait(&s4);
	} while (slowpath(ret != 0));
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
636
637 _dispatch_thread_semaphore_t
638 _dispatch_get_thread_semaphore(void)
639 {
640 _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t)
641 _dispatch_thread_getspecific(dispatch_sema4_key);
642 if (slowpath(!sema)) {
643 return _dispatch_thread_semaphore_create();
644 }
645 _dispatch_thread_setspecific(dispatch_sema4_key, NULL);
646 return sema;
647 }
648
649 void
650 _dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema)
651 {
652 _dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t)
653 _dispatch_thread_getspecific(dispatch_sema4_key);
654 _dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema);
655 if (slowpath(old_sema)) {
656 return _dispatch_thread_semaphore_dispose(old_sema);
657 }
658 }