/*
 * src/semaphore.c from Apple libdispatch (release libdispatch-187.5),
 * as published in the apple/libdispatch git repository.
 */
1 /*
2 * Copyright (c) 2008-2011 Apple Inc. All rights reserved.
3 *
4 * @APPLE_APACHE_LICENSE_HEADER_START@
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 *
18 * @APPLE_APACHE_LICENSE_HEADER_END@
19 */
20
#include "internal.h"

#if USE_POSIX_SEM
#include <time.h> // clock_gettime() for sem_timedwait() deadlines
#endif
22
// semaphores are too fundamental to use the dispatch_assume*() macros
#if USE_MACH_SEM
// Crash the process if a Mach semaphore call returned a non-zero
// kern_return_t: a kernel semaphore failure here indicates corrupted
// internal group/semaphore state, not a recoverable client error.
#define DISPATCH_SEMAPHORE_VERIFY_KR(x) do { \
		if (slowpath(x)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#elif USE_POSIX_SEM
// POSIX equivalent: sem_* functions report failure by returning -1
// (with the reason in errno).
#define DISPATCH_SEMAPHORE_VERIFY_RET(x) do { \
		if (slowpath((x) == -1)) { \
			DISPATCH_CRASH("flawed group/semaphore logic"); \
		} \
	} while (0)
#endif
37
// Weak so that it can be interposed/overridden; see the radar below.
DISPATCH_WEAK // rdar://problem/8503746
long _dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema);

// Forward declarations for the vtable and the group implementation
// (dispatch_group_t is implemented on top of dispatch_semaphore_s).
static void _dispatch_semaphore_dispose(dispatch_semaphore_t dsema);
static size_t _dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf,
		size_t bufsiz);
static long _dispatch_group_wake(dispatch_semaphore_t dsema);
45
46 #pragma mark -
47 #pragma mark dispatch_semaphore_t
48
// Vtable type for semaphore objects; the common header supplies the
// standard dispatch-object function slots.
struct dispatch_semaphore_vtable_s {
	DISPATCH_VTABLE_HEADER(dispatch_semaphore_s);
};

// Shared vtable instance for all semaphores (and groups, which reuse
// the semaphore representation).
const struct dispatch_semaphore_vtable_s _dispatch_semaphore_vtable = {
	.do_type = DISPATCH_SEMAPHORE_TYPE,
	.do_kind = "semaphore",
	.do_dispose = _dispatch_semaphore_dispose,
	.do_debug = _dispatch_semaphore_debug,
};
59
60 dispatch_semaphore_t
61 dispatch_semaphore_create(long value)
62 {
63 dispatch_semaphore_t dsema;
64
65 // If the internal value is negative, then the absolute of the value is
66 // equal to the number of waiting threads. Therefore it is bogus to
67 // initialize the semaphore with a negative value.
68 if (value < 0) {
69 return NULL;
70 }
71
72 dsema = calloc(1, sizeof(struct dispatch_semaphore_s));
73
74 if (fastpath(dsema)) {
75 dsema->do_vtable = &_dispatch_semaphore_vtable;
76 dsema->do_next = DISPATCH_OBJECT_LISTLESS;
77 dsema->do_ref_cnt = 1;
78 dsema->do_xref_cnt = 1;
79 dsema->do_targetq = dispatch_get_global_queue(
80 DISPATCH_QUEUE_PRIORITY_DEFAULT, 0);
81 dsema->dsema_value = value;
82 dsema->dsema_orig = value;
83 #if USE_POSIX_SEM
84 int ret = sem_init(&dsema->dsema_sem, 0, 0);
85 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
86 #endif
87 }
88
89 return dsema;
90 }
91
#if USE_MACH_SEM
// Lazily allocate the Mach semaphore port behind *s4 (either
// dsema_port or dsema_waiter_port). Safe against concurrent callers:
// the loser of the publication race destroys its duplicate port.
static void
_dispatch_semaphore_create_port(semaphore_t *s4)
{
	kern_return_t kr;
	semaphore_t tmp;

	if (*s4) {
		return;
	}

	// lazily allocate the semaphore port

	// Someday:
	// 1) Switch to a doubly-linked FIFO in user-space.
	// 2) User-space timers for the timeout.
	// 3) Use the per-thread semaphore port.

	// Retry forever on failure (presumably transient, e.g. port-space
	// pressure); DISPATCH_VERIFY_MIG screens the error first.
	while ((kr = semaphore_create(mach_task_self(), &tmp,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		sleep(1);
	}

	// Publish the new port only if nobody else has; otherwise destroy
	// our extra one so exactly one port survives.
	if (!dispatch_atomic_cmpxchg(s4, 0, tmp)) {
		kr = semaphore_destroy(mach_task_self(), tmp);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}

	// NOTE(review): presumably because Mach ports do not survive
	// fork() — confirm the exact rationale for this flag.
	_dispatch_safe_fork = false;
}
#endif
124
// Final teardown of a semaphore/group object. Crashes the client if
// the value is still below the creation value, i.e. there are threads
// waiting (or unbalanced group enters) at deallocation time.
static void
_dispatch_semaphore_dispose(dispatch_semaphore_t dsema)
{
	if (dsema->dsema_value < dsema->dsema_orig) {
		DISPATCH_CLIENT_CRASH(
				"Semaphore/group object deallocated while in use");
	}

#if USE_MACH_SEM
	kern_return_t kr;
	// Ports are allocated lazily, so either may still be 0 if the
	// corresponding wait path was never taken.
	if (dsema->dsema_port) {
		kr = semaphore_destroy(mach_task_self(), dsema->dsema_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
	if (dsema->dsema_waiter_port) {
		kr = semaphore_destroy(mach_task_self(), dsema->dsema_waiter_port);
		DISPATCH_SEMAPHORE_VERIFY_KR(kr);
	}
#elif USE_POSIX_SEM
	int ret = sem_destroy(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif

	_dispatch_dispose(dsema);
}
150
151 static size_t
152 _dispatch_semaphore_debug(dispatch_semaphore_t dsema, char *buf, size_t bufsiz)
153 {
154 size_t offset = 0;
155 offset += snprintf(&buf[offset], bufsiz - offset, "%s[%p] = { ",
156 dx_kind(dsema), dsema);
157 offset += _dispatch_object_debug_attr(dsema, &buf[offset], bufsiz - offset);
158 #if USE_MACH_SEM
159 offset += snprintf(&buf[offset], bufsiz - offset, "port = 0x%u, ",
160 dsema->dsema_port);
161 #endif
162 offset += snprintf(&buf[offset], bufsiz - offset,
163 "value = %ld, orig = %ld }", dsema->dsema_value, dsema->dsema_orig);
164 return offset;
165 }
166
// Slow path of dispatch_semaphore_signal(): the increment observed at
// least one waiter, so deliver one kernel-level wakeup. Always
// returns 1 (a thread was woken).
DISPATCH_NOINLINE
long
_dispatch_semaphore_signal_slow(dispatch_semaphore_t dsema)
{
	// Before dsema_sent_ksignals is incremented we can rely on the reference
	// held by the waiter. However, once this value is incremented the waiter
	// may return between the atomic increment and the semaphore_signal(),
	// therefore an explicit reference must be held in order to safely access
	// dsema after the atomic increment.
	_dispatch_retain(dsema);

	// Parallel signal count: lets the wait slow path distinguish real
	// wakeups from spurious ones (see _dispatch_semaphore_wait_slow()).
	(void)dispatch_atomic_inc2o(dsema, dsema_sent_ksignals);

#if USE_MACH_SEM
	_dispatch_semaphore_create_port(&dsema->dsema_port);
	kern_return_t kr = semaphore_signal(dsema->dsema_port);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	int ret = sem_post(&dsema->dsema_sem);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif

	_dispatch_release(dsema);
	return 1;
}
192
// Signal (increment) the semaphore. Returns non-zero when a waiting
// thread is woken, zero otherwise.
long
dispatch_semaphore_signal(dispatch_semaphore_t dsema)
{
	// Release barrier before the increment: writes made prior to the
	// signal become visible to the thread that completes a wait.
	dispatch_atomic_release_barrier();
	long value = dispatch_atomic_inc2o(dsema, dsema_value);
	if (fastpath(value > 0)) {
		// No waiters were recorded; nothing to wake.
		return 0;
	}
	if (slowpath(value == LONG_MIN)) {
		// The value wrapped past LONG_MAX: more signals/leaves than
		// matching waits/enters.
		DISPATCH_CLIENT_CRASH("Unbalanced call to dispatch_group_leave() or "
				"dispatch_semaphore_signal()");
	}
	// value <= 0: at least one thread is in (or headed into) the slow
	// wait path and needs a kernel wakeup.
	return _dispatch_semaphore_signal_slow(dsema);
}
207
208 DISPATCH_NOINLINE
209 static long
210 _dispatch_semaphore_wait_slow(dispatch_semaphore_t dsema,
211 dispatch_time_t timeout)
212 {
213 long orig;
214
215 again:
216 // Mach semaphores appear to sometimes spuriously wake up. Therefore,
217 // we keep a parallel count of the number of times a Mach semaphore is
218 // signaled (6880961).
219 while ((orig = dsema->dsema_sent_ksignals)) {
220 if (dispatch_atomic_cmpxchg2o(dsema, dsema_sent_ksignals, orig,
221 orig - 1)) {
222 return 0;
223 }
224 }
225
226 #if USE_MACH_SEM
227 mach_timespec_t _timeout;
228 kern_return_t kr;
229
230 _dispatch_semaphore_create_port(&dsema->dsema_port);
231
232 // From xnu/osfmk/kern/sync_sema.c:
233 // wait_semaphore->count = -1; /* we don't keep an actual count */
234 //
235 // The code above does not match the documentation, and that fact is
236 // not surprising. The documented semantics are clumsy to use in any
237 // practical way. The above hack effectively tricks the rest of the
238 // Mach semaphore logic to behave like the libdispatch algorithm.
239
240 switch (timeout) {
241 default:
242 do {
243 uint64_t nsec = _dispatch_timeout(timeout);
244 _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
245 _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
246 kr = slowpath(semaphore_timedwait(dsema->dsema_port, _timeout));
247 } while (kr == KERN_ABORTED);
248
249 if (kr != KERN_OPERATION_TIMED_OUT) {
250 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
251 break;
252 }
253 // Fall through and try to undo what the fast path did to
254 // dsema->dsema_value
255 case DISPATCH_TIME_NOW:
256 while ((orig = dsema->dsema_value) < 0) {
257 if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) {
258 return KERN_OPERATION_TIMED_OUT;
259 }
260 }
261 // Another thread called semaphore_signal().
262 // Fall through and drain the wakeup.
263 case DISPATCH_TIME_FOREVER:
264 do {
265 kr = semaphore_wait(dsema->dsema_port);
266 } while (kr == KERN_ABORTED);
267 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
268 break;
269 }
270 #elif USE_POSIX_SEM
271 struct timespec _timeout;
272 int ret;
273
274 switch (timeout) {
275 default:
276 do {
277 uint64_t nsec = _dispatch_timeout(timeout);
278 _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
279 _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
280 ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
281 } while (ret == -1 && errno == EINTR);
282
283 if (ret == -1 && errno != ETIMEDOUT) {
284 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
285 break;
286 }
287 // Fall through and try to undo what the fast path did to
288 // dsema->dsema_value
289 case DISPATCH_TIME_NOW:
290 while ((orig = dsema->dsema_value) < 0) {
291 if (dispatch_atomic_cmpxchg2o(dsema, dsema_value, orig, orig + 1)) {
292 errno = ETIMEDOUT;
293 return -1;
294 }
295 }
296 // Another thread called semaphore_signal().
297 // Fall through and drain the wakeup.
298 case DISPATCH_TIME_FOREVER:
299 do {
300 ret = sem_wait(&dsema->dsema_sem);
301 } while (ret != 0);
302 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
303 break;
304 }
305 #endif
306
307 goto again;
308 }
309
310 long
311 dispatch_semaphore_wait(dispatch_semaphore_t dsema, dispatch_time_t timeout)
312 {
313 long value = dispatch_atomic_dec2o(dsema, dsema_value);
314 dispatch_atomic_acquire_barrier();
315 if (fastpath(value >= 0)) {
316 return 0;
317 }
318 return _dispatch_semaphore_wait_slow(dsema, timeout);
319 }
320
321 #pragma mark -
322 #pragma mark dispatch_group_t
323
324 dispatch_group_t
325 dispatch_group_create(void)
326 {
327 return (dispatch_group_t)dispatch_semaphore_create(LONG_MAX);
328 }
329
330 void
331 dispatch_group_enter(dispatch_group_t dg)
332 {
333 dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
334
335 (void)dispatch_semaphore_wait(dsema, DISPATCH_TIME_FOREVER);
336 }
337
// Wake everything waiting for the group to become empty: all threads
// blocked in dispatch_group_wait() and all pending notify callbacks.
// Called when dsema_value has returned to dsema_orig.
DISPATCH_NOINLINE
static long
_dispatch_group_wake(dispatch_semaphore_t dsema)
{
	struct dispatch_sema_notify_s *next, *head, *tail = NULL;
	long rval;

	// Atomically take ownership of the notify list.
	head = dispatch_atomic_xchg2o(dsema, dsema_notify_head, NULL);
	if (head) {
		// snapshot before anything is notified/woken <rdar://problem/8554546>
		tail = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, NULL);
	}
	// Claim all recorded waiters; each one gets exactly one wakeup.
	rval = dispatch_atomic_xchg2o(dsema, dsema_group_waiters, 0);
	if (rval) {
		// wake group waiters
#if USE_MACH_SEM
		_dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
		do {
			kern_return_t kr = semaphore_signal(dsema->dsema_waiter_port);
			DISPATCH_SEMAPHORE_VERIFY_KR(kr);
		} while (--rval);
#elif USE_POSIX_SEM
		do {
			int ret = sem_post(&dsema->dsema_sem);
			DISPATCH_SEMAPHORE_VERIFY_RET(ret);
		} while (--rval);
#endif
	}
	if (head) {
		// async group notify blocks
		do {
			dispatch_async_f(head->dsn_queue, head->dsn_ctxt, head->dsn_func);
			// Drop the queue reference taken by dispatch_group_notify_f().
			_dispatch_release(head->dsn_queue);
			next = fastpath(head->dsn_next);
			if (!next && head != tail) {
				// An enqueuer has swapped the tail but not yet stored
				// dsn_next; spin until the link becomes visible.
				while (!(next = fastpath(head->dsn_next))) {
					_dispatch_hardware_pause();
				}
			}
			free(head);
		} while ((head = next));
		// Drop the group reference taken when the notify list went from
		// empty to non-empty in dispatch_group_notify_f().
		_dispatch_release(dsema);
	}
	return 0;
}
383
// Leave the group: increment the value back toward dsema_orig and, if
// the group appears to have just become empty, wake waiters/notifies.
void
dispatch_group_leave(dispatch_group_t dg)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;

	dispatch_semaphore_signal(dsema);
	// NOTE(review): this re-reads dsema_value non-atomically after the
	// signal; concurrent enter/leave pairs can race past this check, so
	// _dispatch_group_wake() must tolerate having nothing to do —
	// confirm against later libdispatch revisions.
	if (dsema->dsema_value == dsema->dsema_orig) {
		(void)_dispatch_group_wake(dsema);
	}
}
394
395 DISPATCH_NOINLINE
396 static long
397 _dispatch_group_wait_slow(dispatch_semaphore_t dsema, dispatch_time_t timeout)
398 {
399 long orig;
400
401 again:
402 // check before we cause another signal to be sent by incrementing
403 // dsema->dsema_group_waiters
404 if (dsema->dsema_value == dsema->dsema_orig) {
405 return _dispatch_group_wake(dsema);
406 }
407 // Mach semaphores appear to sometimes spuriously wake up. Therefore,
408 // we keep a parallel count of the number of times a Mach semaphore is
409 // signaled (6880961).
410 (void)dispatch_atomic_inc2o(dsema, dsema_group_waiters);
411 // check the values again in case we need to wake any threads
412 if (dsema->dsema_value == dsema->dsema_orig) {
413 return _dispatch_group_wake(dsema);
414 }
415
416 #if USE_MACH_SEM
417 mach_timespec_t _timeout;
418 kern_return_t kr;
419
420 _dispatch_semaphore_create_port(&dsema->dsema_waiter_port);
421
422 // From xnu/osfmk/kern/sync_sema.c:
423 // wait_semaphore->count = -1; /* we don't keep an actual count */
424 //
425 // The code above does not match the documentation, and that fact is
426 // not surprising. The documented semantics are clumsy to use in any
427 // practical way. The above hack effectively tricks the rest of the
428 // Mach semaphore logic to behave like the libdispatch algorithm.
429
430 switch (timeout) {
431 default:
432 do {
433 uint64_t nsec = _dispatch_timeout(timeout);
434 _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
435 _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
436 kr = slowpath(semaphore_timedwait(dsema->dsema_waiter_port,
437 _timeout));
438 } while (kr == KERN_ABORTED);
439
440 if (kr != KERN_OPERATION_TIMED_OUT) {
441 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
442 break;
443 }
444 // Fall through and try to undo the earlier change to
445 // dsema->dsema_group_waiters
446 case DISPATCH_TIME_NOW:
447 while ((orig = dsema->dsema_group_waiters)) {
448 if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
449 orig - 1)) {
450 return KERN_OPERATION_TIMED_OUT;
451 }
452 }
453 // Another thread called semaphore_signal().
454 // Fall through and drain the wakeup.
455 case DISPATCH_TIME_FOREVER:
456 do {
457 kr = semaphore_wait(dsema->dsema_waiter_port);
458 } while (kr == KERN_ABORTED);
459 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
460 break;
461 }
462 #elif USE_POSIX_SEM
463 struct timespec _timeout;
464 int ret;
465
466 switch (timeout) {
467 default:
468 do {
469 uint64_t nsec = _dispatch_timeout(timeout);
470 _timeout.tv_sec = (typeof(_timeout.tv_sec))(nsec / NSEC_PER_SEC);
471 _timeout.tv_nsec = (typeof(_timeout.tv_nsec))(nsec % NSEC_PER_SEC);
472 ret = slowpath(sem_timedwait(&dsema->dsema_sem, &_timeout));
473 } while (ret == -1 && errno == EINTR);
474
475 if (!(ret == -1 && errno == ETIMEDOUT)) {
476 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
477 break;
478 }
479 // Fall through and try to undo the earlier change to
480 // dsema->dsema_group_waiters
481 case DISPATCH_TIME_NOW:
482 while ((orig = dsema->dsema_group_waiters)) {
483 if (dispatch_atomic_cmpxchg2o(dsema, dsema_group_waiters, orig,
484 orig - 1)) {
485 errno = ETIMEDOUT;
486 return -1;
487 }
488 }
489 // Another thread called semaphore_signal().
490 // Fall through and drain the wakeup.
491 case DISPATCH_TIME_FOREVER:
492 do {
493 ret = sem_wait(&dsema->dsema_sem);
494 } while (ret == -1 && errno == EINTR);
495 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
496 break;
497 }
498 #endif
499
500 goto again;
501 }
502
// Wait until the group is empty, up to the given timeout. timeout == 0
// is a pure poll. Returns 0 on success, or the platform timeout
// indicator (KERN_OPERATION_TIMED_OUT, or -1 with errno == ETIMEDOUT).
long
dispatch_group_wait(dispatch_group_t dg, dispatch_time_t timeout)
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;

	// Fast path: group already empty (no outstanding enters). This
	// check must precede the timeout check so a poll on an empty group
	// succeeds.
	if (dsema->dsema_value == dsema->dsema_orig) {
		return 0;
	}
	if (timeout == 0) {
		// Poll only: report timeout immediately without blocking.
#if USE_MACH_SEM
		return KERN_OPERATION_TIMED_OUT;
#elif USE_POSIX_SEM
		errno = ETIMEDOUT;
		return (-1);
#endif
	}
	return _dispatch_group_wait_slow(dsema, timeout);
}
521
// Queue (ctxt, func) to be submitted to dq once the group becomes
// empty. Notifications are chained on a lock-free singly-linked list
// (dsema_notify_head/tail) that _dispatch_group_wake() consumes.
DISPATCH_NOINLINE
void
dispatch_group_notify_f(dispatch_group_t dg, dispatch_queue_t dq, void *ctxt,
		void (*func)(void *))
{
	dispatch_semaphore_t dsema = (dispatch_semaphore_t)dg;
	struct dispatch_sema_notify_s *dsn, *prev;

	// FIXME -- this should be updated to use the continuation cache
	while (!(dsn = calloc(1, sizeof(*dsn)))) {
		sleep(1);
	}

	dsn->dsn_queue = dq;
	dsn->dsn_ctxt = ctxt;
	dsn->dsn_func = func;
	// Queue reference released by _dispatch_group_wake() after the
	// notification has been submitted.
	_dispatch_retain(dq);
	// Make sure the node is fully initialized before publishing it.
	dispatch_atomic_store_barrier();
	prev = dispatch_atomic_xchg2o(dsema, dsema_notify_tail, dsn);
	if (fastpath(prev)) {
		// List was non-empty: link after the previous tail. A concurrent
		// _dispatch_group_wake() spins on dsn_next until this store lands.
		prev->dsn_next = dsn;
	} else {
		// First pending notification: hold a group reference on behalf
		// of the list (released in _dispatch_group_wake()).
		_dispatch_retain(dg);
		dsema->dsema_notify_head = dsn;
		if (dsema->dsema_value == dsema->dsema_orig) {
			// Group is already empty: fire the notification now.
			_dispatch_group_wake(dsema);
		}
	}
}
551
#ifdef __BLOCKS__
// Block-based wrapper: copy the block to the heap and route through the
// function-based variant; the copy is released after the block runs.
void
dispatch_group_notify(dispatch_group_t dg, dispatch_queue_t dq,
		dispatch_block_t db)
{
	void *ctxt = _dispatch_Block_copy(db);
	dispatch_group_notify_f(dg, dq, ctxt, _dispatch_call_block_and_release);
}
#endif
561
562 #pragma mark -
563 #pragma mark _dispatch_thread_semaphore_t
564
// Create a fresh kernel-backed per-thread semaphore with count 0.
DISPATCH_NOINLINE
static _dispatch_thread_semaphore_t
_dispatch_thread_semaphore_create(void)
{
#if USE_MACH_SEM
	semaphore_t s4;
	kern_return_t kr;
	// Retry forever on (presumably transient) allocation failure.
	while (slowpath(kr = semaphore_create(mach_task_self(), &s4,
			SYNC_POLICY_FIFO, 0))) {
		DISPATCH_VERIFY_MIG(kr);
		sleep(1);
	}
	return s4;
#elif USE_POSIX_SEM
	// NOTE(review): the sem_t is returned *by value* cast to the handle
	// type, and the sibling _dispatch_thread_semaphore_* helpers rebuild
	// a sem_t from that value. This only works where sem_t is a
	// pointer/scalar type (it is a struct on e.g. Linux glibc) —
	// confirm for each USE_POSIX_SEM target platform.
	sem_t s4;
	int ret = sem_init(&s4, 0, 0);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
	return s4;
#endif
}
585
// Destroy a per-thread semaphore previously returned by
// _dispatch_thread_semaphore_create().
DISPATCH_NOINLINE
void
_dispatch_thread_semaphore_dispose(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr = semaphore_destroy(mach_task_self(), s4);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	// Rebuilds the sem_t from the scalar handle; see the portability
	// note in _dispatch_thread_semaphore_create().
	sem_t s4 = (sem_t)sema;
	int ret = sem_destroy(&s4);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
600
// Signal (post) a per-thread semaphore, waking its single waiter.
void
_dispatch_thread_semaphore_signal(_dispatch_thread_semaphore_t sema)
{
#if USE_MACH_SEM
	semaphore_t s4 = (semaphore_t)sema;
	kern_return_t kr = semaphore_signal(s4);
	DISPATCH_SEMAPHORE_VERIFY_KR(kr);
#elif USE_POSIX_SEM
	// Rebuilds the sem_t from the scalar handle; see the portability
	// note in _dispatch_thread_semaphore_create().
	sem_t s4 = (sem_t)sema;
	int ret = sem_post(&s4);
	DISPATCH_SEMAPHORE_VERIFY_RET(ret);
#endif
}
614
615 void
616 _dispatch_thread_semaphore_wait(_dispatch_thread_semaphore_t sema)
617 {
618 #if USE_MACH_SEM
619 semaphore_t s4 = (semaphore_t)sema;
620 kern_return_t kr;
621 do {
622 kr = semaphore_wait(s4);
623 } while (slowpath(kr == KERN_ABORTED));
624 DISPATCH_SEMAPHORE_VERIFY_KR(kr);
625 #elif USE_POSIX_SEM
626 sem_t s4 = (sem_t)sema;
627 int ret;
628 do {
629 ret = sem_wait(&s4);
630 } while (slowpath(ret != 0));
631 DISPATCH_SEMAPHORE_VERIFY_RET(ret);
632 #endif
633 }
634
635 _dispatch_thread_semaphore_t
636 _dispatch_get_thread_semaphore(void)
637 {
638 _dispatch_thread_semaphore_t sema = (_dispatch_thread_semaphore_t)
639 _dispatch_thread_getspecific(dispatch_sema4_key);
640 if (slowpath(!sema)) {
641 return _dispatch_thread_semaphore_create();
642 }
643 _dispatch_thread_setspecific(dispatch_sema4_key, NULL);
644 return sema;
645 }
646
647 void
648 _dispatch_put_thread_semaphore(_dispatch_thread_semaphore_t sema)
649 {
650 _dispatch_thread_semaphore_t old_sema = (_dispatch_thread_semaphore_t)
651 _dispatch_thread_getspecific(dispatch_sema4_key);
652 _dispatch_thread_setspecific(dispatch_sema4_key, (void*)sema);
653 if (slowpath(old_sema)) {
654 return _dispatch_thread_semaphore_dispose(old_sema);
655 }
656 }