/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 *	File:	kern/sync_sema.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed semaphore synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/kern_return.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>

#include <kern/misc_protos.h>
#include <kern/sync_sema.h>
#include <kern/spl.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/ipc_tt.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/host.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/mach_param.h>

#include <libkern/OSAtomic.h>

static unsigned int semaphore_event;
#define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)

zone_t semaphore_zone;
unsigned int semaphore_max;

/* Forward declarations */


kern_return_t
semaphore_wait_trap_internal(
	mach_port_name_t name,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_wait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_trap_internal(
	mach_port_name_t name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name);

kern_return_t
semaphore_signal_internal(
	semaphore_t		semaphore,
	thread_t		thread,
	int			options);

kern_return_t
semaphore_convert_wait_result(
	int			wait_result);

void
semaphore_wait_continue(void);

static kern_return_t
semaphore_wait_internal(
	semaphore_t		wait_semaphore,
	semaphore_t		signal_semaphore,
	uint64_t		deadline,
	int			option,
	void			(*caller_cont)(kern_return_t));

static __inline__ uint64_t
semaphore_deadline(
	unsigned int		sec,
	clock_res_t		nsec)
{
	uint64_t	abstime;

	nanoseconds_to_absolutetime((uint64_t)sec * NSEC_PER_SEC + nsec, &abstime);
	clock_absolutetime_interval_to_deadline(abstime, &abstime);

	return (abstime);
}
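
/*
 * Illustration (comment only, not part of the build): the helper above
 * converts a relative (sec, nsec) interval into an absolute-time
 * deadline.  A 1.5 second timeout, for example, becomes "now + 1.5s"
 * expressed in Mach absolute time units:
 *
 *	uint64_t deadline = semaphore_deadline(1, 500 * NSEC_PER_MSEC);
 *	// `deadline` is an absolute wakeup time, not an interval, and
 *	// is suitable for semaphore_wait_internal() below
 */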

/*
 *	ROUTINE:	semaphore_init		[private]
 *
 *	Initialize the semaphore mechanisms.
 *	Right now, we only need to initialize the semaphore zone.
 */
void
semaphore_init(void)
{
	semaphore_zone = zinit(sizeof(struct semaphore),
			       semaphore_max * sizeof(struct semaphore),
			       sizeof(struct semaphore),
			       "semaphores");
	zone_change(semaphore_zone, Z_NOENCRYPT, TRUE);
}

/*
 *	Routine:	semaphore_create
 *
 *	Creates a semaphore.
 *	The port representing the semaphore is returned as a parameter.
 */
kern_return_t
semaphore_create(
	task_t		task,
	semaphore_t	*new_semaphore,
	int		policy,
	int		value)
{
	semaphore_t	s = SEMAPHORE_NULL;
	kern_return_t	kret;


	*new_semaphore = SEMAPHORE_NULL;
	if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	s = (semaphore_t) zalloc(semaphore_zone);

	if (s == SEMAPHORE_NULL)
		return KERN_RESOURCE_SHORTAGE;

	kret = waitq_init(&s->waitq, policy | SYNC_POLICY_DISABLE_IRQ); /* also inits lock */
	if (kret != KERN_SUCCESS) {
		zfree(semaphore_zone, s);
		return kret;
	}

	/*
	 * Initialize the semaphore values.
	 */
	s->port = IP_NULL;
	s->ref_count = 1;
	s->count = value;
	s->active = TRUE;
	s->owner = task;

	/*
	 * Associate the new semaphore with the task by adding
	 * the new semaphore to the task's semaphore list.
	 */
	task_lock(task);
	enqueue_head(&task->semaphore_list, (queue_entry_t) s);
	task->semaphores_owned++;
	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}
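
/*
 * Example (illustrative sketch, not part of this file): an in-kernel
 * client creating and later destroying a counting semaphore owned by
 * the current task.  SYNC_POLICY_FIFO with an initial count of 0 is
 * typical; error handling is abbreviated.
 *
 *	semaphore_t sema;
 *	kern_return_t kr;
 *
 *	kr = semaphore_create(current_task(), &sema, SYNC_POLICY_FIFO, 0);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *	...
 *	// semaphore_destroy() consumes the reference from semaphore_create()
 *	(void) semaphore_destroy(current_task(), sema);
 */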

/*
 *	Routine:	semaphore_destroy_internal
 *
 *	Disassociate a semaphore from its owning task, mark it inactive,
 *	and set any waiting threads running with THREAD_RESTART.
 *
 *	Conditions:
 *			task is locked
 *			semaphore is locked
 *			semaphore is owned by the specified task
 *	Returns:
 *			with semaphore unlocked
 */
static void
semaphore_destroy_internal(
	task_t		task,
	semaphore_t	semaphore)
{
	int old_count;

	/* unlink semaphore from owning task */
	assert(semaphore->owner == task);
	remqueue((queue_entry_t) semaphore);
	semaphore->owner = TASK_NULL;
	task->semaphores_owned--;

	/*
	 * Deactivate semaphore
	 */
	assert(semaphore->active);
	semaphore->active = FALSE;

	/*
	 * Wakeup blocked threads
	 */
	old_count = semaphore->count;
	semaphore->count = 0;

	if (old_count < 0) {
		waitq_wakeup64_all_locked(&semaphore->waitq,
					  SEMAPHORE_EVENT,
					  THREAD_RESTART, NULL,
					  WAITQ_ALL_PRIORITIES,
					  WAITQ_UNLOCK);
		/* waitq/semaphore is unlocked */
	} else {
		semaphore_unlock(semaphore);
	}
}

/*
 *	Routine:	semaphore_destroy
 *
 *	Destroys a semaphore and consumes the caller's reference on the
 *	semaphore.
 */
kern_return_t
semaphore_destroy(
	task_t		task,
	semaphore_t	semaphore)
{
	spl_t spl_level;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		semaphore_dereference(semaphore);
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);
	spl_level = splsched();
	semaphore_lock(semaphore);

	if (semaphore->owner != task) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	semaphore_destroy_internal(task, semaphore);
	/* semaphore unlocked */

	splx(spl_level);
	task_unlock(task);

	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}

/*
 *	Routine:	semaphore_destroy_all
 *
 *	Destroy all the semaphores associated with a given task.
 */
#define SEMASPERSPL 20	/* max number of semaphores to destroy per spl hold */

void
semaphore_destroy_all(
	task_t		task)
{
	uint32_t count;
	spl_t spl_level;

	count = 0;
	task_lock(task);
	while (!queue_empty(&task->semaphore_list)) {
		semaphore_t semaphore;

		semaphore = (semaphore_t) queue_first(&task->semaphore_list);

		if (count == 0)
			spl_level = splsched();
		semaphore_lock(semaphore);

		semaphore_destroy_internal(task, semaphore);
		/* semaphore unlocked */

		/* throttle number of semaphores per interrupt disablement */
		if (++count == SEMASPERSPL) {
			count = 0;
			splx(spl_level);
		}
	}
	if (count != 0)
		splx(spl_level);

	task_unlock(task);
}

/*
 *	Routine:	semaphore_signal_internal
 *
 *	Signals the semaphore directly, waking waiters as directed by
 *	the thread/options arguments.
 *	Assumptions:
 *		Semaphore is unlocked on entry; the lock is taken and
 *		released internally (at splsched).
 */
kern_return_t
semaphore_signal_internal(
	semaphore_t	semaphore,
	thread_t	thread,
	int		options)
{
	kern_return_t kr;
	spl_t  spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore->active) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			kr = waitq_wakeup64_thread_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					thread,
					THREAD_AWAKENED,
					WAITQ_UNLOCK);
			/* waitq/semaphore is unlocked */
		} else {
			kr = KERN_NOT_WAITING;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		kr = KERN_NOT_WAITING;
		if (old_count < 0) {
			semaphore->count = 0;	/* always reset */
			kr = waitq_wakeup64_all_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED, NULL,
					WAITQ_ALL_PRIORITIES,
					WAITQ_UNLOCK);
			/* waitq / semaphore is unlocked */
		} else {
			if (options & SEMAPHORE_SIGNAL_PREPOST)
				semaphore->count++;
			kr = KERN_SUCCESS;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	if (semaphore->count < 0) {
		kr = waitq_wakeup64_one_locked(
				&semaphore->waitq,
				SEMAPHORE_EVENT,
				THREAD_AWAKENED, NULL,
				WAITQ_ALL_PRIORITIES,
				WAITQ_KEEP_LOCKED);
		if (kr == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else {
			semaphore->count = 0;	/* all waiters gone */
		}
	}

	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}
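
/*
 * Illustration (comment only) of the option handling above.  Note that
 * while any threads wait the count is pinned at -1 (it does not track
 * the number of waiters); `s` and `some_thread` are hypothetical:
 *
 *	semaphore_signal_internal(s, some_thread, SEMAPHORE_OPTION_NONE);
 *		// wakes some_thread only if it is blocked on s,
 *		// otherwise returns KERN_NOT_WAITING
 *	semaphore_signal_internal(s, THREAD_NULL, SEMAPHORE_SIGNAL_ALL);
 *		// wakes every waiter and resets the count to 0
 *	semaphore_signal_internal(s, THREAD_NULL, SEMAPHORE_SIGNAL_PREPOST);
 *		// wakes one waiter; with no waiters, increments the
 *		// count so a future wait will not block
 */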

/*
 *	Routine:	semaphore_signal_thread
 *
 *	If the specified thread is blocked on the semaphore, it is
 *	woken up.  If a NULL thread was supplied, then any one
 *	thread is woken up.  Otherwise the caller gets KERN_NOT_WAITING
 *	and the semaphore is unchanged.
 */
kern_return_t
semaphore_signal_thread(
	semaphore_t	semaphore,
	thread_t	thread)
{
	kern_return_t		ret;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	ret = semaphore_signal_internal(semaphore,
					thread,
					SEMAPHORE_OPTION_NONE);
	return ret;
}

/*
 *	Routine:	semaphore_signal_thread_trap
 *
 *	Trap interface to the semaphore_signal_thread function.
 */
kern_return_t
semaphore_signal_thread_trap(
	struct semaphore_signal_thread_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;
	mach_port_name_t thread_name = args->thread_name;
	semaphore_t	semaphore;
	thread_t	thread;
	kern_return_t	kr;

	/*
	 * MACH_PORT_NULL is not an error.  It means that we want to
	 * select any one thread that is already waiting, but not to
	 * pre-post the semaphore.
	 */
	if (thread_name != MACH_PORT_NULL) {
		thread = port_name_to_thread(thread_name);
		if (thread == THREAD_NULL)
			return KERN_INVALID_ARGUMENT;
	} else
		thread = THREAD_NULL;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
				thread,
				SEMAPHORE_OPTION_NONE);
		semaphore_dereference(semaphore);
	}
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}
	return kr;
}

/*
 *	Routine:	semaphore_signal
 *
 *	Traditional (in-kernel client and MIG interface) semaphore
 *	signal routine.  Most users will access the trap version.
 *
 *	This interface is not defined to return info about whether
 *	this call found a thread waiting or not.  The internal
 *	routines (and future external routines) do.  We have to
 *	convert those into plain KERN_SUCCESS returns.
 */
kern_return_t
semaphore_signal(
	semaphore_t		semaphore)
{
	kern_return_t		kr;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = semaphore_signal_internal(semaphore,
				       THREAD_NULL,
				       SEMAPHORE_SIGNAL_PREPOST);
	if (kr == KERN_NOT_WAITING)
		return KERN_SUCCESS;
	return kr;
}

/*
 *	Routine:	semaphore_signal_trap
 *
 *	Trap interface to the semaphore_signal function.
 */
kern_return_t
semaphore_signal_trap(
	struct semaphore_signal_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;

	return (semaphore_signal_internal_trap(sema_name));
}

kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name)
{
	semaphore_t	semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
				THREAD_NULL,
				SEMAPHORE_SIGNAL_PREPOST);
		semaphore_dereference(semaphore);
		if (kr == KERN_NOT_WAITING)
			kr = KERN_SUCCESS;
	}
	return kr;
}

/*
 *	Routine:	semaphore_signal_all
 *
 *	Awakens ALL threads currently blocked on the semaphore.
 *	The semaphore count returns to zero.
 */
kern_return_t
semaphore_signal_all(
	semaphore_t		semaphore)
{
	kern_return_t kr;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = semaphore_signal_internal(semaphore,
				       THREAD_NULL,
				       SEMAPHORE_SIGNAL_ALL);
	if (kr == KERN_NOT_WAITING)
		return KERN_SUCCESS;
	return kr;
}

/*
 *	Routine:	semaphore_signal_all_trap
 *
 *	Trap interface to the semaphore_signal_all function.
 */
kern_return_t
semaphore_signal_all_trap(
	struct semaphore_signal_all_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;
	semaphore_t	semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
				THREAD_NULL,
				SEMAPHORE_SIGNAL_ALL);
		semaphore_dereference(semaphore);
		if (kr == KERN_NOT_WAITING)
			kr = KERN_SUCCESS;
	}
	return kr;
}

/*
 *	Routine:	semaphore_convert_wait_result
 *
 *	Generate the return code after a semaphore wait/block.  It
 *	takes the wait result as an input and converts that to an
 *	appropriate result.
 */
kern_return_t
semaphore_convert_wait_result(int wait_result)
{
	switch (wait_result) {
	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_TIMED_OUT:
		return KERN_OPERATION_TIMED_OUT;

	case THREAD_INTERRUPTED:
		return KERN_ABORTED;

	case THREAD_RESTART:
		return KERN_TERMINATED;

	default:
		panic("semaphore_block\n");
		return KERN_FAILURE;
	}
}

/*
 *	Routine:	semaphore_wait_continue
 *
 *	Common continuation routine after waiting on a semaphore.
 *	It returns directly to user space.
 */
void
semaphore_wait_continue(void)
{
	thread_t self = current_thread();
	int wait_result = self->wait_result;
	void (*caller_cont)(kern_return_t) = self->sth_continuation;

	assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
	semaphore_dereference(self->sth_waitsemaphore);
	if (self->sth_signalsemaphore != SEMAPHORE_NULL)
		semaphore_dereference(self->sth_signalsemaphore);

	assert(caller_cont != (void (*)(kern_return_t))0);
	(*caller_cont)(semaphore_convert_wait_result(wait_result));
}

/*
 *	Routine:	semaphore_wait_internal
 *
 *	Decrements the semaphore count by one.  If the count is
 *	negative after the decrement, the calling thread blocks
 *	(possibly at a continuation and/or with a timeout).
 *
 *	Assumptions:
 *		A reference is held on the signal semaphore.
 */
static kern_return_t
semaphore_wait_internal(
	semaphore_t	wait_semaphore,
	semaphore_t	signal_semaphore,
	uint64_t	deadline,
	int		option,
	void		(*caller_cont)(kern_return_t))
{
	int	wait_result;
	spl_t	spl_level;
	kern_return_t	kr = KERN_ALREADY_WAITING;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	if (!wait_semaphore->active) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) {
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		thread_t self = current_thread();

		wait_semaphore->count = -1;	/* we don't keep an actual count */
		thread_lock(self);
		(void)waitq_assert_wait64_locked(
					&wait_semaphore->waitq,
					SEMAPHORE_EVENT,
					THREAD_ABORTSAFE,
					TIMEOUT_URGENCY_USER_NORMAL,
					deadline, TIMEOUT_NO_LEEWAY,
					self);
		thread_unlock(self);
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
						      THREAD_NULL,
						      SEMAPHORE_SIGNAL_PREPOST);

		if (signal_kr == KERN_NOT_WAITING)
			signal_kr = KERN_SUCCESS;
		else if (signal_kr == KERN_TERMINATED) {
			/*
			 * Uh!Oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			thread_t self = current_thread();

			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED)
				kr = KERN_TERMINATED;
		}
	}

	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING)
		return kr;

	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of his own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		thread_t self = current_thread();

		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;
		wait_result = thread_block((thread_continue_t)semaphore_wait_continue);
	}
	else {
		wait_result = thread_block(THREAD_CONTINUE_NULL);
	}

	return (semaphore_convert_wait_result(wait_result));
}


/*
 *	Routine:	semaphore_wait
 *
 *	Traditional (non-continuation) interface presented to
 *	in-kernel clients to wait on a semaphore.
 */
kern_return_t
semaphore_wait(
	semaphore_t		semaphore)
{
	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	return(semaphore_wait_internal(semaphore,
				       SEMAPHORE_NULL,
				       0ULL, SEMAPHORE_OPTION_NONE,
				       (void (*)(kern_return_t))0));
}
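
/*
 * Example (illustrative sketch, not part of this file): the classic
 * producer/consumer pairing of the in-kernel wait and signal entry
 * points.  `item_sema` and `publish_item()` are hypothetical:
 *
 *	// consumer: block until an item is available
 *	kern_return_t kr = semaphore_wait(item_sema);
 *	if (kr == KERN_TERMINATED)
 *		...	// the semaphore was destroyed out from under us
 *
 *	// producer: publish an item, then wake (or pre-post for) a consumer
 *	publish_item();
 *	(void) semaphore_signal(item_sema);
 */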

kern_return_t
semaphore_wait_noblock(
	semaphore_t		semaphore)
{
	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	return(semaphore_wait_internal(semaphore,
				       SEMAPHORE_NULL,
				       0ULL, SEMAPHORE_TIMEOUT_NOBLOCK,
				       (void (*)(kern_return_t))0));
}

kern_return_t
semaphore_wait_deadline(
	semaphore_t		semaphore,
	uint64_t		deadline)
{
	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	return(semaphore_wait_internal(semaphore,
				       SEMAPHORE_NULL,
				       deadline, SEMAPHORE_OPTION_NONE,
				       (void (*)(kern_return_t))0));
}

/*
 *	Trap:	semaphore_wait_trap
 *
 *	Trap version of semaphore wait.  Called on behalf of user-level
 *	clients.
 */

kern_return_t
semaphore_wait_trap(
	struct semaphore_wait_trap_args *args)
{
	return(semaphore_wait_trap_internal(args->wait_name, thread_syscall_return));
}


kern_return_t
semaphore_wait_trap_internal(
	mach_port_name_t name,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t	semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_wait_internal(semaphore,
					     SEMAPHORE_NULL,
					     0ULL, SEMAPHORE_OPTION_NONE,
					     caller_cont);
		semaphore_dereference(semaphore);
	}
	return kr;
}

/*
 *	Routine:	semaphore_timedwait
 *
 *	Traditional (non-continuation) interface presented to
 *	in-kernel clients to wait on a semaphore with a timeout.
 *
 *	A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait(
	semaphore_t		semaphore,
	mach_timespec_t		wait_time)
{
	int		option = SEMAPHORE_OPTION_NONE;
	uint64_t	deadline = 0;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	if (BAD_MACH_TIMESPEC(&wait_time))
		return KERN_INVALID_VALUE;

	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0)
		option = SEMAPHORE_TIMEOUT_NOBLOCK;
	else
		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);

	return (semaphore_wait_internal(semaphore,
					SEMAPHORE_NULL,
					deadline, option,
					(void (*)(kern_return_t))0));
}
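
/*
 * Example (illustrative sketch): waiting with a 100ms timeout.  A
 * {0,0} timeout degenerates into semaphore_wait_noblock() behavior.
 *
 *	mach_timespec_t ts = { .tv_sec = 0, .tv_nsec = 100 * NSEC_PER_MSEC };
 *	kern_return_t kr = semaphore_timedwait(sema, ts);
 *	if (kr == KERN_OPERATION_TIMED_OUT)
 *		...	// nobody signalled within 100ms
 */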
/*
 *	Trap:	semaphore_timedwait_trap
 *
 *	Trap version of a semaphore_timedwait.  The timeout parameter
 *	is passed in two distinct parts and re-assembled on this side
 *	of the trap interface (to accommodate calling conventions that
 *	pass structures as pointers instead of inline in registers without
 *	having to add a copyin).
 *
 *	A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_trap(
	struct semaphore_timedwait_trap_args *args)
{
	return(semaphore_timedwait_trap_internal(args->wait_name, args->sec, args->nsec, thread_syscall_return));
}


kern_return_t
semaphore_timedwait_trap_internal(
	mach_port_name_t name,
	unsigned int	sec,
	clock_res_t	nsec,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t semaphore;
	mach_timespec_t wait_time;
	kern_return_t kr;

	wait_time.tv_sec = sec;
	wait_time.tv_nsec = nsec;
	if (BAD_MACH_TIMESPEC(&wait_time))
		return KERN_INVALID_VALUE;

	kr = port_name_to_semaphore(name, &semaphore);
	if (kr == KERN_SUCCESS) {
		int		option = SEMAPHORE_OPTION_NONE;
		uint64_t	deadline = 0;

		if (sec == 0 && nsec == 0)
			option = SEMAPHORE_TIMEOUT_NOBLOCK;
		else
			deadline = semaphore_deadline(sec, nsec);

		kr = semaphore_wait_internal(semaphore,
					     SEMAPHORE_NULL,
					     deadline, option,
					     caller_cont);
		semaphore_dereference(semaphore);
	}
	return kr;
}

/*
 *	Routine:	semaphore_wait_signal
 *
 *	Atomically register a wait on a semaphore and THEN signal
 *	another.  This is the in-kernel entry point that does not
 *	block at a continuation and does not free a signal_semaphore
 *	reference.
 */
kern_return_t
semaphore_wait_signal(
	semaphore_t		wait_semaphore,
	semaphore_t		signal_semaphore)
{
	if (wait_semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	return(semaphore_wait_internal(wait_semaphore,
				       signal_semaphore,
				       0ULL, SEMAPHORE_OPTION_NONE,
				       (void (*)(kern_return_t))0));
}
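
/*
 * Example (illustrative sketch): a rendezvous where a thread atomically
 * queues itself on `reply_sema` before waking its peer via
 * `request_sema`, so the peer's answering signal cannot be lost in the
 * window between the two operations (both names are hypothetical):
 *
 *	kr = semaphore_wait_signal(reply_sema, request_sema);
 *	// equivalent to wait(reply_sema) + signal(request_sema), except
 *	// the wait is registered before the signal is posted
 */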

/*
 *	Trap:	semaphore_wait_signal_trap
 *
 *	Atomically register a wait on a semaphore and THEN signal
 *	another.  This is the trap version from user space.
 */
kern_return_t
semaphore_wait_signal_trap(
	struct semaphore_wait_signal_trap_args *args)
{
	return(semaphore_wait_signal_trap_internal(args->wait_name, args->signal_name, thread_syscall_return));
}

kern_return_t
semaphore_wait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t wait_semaphore;
	semaphore_t signal_semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
	if (kr == KERN_SUCCESS) {
		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
		if (kr == KERN_SUCCESS) {
			kr = semaphore_wait_internal(wait_semaphore,
					signal_semaphore,
					0ULL, SEMAPHORE_OPTION_NONE,
					caller_cont);
			semaphore_dereference(wait_semaphore);
		}
		semaphore_dereference(signal_semaphore);
	}
	return kr;
}


/*
 *	Routine:	semaphore_timedwait_signal
 *
 *	Atomically register a wait on a semaphore and THEN signal
 *	another.  This is the in-kernel entry point that does not
 *	block at a continuation.
 *
 *	A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_signal(
	semaphore_t		wait_semaphore,
	semaphore_t		signal_semaphore,
	mach_timespec_t		wait_time)
{
	int		option = SEMAPHORE_OPTION_NONE;
	uint64_t	deadline = 0;

	if (wait_semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	if (BAD_MACH_TIMESPEC(&wait_time))
		return KERN_INVALID_VALUE;

	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0)
		option = SEMAPHORE_TIMEOUT_NOBLOCK;
	else
		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);

	return(semaphore_wait_internal(wait_semaphore,
				       signal_semaphore,
				       deadline, option,
				       (void (*)(kern_return_t))0));
}

/*
 *	Trap:	semaphore_timedwait_signal_trap
 *
 *	Atomically register a timed wait on a semaphore and THEN signal
 *	another.  This is the trap version from user space.
 */
kern_return_t
semaphore_timedwait_signal_trap(
	struct semaphore_timedwait_signal_trap_args *args)
{
	return(semaphore_timedwait_signal_trap_internal(args->wait_name, args->signal_name, args->sec, args->nsec, thread_syscall_return));
}

kern_return_t
semaphore_timedwait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t wait_semaphore;
	semaphore_t signal_semaphore;
	mach_timespec_t wait_time;
	kern_return_t kr;

	wait_time.tv_sec = sec;
	wait_time.tv_nsec = nsec;
	if (BAD_MACH_TIMESPEC(&wait_time))
		return KERN_INVALID_VALUE;

	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
	if (kr == KERN_SUCCESS) {
		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
		if (kr == KERN_SUCCESS) {
			int		option = SEMAPHORE_OPTION_NONE;
			uint64_t	deadline = 0;

			if (sec == 0 && nsec == 0)
				option = SEMAPHORE_TIMEOUT_NOBLOCK;
			else
				deadline = semaphore_deadline(sec, nsec);

			kr = semaphore_wait_internal(wait_semaphore,
					signal_semaphore,
					deadline, option,
					caller_cont);
			semaphore_dereference(wait_semaphore);
		}
		semaphore_dereference(signal_semaphore);
	}
	return kr;
}


/*
 *	Routine:	semaphore_reference
 *
 *	Take out a reference on a semaphore.  This keeps the data structure
 *	in existence (but the semaphore may be deactivated).
 */
void
semaphore_reference(
	semaphore_t		semaphore)
{
	(void)hw_atomic_add(&semaphore->ref_count, 1);
}

/*
 *	Routine:	semaphore_dereference
 *
 *	Release a reference on a semaphore.  If this is the last reference,
 *	the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
	semaphore_t		semaphore)
{
	uint32_t collisions;
	spl_t spl_level;

	if (semaphore == NULL)
		return;

	if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
		return;

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	ipc_port_t port = semaphore->port;

	if (IP_VALID(port)) {
		assert(!port->ip_srights);
		ipc_port_dealloc_kernel(port);
	}

	/*
	 * Lock the semaphore to lock in the owner task reference.
	 * Then continue to try to lock the task (inverse order).
	 */
	spl_level = splsched();
	semaphore_lock(semaphore);
	for (collisions = 0; semaphore->active; collisions++) {
		task_t task = semaphore->owner;

		assert(task != TASK_NULL);

		if (task_lock_try(task)) {
			semaphore_destroy_internal(task, semaphore);
			/* semaphore unlocked */
			splx(spl_level);
			task_unlock(task);
			goto out;
		}

		/* failed to get out-of-order locks */
		semaphore_unlock(semaphore);
		splx(spl_level);
		mutex_pause(collisions);
		spl_level = splsched();
		semaphore_lock(semaphore);
	}
	semaphore_unlock(semaphore);
	splx(spl_level);

 out:
	zfree(semaphore_zone, semaphore);
}
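
/*
 * Example (illustrative sketch): a subsystem that caches a semaphore
 * pointer must hold its own reference for as long as it keeps the
 * pointer, and drop it when done (`cached` is hypothetical):
 *
 *	semaphore_reference(semaphore);		// cached = semaphore;
 *	...
 *	semaphore_dereference(cached);		// may destroy a still-active
 *						// semaphore and free it
 */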