/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */
/*
 *	File:	kern/sync_sema.c
 *	Author:	Joseph CaraDonna
 *
 *	Contains RT distributed semaphore synchronization services.
 */

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/kern_return.h>
#include <mach/semaphore.h>
#include <mach/sync_policy.h>
#include <mach/task.h>

#include <kern/misc_protos.h>
#include <kern/sync_sema.h>
#include <kern/spl.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_sync.h>
#include <kern/ipc_tt.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/host.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/mach_param.h>

#include <libkern/OSAtomic.h>

static unsigned int semaphore_event;
#define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)

zone_t semaphore_zone;
unsigned int semaphore_max;

/* Forward declarations */

kern_return_t
semaphore_wait_trap_internal(
	mach_port_name_t name,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_wait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_trap_internal(
	mach_port_name_t name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name);

kern_return_t
semaphore_signal_internal(
	semaphore_t semaphore,
	thread_t thread,
	int options);

kern_return_t
semaphore_convert_wait_result(
	int wait_result);

void
semaphore_wait_continue(void);

static kern_return_t
semaphore_wait_internal(
	semaphore_t wait_semaphore,
	semaphore_t signal_semaphore,
	uint64_t deadline,
	int option,
	void (*caller_cont)(kern_return_t));

static __inline__ uint64_t
semaphore_deadline(
	unsigned int sec,
	clock_res_t nsec)
{
	uint64_t abstime;

	nanoseconds_to_absolutetime((uint64_t)sec * NSEC_PER_SEC + nsec, &abstime);
	clock_absolutetime_interval_to_deadline(abstime, &abstime);

	return (abstime);
}
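
/*
 * Illustrative sketch (an editorial example, not part of the original
 * source): how a caller would turn a relative 1.5 second timeout into
 * the absolute deadline consumed by the timed-wait paths below.
 */
#if 0
	uint64_t deadline;

	/* 1 second + 500,000,000 ns, anchored to "now" by
	 * clock_absolutetime_interval_to_deadline(). */
	deadline = semaphore_deadline(1, 500000000);

	/* deadline is now in mach absolute-time units, suitable for the
	 * deadline argument of waitq_assert_wait64_locked() below. */
#endif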

/*
 *	ROUTINE:	semaphore_init		[private]
 *
 *	Initialize the semaphore mechanisms.
 *	Right now, we only need to initialize the semaphore zone.
 */
void
semaphore_init(void)
{
	semaphore_zone = zinit(sizeof(struct semaphore),
			semaphore_max * sizeof(struct semaphore),
			sizeof(struct semaphore),
			"semaphores");
	zone_change(semaphore_zone, Z_NOENCRYPT, TRUE);
}

/*
 *	Routine:	semaphore_create
 *
 *	Creates a semaphore.
 *	The port representing the semaphore is returned as a parameter.
 */
kern_return_t
semaphore_create(
	task_t		task,
	semaphore_t	*new_semaphore,
	int		policy,
	int		value)
{
	semaphore_t	s = SEMAPHORE_NULL;
	kern_return_t	kret;

	*new_semaphore = SEMAPHORE_NULL;
	if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX)
		return KERN_INVALID_ARGUMENT;

	s = (semaphore_t) zalloc (semaphore_zone);

	if (s == SEMAPHORE_NULL)
		return KERN_RESOURCE_SHORTAGE;

	kret = waitq_init(&s->waitq, policy | SYNC_POLICY_DISABLE_IRQ); /* also inits lock */
	if (kret != KERN_SUCCESS) {
		zfree(semaphore_zone, s);
		return kret;
	}

	/*
	 * Initialize the semaphore values.
	 */
	s->port = IP_NULL;
	s->ref_count = 1;
	s->count = value;
	s->active = TRUE;
	s->owner = task;

	/*
	 * Associate the new semaphore with the task by adding
	 * the new semaphore to the task's semaphore list.
	 */
	task_lock(task);
	enqueue_head(&task->semaphore_list, (queue_entry_t) s);
	task->semaphores_owned++;
	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}
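
/*
 * Illustrative sketch (editorial example, not part of the original
 * file): a minimal in-kernel round trip through the routines in this
 * file.  Error handling is pared down; SYNC_POLICY_FIFO comes from
 * <mach/sync_policy.h>.
 */
#if 0
static void
semaphore_example(void)
{
	semaphore_t	sem;
	kern_return_t	kr;

	/* FIFO wakeup order, initial count 0 */
	kr = semaphore_create(current_task(), &sem, SYNC_POLICY_FIFO, 0);
	if (kr != KERN_SUCCESS)
		return;

	semaphore_signal(sem);		/* count 0 -> 1 (prepost)	*/
	kr = semaphore_wait(sem);	/* consumes the prepost, no block */

	/* semaphore_destroy consumes the reference created above */
	(void) semaphore_destroy(current_task(), sem);
}
#endif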

/*
 *	Routine:	semaphore_destroy_internal
 *
 *	Disassociate a semaphore from its owning task, mark it inactive,
 *	and set any waiting threads running with THREAD_RESTART.
 *
 *	Conditions:
 *			task is locked
 *			semaphore is locked
 *			semaphore is owned by the specified task
 *	Returns:
 *			with semaphore unlocked
 */
static void
semaphore_destroy_internal(
	task_t		task,
	semaphore_t	semaphore)
{
	int old_count;

	/* unlink semaphore from owning task */
	assert(semaphore->owner == task);
	remqueue((queue_entry_t) semaphore);
	semaphore->owner = TASK_NULL;
	task->semaphores_owned--;

	/*
	 * Deactivate semaphore
	 */
	assert(semaphore->active);
	semaphore->active = FALSE;

	/*
	 * Wakeup blocked threads
	 */
	old_count = semaphore->count;
	semaphore->count = 0;

	if (old_count < 0) {
		waitq_wakeup64_all_locked(&semaphore->waitq,
					  SEMAPHORE_EVENT,
					  THREAD_RESTART, NULL,
					  WAITQ_ALL_PRIORITIES,
					  WAITQ_UNLOCK);
		/* waitq/semaphore is unlocked */
	} else {
		semaphore_unlock(semaphore);
	}
}

/*
 *	Routine:	semaphore_destroy
 *
 *	Destroys a semaphore and consumes the caller's reference on the
 *	semaphore.
 */
kern_return_t
semaphore_destroy(
	task_t		task,
	semaphore_t	semaphore)
{
	spl_t spl_level;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		semaphore_dereference(semaphore);
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);
	spl_level = splsched();
	semaphore_lock(semaphore);

	if (semaphore->owner != task) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	semaphore_destroy_internal(task, semaphore);
	/* semaphore unlocked */

	splx(spl_level);
	task_unlock(task);

	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}

/*
 *	Routine:	semaphore_destroy_all
 *
 *	Destroy all the semaphores associated with a given task.
 */
#define SEMASPERSPL 20  /* max number of semaphores to destroy per spl hold */

void
semaphore_destroy_all(
	task_t		task)
{
	uint32_t count;
	spl_t spl_level;

	count = 0;
	task_lock(task);
	while (!queue_empty(&task->semaphore_list)) {
		semaphore_t semaphore;

		semaphore = (semaphore_t) queue_first(&task->semaphore_list);

		if (count == 0)
			spl_level = splsched();
		semaphore_lock(semaphore);

		semaphore_destroy_internal(task, semaphore);
		/* semaphore unlocked */

		/* throttle number of semaphores per interrupt disablement */
		if (++count == SEMASPERSPL) {
			count = 0;
			splx(spl_level);
		}
	}
	if (count != 0)
		splx(spl_level);

	task_unlock(task);
}

/*
 *	Routine:	semaphore_signal_internal
 *
 *		Signals the semaphore directly: wakes the specified thread,
 *		a single waiter, or all waiters, depending on the options.
 *	Assumptions:
 *		Semaphore is unlocked on entry; this routine takes and
 *		releases the semaphore lock itself.
 */
kern_return_t
semaphore_signal_internal(
	semaphore_t	semaphore,
	thread_t	thread,
	int		options)
{
	kern_return_t kr;
	spl_t spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore->active) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			kr = waitq_wakeup64_thread_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					thread,
					THREAD_AWAKENED,
					WAITQ_UNLOCK);
			/* waitq/semaphore is unlocked */
		} else {
			kr = KERN_NOT_WAITING;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		kr = KERN_NOT_WAITING;
		if (old_count < 0) {
			semaphore->count = 0;  /* always reset */
			kr = waitq_wakeup64_all_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED, NULL,
					WAITQ_ALL_PRIORITIES,
					WAITQ_UNLOCK);
			/* waitq / semaphore is unlocked */
		} else {
			if (options & SEMAPHORE_SIGNAL_PREPOST)
				semaphore->count++;
			kr = KERN_SUCCESS;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	if (semaphore->count < 0) {
		kr = waitq_wakeup64_one_locked(
				&semaphore->waitq,
				SEMAPHORE_EVENT,
				THREAD_AWAKENED, NULL,
				WAITQ_ALL_PRIORITIES,
				WAITQ_KEEP_LOCKED);
		if (kr == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else {
			semaphore->count = 0;  /* all waiters gone */
		}
	}

	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}
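
/*
 * Illustrative sketch (editorial example, not in the original file):
 * the three signal shapes the routine above distinguishes.  "sem" and
 * "thread" are placeholders.
 */
#if 0
	semaphore_t	sem;		/* placeholder semaphore */
	thread_t	thread;		/* placeholder thread    */
	kern_return_t	kr;

	/* Wake one waiter; remember the post if nobody is waiting: */
	kr = semaphore_signal_internal(sem, THREAD_NULL,
				       SEMAPHORE_SIGNAL_PREPOST);

	/* Wake every waiter and reset the count to zero: */
	kr = semaphore_signal_internal(sem, THREAD_NULL,
				       SEMAPHORE_SIGNAL_ALL);

	/* Wake one specific thread; KERN_NOT_WAITING on a miss: */
	kr = semaphore_signal_internal(sem, thread, SEMAPHORE_OPTION_NONE);
#endif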

/*
 *	Routine:	semaphore_signal_thread
 *
 *	If the specified thread is blocked on the semaphore, it is
 *	woken up.  If a NULL thread was supplied, then any one
 *	thread is woken up.  If no such thread was waiting, the
 *	caller gets KERN_NOT_WAITING and the semaphore is unchanged.
 */
kern_return_t
semaphore_signal_thread(
	semaphore_t	semaphore,
	thread_t	thread)
{
	kern_return_t ret;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	ret = semaphore_signal_internal(semaphore,
					thread,
					SEMAPHORE_OPTION_NONE);
	return ret;
}

/*
 *	Routine:	semaphore_signal_thread_trap
 *
 *	Trap interface to the semaphore_signal_thread function.
 */
kern_return_t
semaphore_signal_thread_trap(
	struct semaphore_signal_thread_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;
	mach_port_name_t thread_name = args->thread_name;
	semaphore_t	semaphore;
	thread_t	thread;
	kern_return_t	kr;

	/*
	 * MACH_PORT_NULL is not an error. It means that we want to
	 * select any one thread that is already waiting, but not to
	 * pre-post the semaphore.
	 */
	if (thread_name != MACH_PORT_NULL) {
		thread = port_name_to_thread(thread_name);
		if (thread == THREAD_NULL)
			return KERN_INVALID_ARGUMENT;
	} else
		thread = THREAD_NULL;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
					       thread,
					       SEMAPHORE_OPTION_NONE);
		semaphore_dereference(semaphore);
	}
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}
	return kr;
}

/*
 *	Routine:	semaphore_signal
 *
 *	Traditional (in-kernel client and MIG interface) semaphore
 *	signal routine.  Most users will access the trap version.
 *
 *	This interface is not defined to return info about whether
 *	this call found a thread waiting or not.  The internal
 *	routines (and future external routines) do.  We have to
 *	convert those into plain KERN_SUCCESS returns.
 */
kern_return_t
semaphore_signal(
	semaphore_t	semaphore)
{
	kern_return_t	kr;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = semaphore_signal_internal(semaphore,
				       THREAD_NULL,
				       SEMAPHORE_SIGNAL_PREPOST);
	if (kr == KERN_NOT_WAITING)
		return KERN_SUCCESS;
	return kr;
}

/*
 *	Routine:	semaphore_signal_trap
 *
 *	Trap interface to the semaphore_signal function.
 */
kern_return_t
semaphore_signal_trap(
	struct semaphore_signal_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;

	return (semaphore_signal_internal_trap(sema_name));
}

kern_return_t
semaphore_signal_internal_trap(mach_port_name_t sema_name)
{
	semaphore_t	semaphore;
	kern_return_t	kr;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
					       THREAD_NULL,
					       SEMAPHORE_SIGNAL_PREPOST);
		semaphore_dereference(semaphore);
		if (kr == KERN_NOT_WAITING)
			kr = KERN_SUCCESS;
	}
	return kr;
}

/*
 *	Routine:	semaphore_signal_all
 *
 *	Awakens ALL threads currently blocked on the semaphore.
 *	The semaphore count returns to zero.
 */
kern_return_t
semaphore_signal_all(
	semaphore_t	semaphore)
{
	kern_return_t kr;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	kr = semaphore_signal_internal(semaphore,
				       THREAD_NULL,
				       SEMAPHORE_SIGNAL_ALL);
	if (kr == KERN_NOT_WAITING)
		return KERN_SUCCESS;
	return kr;
}

/*
 *	Routine:	semaphore_signal_all_trap
 *
 *	Trap interface to the semaphore_signal_all function.
 */
kern_return_t
semaphore_signal_all_trap(
	struct semaphore_signal_all_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;
	semaphore_t	semaphore;
	kern_return_t	kr;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
					       THREAD_NULL,
					       SEMAPHORE_SIGNAL_ALL);
		semaphore_dereference(semaphore);
		if (kr == KERN_NOT_WAITING)
			kr = KERN_SUCCESS;
	}
	return kr;
}

/*
 *	Routine:	semaphore_convert_wait_result
 *
 *	Generate the return code after a semaphore wait/block.  It
 *	takes the wait result as an input and converts that to an
 *	appropriate result.
 */
kern_return_t
semaphore_convert_wait_result(int wait_result)
{
	switch (wait_result) {
	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	case THREAD_TIMED_OUT:
		return KERN_OPERATION_TIMED_OUT;

	case THREAD_INTERRUPTED:
		return KERN_ABORTED;

	case THREAD_RESTART:
		return KERN_TERMINATED;

	default:
		panic("semaphore_block\n");
		return KERN_FAILURE;
	}
}

/*
 *	Routine:	semaphore_wait_continue
 *
 *	Common continuation routine after waiting on a semaphore.
 *	It returns directly to user space.
 */
void
semaphore_wait_continue(void)
{
	thread_t self = current_thread();
	int wait_result = self->wait_result;
	void (*caller_cont)(kern_return_t) = self->sth_continuation;

	assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
	semaphore_dereference(self->sth_waitsemaphore);
	if (self->sth_signalsemaphore != SEMAPHORE_NULL)
		semaphore_dereference(self->sth_signalsemaphore);

	assert(caller_cont != (void (*)(kern_return_t))0);
	(*caller_cont)(semaphore_convert_wait_result(wait_result));
}

/*
 *	Routine:	semaphore_wait_internal
 *
 *		Decrements the semaphore count by one.  If the count is
 *		negative after the decrement, the calling thread blocks
 *		(possibly at a continuation and/or with a timeout).
 *
 *	Assumptions:
 *		A reference is held on the wait semaphore.
 *		A reference is held on the signal semaphore, if provided.
 */
static kern_return_t
semaphore_wait_internal(
	semaphore_t	wait_semaphore,
	semaphore_t	signal_semaphore,
	uint64_t	deadline,
	int		option,
	void		(*caller_cont)(kern_return_t))
{
	int		wait_result;
	spl_t		spl_level;
	kern_return_t	kr = KERN_ALREADY_WAITING;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	if (!wait_semaphore->active) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) {
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		thread_t self = current_thread();

		wait_semaphore->count = -1;  /* we don't keep an actual count */
		(void)waitq_assert_wait64_locked(
					&wait_semaphore->waitq,
					SEMAPHORE_EVENT,
					THREAD_ABORTSAFE,
					TIMEOUT_URGENCY_USER_NORMAL,
					deadline, TIMEOUT_NO_LEEWAY,
					self);
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
						      THREAD_NULL,
						      SEMAPHORE_SIGNAL_PREPOST);

		if (signal_kr == KERN_NOT_WAITING)
			signal_kr = KERN_SUCCESS;
		else if (signal_kr == KERN_TERMINATED) {
			/*
			 * Uh oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			thread_t self = current_thread();

			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED)
				kr = KERN_TERMINATED;
		}
	}

	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING)
		return kr;

	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of their own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		thread_t self = current_thread();

		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;
		wait_result = thread_block((thread_continue_t)semaphore_wait_continue);
	} else {
		wait_result = thread_block(THREAD_CONTINUE_NULL);
	}

	return (semaphore_convert_wait_result(wait_result));
}
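
/*
 * Illustrative sketch (editorial example, not in the original file):
 * the two block shapes semaphore_wait_internal supports.  "sem" is a
 * placeholder.
 */
#if 0
	semaphore_t	sem;		/* placeholder semaphore */
	kern_return_t	kr;

	/* In-kernel caller: result comes back inline after the block. */
	kr = semaphore_wait_internal(sem, SEMAPHORE_NULL,
				     0ULL, SEMAPHORE_OPTION_NONE,
				     (void (*)(kern_return_t))0);

	/* Trap caller: if the thread blocks, this never returns here;
	 * semaphore_wait_continue() runs instead and hands the converted
	 * wait result to thread_syscall_return(). */
	kr = semaphore_wait_internal(sem, SEMAPHORE_NULL,
				     0ULL, SEMAPHORE_OPTION_NONE,
				     thread_syscall_return);
#endif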

/*
 *	Routine:	semaphore_wait
 *
 *	Traditional (non-continuation) interface presented to
 *	in-kernel clients to wait on a semaphore.
 */
kern_return_t
semaphore_wait(
	semaphore_t	semaphore)
{
	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	return (semaphore_wait_internal(semaphore,
					SEMAPHORE_NULL,
					0ULL, SEMAPHORE_OPTION_NONE,
					(void (*)(kern_return_t))0));
}

kern_return_t
semaphore_wait_noblock(
	semaphore_t	semaphore)
{
	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	return (semaphore_wait_internal(semaphore,
					SEMAPHORE_NULL,
					0ULL, SEMAPHORE_TIMEOUT_NOBLOCK,
					(void (*)(kern_return_t))0));
}

kern_return_t
semaphore_wait_deadline(
	semaphore_t	semaphore,
	uint64_t	deadline)
{
	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	return (semaphore_wait_internal(semaphore,
					SEMAPHORE_NULL,
					deadline, SEMAPHORE_OPTION_NONE,
					(void (*)(kern_return_t))0));
}

/*
 *	Trap:	semaphore_wait_trap
 *
 *	Trap version of semaphore wait.  Called on behalf of user-level
 *	clients.
 */
kern_return_t
semaphore_wait_trap(
	struct semaphore_wait_trap_args *args)
{
	return (semaphore_wait_trap_internal(args->wait_name, thread_syscall_return));
}

kern_return_t
semaphore_wait_trap_internal(
	mach_port_name_t name,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t	semaphore;
	kern_return_t	kr;

	kr = port_name_to_semaphore(name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_wait_internal(semaphore,
					     SEMAPHORE_NULL,
					     0ULL, SEMAPHORE_OPTION_NONE,
					     caller_cont);
		semaphore_dereference(semaphore);
	}
	return kr;
}

/*
 *	Routine:	semaphore_timedwait
 *
 *	Traditional (non-continuation) interface presented to
 *	in-kernel clients to wait on a semaphore with a timeout.
 *
 *	A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait(
	semaphore_t		semaphore,
	mach_timespec_t		wait_time)
{
	int		option = SEMAPHORE_OPTION_NONE;
	uint64_t	deadline = 0;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	if (BAD_MACH_TIMESPEC(&wait_time))
		return KERN_INVALID_VALUE;

	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0)
		option = SEMAPHORE_TIMEOUT_NOBLOCK;
	else
		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);

	return (semaphore_wait_internal(semaphore,
					SEMAPHORE_NULL,
					deadline, option,
					(void (*)(kern_return_t))0));
}
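
/*
 * Illustrative sketch (editorial example, not in the original file):
 * the two timeout behaviors of semaphore_timedwait.  "sem" is a
 * placeholder.
 */
#if 0
	semaphore_t	sem;		/* placeholder semaphore */
	mach_timespec_t	ts;
	kern_return_t	kr;

	/* {0,0} polls: returns KERN_OPERATION_TIMED_OUT rather than
	 * blocking when no count is available. */
	ts.tv_sec = 0;
	ts.tv_nsec = 0;
	kr = semaphore_timedwait(sem, ts);

	/* A non-zero timeout blocks for at most ~2.5 seconds. */
	ts.tv_sec = 2;
	ts.tv_nsec = 500000000;
	kr = semaphore_timedwait(sem, ts);
#endif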

/*
 *	Trap:	semaphore_timedwait_trap
 *
 *	Trap version of a semaphore_timedwait.  The timeout parameter
 *	is passed in two distinct parts and re-assembled on this side
 *	of the trap interface (to accommodate calling conventions that
 *	pass structures as pointers instead of inline in registers without
 *	having to add a copyin).
 *
 *	A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_trap(
	struct semaphore_timedwait_trap_args *args)
{
	return (semaphore_timedwait_trap_internal(args->wait_name, args->sec, args->nsec, thread_syscall_return));
}

kern_return_t
semaphore_timedwait_trap_internal(
	mach_port_name_t name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t semaphore;
	mach_timespec_t wait_time;
	kern_return_t kr;

	wait_time.tv_sec = sec;
	wait_time.tv_nsec = nsec;
	if (BAD_MACH_TIMESPEC(&wait_time))
		return KERN_INVALID_VALUE;

	kr = port_name_to_semaphore(name, &semaphore);
	if (kr == KERN_SUCCESS) {
		int option = SEMAPHORE_OPTION_NONE;
		uint64_t deadline = 0;

		if (sec == 0 && nsec == 0)
			option = SEMAPHORE_TIMEOUT_NOBLOCK;
		else
			deadline = semaphore_deadline(sec, nsec);

		kr = semaphore_wait_internal(semaphore,
					     SEMAPHORE_NULL,
					     deadline, option,
					     caller_cont);
		semaphore_dereference(semaphore);
	}
	return kr;
}

/*
 *	Routine:	semaphore_wait_signal
 *
 *	Atomically register a wait on a semaphore and THEN signal
 *	another.  This is the in-kernel entry point that does not
 *	block at a continuation and does not free a signal_semaphore
 *	reference.
 */
kern_return_t
semaphore_wait_signal(
	semaphore_t	wait_semaphore,
	semaphore_t	signal_semaphore)
{
	if (wait_semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	return (semaphore_wait_internal(wait_semaphore,
					signal_semaphore,
					0ULL, SEMAPHORE_OPTION_NONE,
					(void (*)(kern_return_t))0));
}
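
/*
 * Illustrative handoff sketch (editorial example, not in the original
 * file): because the wait is registered before the signal is posted,
 * a peer woken on "ready_sem" cannot post "done_sem" before this
 * thread is queued for it.  Both names are placeholders.
 */
#if 0
	semaphore_t	done_sem, ready_sem;	/* placeholders */
	kern_return_t	kr;

	kr = semaphore_wait_signal(done_sem, ready_sem);
#endif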

/*
 *	Trap:	semaphore_wait_signal_trap
 *
 *	Atomically register a wait on a semaphore and THEN signal
 *	another.  This is the trap version from user space.
 */
kern_return_t
semaphore_wait_signal_trap(
	struct semaphore_wait_signal_trap_args *args)
{
	return (semaphore_wait_signal_trap_internal(args->wait_name, args->signal_name, thread_syscall_return));
}

kern_return_t
semaphore_wait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t wait_semaphore;
	semaphore_t signal_semaphore;
	kern_return_t kr;

	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
	if (kr == KERN_SUCCESS) {
		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
		if (kr == KERN_SUCCESS) {
			kr = semaphore_wait_internal(wait_semaphore,
						     signal_semaphore,
						     0ULL, SEMAPHORE_OPTION_NONE,
						     caller_cont);
			semaphore_dereference(wait_semaphore);
		}
		semaphore_dereference(signal_semaphore);
	}
	return kr;
}

/*
 *	Routine:	semaphore_timedwait_signal
 *
 *	Atomically register a wait on a semaphore and THEN signal
 *	another.  This is the in-kernel entry point that does not
 *	block at a continuation.
 *
 *	A timeout of {0,0} is considered non-blocking.
 */
kern_return_t
semaphore_timedwait_signal(
	semaphore_t		wait_semaphore,
	semaphore_t		signal_semaphore,
	mach_timespec_t		wait_time)
{
	int		option = SEMAPHORE_OPTION_NONE;
	uint64_t	deadline = 0;

	if (wait_semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	if (BAD_MACH_TIMESPEC(&wait_time))
		return KERN_INVALID_VALUE;

	if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0)
		option = SEMAPHORE_TIMEOUT_NOBLOCK;
	else
		deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);

	return (semaphore_wait_internal(wait_semaphore,
					signal_semaphore,
					deadline, option,
					(void (*)(kern_return_t))0));
}

/*
 *	Trap:	semaphore_timedwait_signal_trap
 *
 *	Atomically register a timed wait on a semaphore and THEN signal
 *	another.  This is the trap version from user space.
 */
kern_return_t
semaphore_timedwait_signal_trap(
	struct semaphore_timedwait_signal_trap_args *args)
{
	return (semaphore_timedwait_signal_trap_internal(args->wait_name, args->signal_name, args->sec, args->nsec, thread_syscall_return));
}

kern_return_t
semaphore_timedwait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t))
{
	semaphore_t wait_semaphore;
	semaphore_t signal_semaphore;
	mach_timespec_t wait_time;
	kern_return_t kr;

	wait_time.tv_sec = sec;
	wait_time.tv_nsec = nsec;
	if (BAD_MACH_TIMESPEC(&wait_time))
		return KERN_INVALID_VALUE;

	kr = port_name_to_semaphore(signal_name, &signal_semaphore);
	if (kr == KERN_SUCCESS) {
		kr = port_name_to_semaphore(wait_name, &wait_semaphore);
		if (kr == KERN_SUCCESS) {
			int option = SEMAPHORE_OPTION_NONE;
			uint64_t deadline = 0;

			if (sec == 0 && nsec == 0)
				option = SEMAPHORE_TIMEOUT_NOBLOCK;
			else
				deadline = semaphore_deadline(sec, nsec);

			kr = semaphore_wait_internal(wait_semaphore,
						     signal_semaphore,
						     deadline, option,
						     caller_cont);
			semaphore_dereference(wait_semaphore);
		}
		semaphore_dereference(signal_semaphore);
	}
	return kr;
}

/*
 *	Routine:	semaphore_reference
 *
 *	Take out a reference on a semaphore.  This keeps the data structure
 *	in existence (but the semaphore may be deactivated).
 */
void
semaphore_reference(
	semaphore_t	semaphore)
{
	(void)hw_atomic_add(&semaphore->ref_count, 1);
}

/*
 *	Routine:	semaphore_dereference
 *
 *	Release a reference on a semaphore.  If this is the last reference,
 *	the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
	semaphore_t	semaphore)
{
	uint32_t collisions;
	spl_t spl_level;

	if (semaphore == NULL)
		return;

	if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
		return;

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	ipc_port_t port = semaphore->port;

	if (IP_VALID(port)) {
		assert(!port->ip_srights);
		ipc_port_dealloc_kernel(port);
	}

	/*
	 * Lock the semaphore to lock in the owner task reference.
	 * Then continue to try to lock the task (inverse order).
	 */
	spl_level = splsched();
	semaphore_lock(semaphore);
	for (collisions = 0; semaphore->active; collisions++) {
		task_t task = semaphore->owner;

		assert(task != TASK_NULL);

		if (task_lock_try(task)) {
			semaphore_destroy_internal(task, semaphore);
			/* semaphore unlocked */
			splx(spl_level);
			task_unlock(task);
			goto out;
		}

		/* failed to get out-of-order locks */
		semaphore_unlock(semaphore);
		splx(spl_level);
		mutex_pause(collisions);
		spl_level = splsched();
		semaphore_lock(semaphore);
	}
	semaphore_unlock(semaphore);
	splx(spl_level);

out:
	zfree(semaphore_zone, semaphore);
}
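
/*
 * Illustrative sketch (editorial example, not in the original file) of
 * the out-of-order locking pattern used in semaphore_dereference above.
 * The usual order is task lock before semaphore lock, so when the task
 * lock cannot be taken the semaphore lock is dropped entirely and the
 * attempt retried after a scaled back-off.  All names here except
 * mutex_pause() are hypothetical stand-ins, not real kernel APIs.
 */
#if 0
	for (collisions = 0; still_need_second_lock; collisions++) {
		if (second_lock_try(second)) {
			break;			/* got both locks, proceed     */
		}
		first_unlock(first);		/* back off completely	       */
		mutex_pause(collisions);	/* delay grows with collisions */
		first_lock(first);		/* retake and retry	       */
	}
#endif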