]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/sync_sema.c
xnu-792.6.56.tar.gz
[apple/xnu.git] / osfmk / kern / sync_sema.c
CommitLineData
1c79356b 1/*
91447636 2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
1c79356b
A
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
ff6e181a
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
1c79356b 12 *
ff6e181a
A
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
ff6e181a
A
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
1c79356b
A
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23/*
24 * @OSF_COPYRIGHT@
25 *
26 */
27/*
28 * File: kern/sync_sema.c
29 * Author: Joseph CaraDonna
30 *
31 * Contains RT distributed semaphore synchronization services.
32 */
33
34#include <mach/mach_types.h>
91447636 35#include <mach/mach_traps.h>
1c79356b
A
36#include <mach/kern_return.h>
37#include <mach/semaphore.h>
38#include <mach/sync_policy.h>
91447636 39#include <mach/task.h>
1c79356b
A
40
41#include <kern/misc_protos.h>
42#include <kern/sync_sema.h>
43#include <kern/spl.h>
44#include <kern/ipc_kobject.h>
45#include <kern/ipc_sync.h>
46#include <kern/ipc_tt.h>
47#include <kern/thread.h>
48#include <kern/clock.h>
49#include <ipc/ipc_port.h>
50#include <ipc/ipc_space.h>
51#include <kern/host.h>
52#include <kern/wait_queue.h>
53#include <kern/zalloc.h>
54#include <kern/mach_param.h>
55
/*
 * Dummy object whose address serves as the single wait-queue event on
 * which all semaphore waiters block (the queue, not the event, is
 * per-semaphore).
 */
static unsigned int semaphore_event;
#define SEMAPHORE_EVENT ((event64_t)&semaphore_event)

/* Zone from which all struct semaphore allocations are made. */
zone_t semaphore_zone;
/* Upper bound on concurrently existing semaphores; sizes the zone. */
unsigned int semaphore_max = SEMAPHORE_MAX;
/* Forward declarations */

/* Shared helpers behind the *_trap entry points: they take a port name,
 * translate it to a semaphore reference, and wait/signal with the
 * supplied syscall-return continuation. */
kern_return_t
semaphore_wait_trap_internal(
	mach_port_name_t name,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_wait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_trap_internal(
	mach_port_name_t name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t));

kern_return_t
semaphore_timedwait_signal_trap_internal(
	mach_port_name_t wait_name,
	mach_port_name_t signal_name,
	unsigned int sec,
	clock_res_t nsec,
	void (*caller_cont)(kern_return_t));

/* Core signal/wait primitives that all public entry points funnel into. */
kern_return_t
semaphore_signal_internal(
	semaphore_t semaphore,
	thread_t thread,
	int options);

kern_return_t
semaphore_convert_wait_result(
	int wait_result);

void
semaphore_wait_continue(void);

kern_return_t
semaphore_wait_internal(
	semaphore_t wait_semaphore,
	semaphore_t signal_semaphore,
	mach_timespec_t *wait_timep,
	void (*caller_cont)(kern_return_t));
/*
 *	ROUTINE:	semaphore_init		[private]
 *
 *	Initialize the semaphore mechanisms.
 *	Right now, we only need to initialize the semaphore zone.
 */
void
semaphore_init(void)
{
	/* max memory, allocation granularity, and name for the zone */
	semaphore_zone = zinit(sizeof(struct semaphore),
			semaphore_max * sizeof(struct semaphore),
			sizeof(struct semaphore),
			"semaphores");
}
126
/*
 *	Routine:	semaphore_create
 *
 *	Creates a semaphore.
 *	The port representing the semaphore is returned as a parameter.
 *
 *	task:		owning task (must not be TASK_NULL)
 *	new_semaphore:	out parameter; SEMAPHORE_NULL on any failure
 *	policy:		wait-queue wakeup policy (<= SYNC_POLICY_MAX)
 *	value:		initial count (must be >= 0)
 */
kern_return_t
semaphore_create(
	task_t			task,
	semaphore_t		*new_semaphore,
	int			policy,
	int			value)
{
	semaphore_t		s = SEMAPHORE_NULL;

	/* Reject missing owner, negative initial count, or bad policy. */
	if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX) {
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_INVALID_ARGUMENT;
	}

	s = (semaphore_t) zalloc (semaphore_zone);

	if (s == SEMAPHORE_NULL) {
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_RESOURCE_SHORTAGE;
	}

	wait_queue_init(&s->wait_queue, policy); /* also inits lock */
	s->count = value;
	s->ref_count = 1;	/* the caller's reference */

	/*
	 *  Create and initialize the semaphore port
	 */
	s->port = ipc_port_alloc_kernel();
	if (s->port == IP_NULL) {
		/* This will deallocate the semaphore */
		semaphore_dereference(s);
		*new_semaphore = SEMAPHORE_NULL;
		return KERN_RESOURCE_SHORTAGE;
	}

	ipc_kobject_set (s->port, (ipc_kobject_t) s, IKOT_SEMAPHORE);

	/*
	 *  Associate the new semaphore with the task by adding
	 *  the new semaphore to the task's semaphore list.
	 *
	 *  Associate the task with the new semaphore by having the
	 *  semaphores task pointer point to the owning task's structure.
	 */
	task_lock(task);
	enqueue_head(&task->semaphore_list, (queue_entry_t) s);
	task->semaphores_owned++;
	s->owner = task;
	s->active = TRUE;	/* becomes usable only once fully linked */
	task_unlock(task);

	*new_semaphore = s;

	return KERN_SUCCESS;
}
191
/*
 *	Routine:	semaphore_destroy
 *
 *	Destroys a semaphore.  This call will only succeed if the
 *	specified task is the SAME task name specified at the semaphore's
 *	creation.
 *
 *	All threads currently blocked on the semaphore are awoken.  These
 *	threads will return with the KERN_TERMINATED error.
 */
kern_return_t
semaphore_destroy(
	task_t			task,
	semaphore_t		semaphore)
{
	int			old_count;
	spl_t			spl_level;

	if (task == TASK_NULL || semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	/*
	 *  Disown semaphore.  Note the task lock is dropped before the
	 *  semaphore lock is taken, so the two are never held together.
	 */
	task_lock(task);
	if (semaphore->owner != task) {
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	remqueue(&task->semaphore_list, (queue_entry_t) semaphore);
	semaphore->owner = TASK_NULL;
	task->semaphores_owned--;
	task_unlock(task);

	spl_level = splsched();
	semaphore_lock(semaphore);

	/*
	 *  Deactivate semaphore; subsequent operations on it will see
	 *  !active and return KERN_TERMINATED.
	 */
	assert(semaphore->active);
	semaphore->active = FALSE;

	/*
	 *  Wakeup blocked threads
	 */
	old_count = semaphore->count;
	semaphore->count = 0;

	if (old_count < 0) {
		/* A negative count marks the presence of waiters; wake them
		 * all with THREAD_RESTART, which converts to KERN_TERMINATED
		 * in semaphore_convert_wait_result(). */
		wait_queue_wakeup64_all_locked(&semaphore->wait_queue,
					       SEMAPHORE_EVENT,
					       THREAD_RESTART,
					       TRUE);		/* unlock? */
	} else {
		semaphore_unlock(semaphore);
	}
	splx(spl_level);

	/*
	 *  Deallocate
	 *
	 *  Drop the semaphore reference, which in turn deallocates the
	 *  semaphore structure if the reference count goes to zero.
	 */
	ipc_port_dealloc_kernel(semaphore->port);
	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}
262
/*
 *	Routine:	semaphore_signal_internal
 *
 *		Signals the semaphore as direct.
 *	Assumptions:
 *		Semaphore is unlocked on entry; this routine acquires and
 *		releases the semaphore lock itself (at splsched).
 *
 *	thread:  a specific thread to wake, or THREAD_NULL for "any".
 *	options: SEMAPHORE_SIGNAL_ALL and/or SEMAPHORE_SIGNAL_PREPOST.
 */
kern_return_t
semaphore_signal_internal(
	semaphore_t		semaphore,
	thread_t		thread,
	int			options)
{
	kern_return_t kr;
	spl_t  spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	if (!semaphore->active) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	/*
	 * Wake one specific thread: succeeds only if that thread is
	 * actually blocked here; the count is never pre-posted.
	 */
	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			kr = wait_queue_wakeup64_thread_locked(
					&semaphore->wait_queue,
					SEMAPHORE_EVENT,
					thread,
					THREAD_AWAKENED,
					TRUE);  /* unlock? */
		} else {
			semaphore_unlock(semaphore);
			kr = KERN_NOT_WAITING;
		}
		splx(spl_level);
		return kr;
	}

	/*
	 * Signal-all: wake every waiter and always reset count to zero.
	 */
	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		if (old_count < 0) {
			semaphore->count = 0;  /* always reset */
			kr = wait_queue_wakeup64_all_locked(
					&semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED,
					TRUE);		/* unlock? */
		} else {
			if (options & SEMAPHORE_SIGNAL_PREPOST)
				semaphore->count++;
			semaphore_unlock(semaphore);
			kr = KERN_SUCCESS;
		}
		splx(spl_level);
		return kr;
	}

	/*
	 * Plain signal: wake a single waiter if there is one.
	 */
	if (semaphore->count < 0) {
		if (wait_queue_wakeup64_one_locked(
					&semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED,
					FALSE) == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else
			semaphore->count = 0;  /* all waiters gone */
	}

	/* Nobody was waiting; remember the signal if pre-posting. */
	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}
345
346/*
347 * Routine: semaphore_signal_thread
348 *
91447636
A
349 * If the specified thread is blocked on the semaphore, it is
350 * woken up. If a NULL thread was supplied, then any one
1c79356b
A
351 * thread is woken up. Otherwise the caller gets KERN_NOT_WAITING
352 * and the semaphore is unchanged.
353 */
354kern_return_t
355semaphore_signal_thread(
356 semaphore_t semaphore,
91447636 357 thread_t thread)
1c79356b
A
358{
359 kern_return_t ret;
360
361 if (semaphore == SEMAPHORE_NULL)
362 return KERN_INVALID_ARGUMENT;
363
364 ret = semaphore_signal_internal(semaphore,
91447636 365 thread,
1c79356b
A
366 SEMAPHORE_OPTION_NONE);
367 return ret;
368}
369
370/*
371 * Routine: semaphore_signal_thread_trap
372 *
373 * Trap interface to the semaphore_signal_thread function.
374 */
375kern_return_t
376semaphore_signal_thread_trap(
91447636 377 struct semaphore_signal_thread_trap_args *args)
1c79356b 378{
91447636
A
379 mach_port_name_t sema_name = args->signal_name;
380 mach_port_name_t thread_name = args->thread_name;
1c79356b 381 semaphore_t semaphore;
91447636 382 thread_t thread;
1c79356b
A
383 kern_return_t kr;
384
385 /*
386 * MACH_PORT_NULL is not an error. It means that we want to
387 * select any one thread that is already waiting, but not to
388 * pre-post the semaphore.
389 */
390 if (thread_name != MACH_PORT_NULL) {
91447636
A
391 thread = port_name_to_thread(thread_name);
392 if (thread == THREAD_NULL)
1c79356b
A
393 return KERN_INVALID_ARGUMENT;
394 } else
91447636 395 thread = THREAD_NULL;
1c79356b
A
396
397 kr = port_name_to_semaphore(sema_name, &semaphore);
91447636
A
398 if (kr == KERN_SUCCESS) {
399 kr = semaphore_signal_internal(semaphore,
400 thread,
401 SEMAPHORE_OPTION_NONE);
402 semaphore_dereference(semaphore);
403 }
404 if (thread != THREAD_NULL) {
405 thread_deallocate(thread);
1c79356b 406 }
1c79356b
A
407 return kr;
408}
409
410
411
412/*
413 * Routine: semaphore_signal
414 *
415 * Traditional (in-kernel client and MIG interface) semaphore
416 * signal routine. Most users will access the trap version.
417 *
418 * This interface in not defined to return info about whether
419 * this call found a thread waiting or not. The internal
420 * routines (and future external routines) do. We have to
421 * convert those into plain KERN_SUCCESS returns.
422 */
423kern_return_t
424semaphore_signal(
425 semaphore_t semaphore)
426{
427 kern_return_t kr;
428
429 if (semaphore == SEMAPHORE_NULL)
430 return KERN_INVALID_ARGUMENT;
431
432 kr = semaphore_signal_internal(semaphore,
91447636 433 THREAD_NULL,
1c79356b
A
434 SEMAPHORE_SIGNAL_PREPOST);
435 if (kr == KERN_NOT_WAITING)
436 return KERN_SUCCESS;
437 return kr;
438}
439
440/*
441 * Routine: semaphore_signal_trap
442 *
443 * Trap interface to the semaphore_signal function.
444 */
445kern_return_t
446semaphore_signal_trap(
91447636 447 struct semaphore_signal_trap_args *args)
1c79356b 448{
91447636 449 mach_port_name_t sema_name = args->signal_name;
1c79356b
A
450 semaphore_t semaphore;
451 kern_return_t kr;
452
453 kr = port_name_to_semaphore(sema_name, &semaphore);
91447636
A
454 if (kr == KERN_SUCCESS) {
455 kr = semaphore_signal_internal(semaphore,
456 THREAD_NULL,
457 SEMAPHORE_SIGNAL_PREPOST);
458 semaphore_dereference(semaphore);
459 if (kr == KERN_NOT_WAITING)
460 kr = KERN_SUCCESS;
1c79356b 461 }
1c79356b
A
462 return kr;
463}
464
465/*
466 * Routine: semaphore_signal_all
467 *
468 * Awakens ALL threads currently blocked on the semaphore.
469 * The semaphore count returns to zero.
470 */
471kern_return_t
472semaphore_signal_all(
473 semaphore_t semaphore)
474{
475 kern_return_t kr;
476
477 if (semaphore == SEMAPHORE_NULL)
478 return KERN_INVALID_ARGUMENT;
479
480 kr = semaphore_signal_internal(semaphore,
91447636 481 THREAD_NULL,
1c79356b
A
482 SEMAPHORE_SIGNAL_ALL);
483 if (kr == KERN_NOT_WAITING)
484 return KERN_SUCCESS;
485 return kr;
486}
487
488/*
489 * Routine: semaphore_signal_all_trap
490 *
491 * Trap interface to the semaphore_signal_all function.
492 */
493kern_return_t
494semaphore_signal_all_trap(
91447636 495 struct semaphore_signal_all_trap_args *args)
1c79356b 496{
91447636 497 mach_port_name_t sema_name = args->signal_name;
1c79356b
A
498 semaphore_t semaphore;
499 kern_return_t kr;
500
501 kr = port_name_to_semaphore(sema_name, &semaphore);
91447636
A
502 if (kr == KERN_SUCCESS) {
503 kr = semaphore_signal_internal(semaphore,
504 THREAD_NULL,
505 SEMAPHORE_SIGNAL_ALL);
506 semaphore_dereference(semaphore);
507 if (kr == KERN_NOT_WAITING)
508 kr = KERN_SUCCESS;
1c79356b 509 }
1c79356b
A
510 return kr;
511}
512
513/*
514 * Routine: semaphore_convert_wait_result
515 *
516 * Generate the return code after a semaphore wait/block. It
517 * takes the wait result as an input and coverts that to an
518 * appropriate result.
519 */
520kern_return_t
521semaphore_convert_wait_result(int wait_result)
522{
523 switch (wait_result) {
524 case THREAD_AWAKENED:
525 return KERN_SUCCESS;
526
527 case THREAD_TIMED_OUT:
528 return KERN_OPERATION_TIMED_OUT;
529
530 case THREAD_INTERRUPTED:
531 return KERN_ABORTED;
532
533 case THREAD_RESTART:
534 return KERN_TERMINATED;
535
536 default:
537 panic("semaphore_block\n");
538 return KERN_FAILURE;
539 }
540}
541
/*
 *	Routine:	semaphore_wait_continue
 *
 *	Common continuation routine after waiting on a semaphore.
 *	It returns directly to user space.
 */
void
semaphore_wait_continue(void)
{
	thread_t self = current_thread();
	int wait_result = self->wait_result;
	void (*caller_cont)(kern_return_t) = self->sth_continuation;

	/* Drop the semaphore reference(s) stashed before blocking. */
	assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
	semaphore_dereference(self->sth_waitsemaphore);
	if (self->sth_signalsemaphore != SEMAPHORE_NULL)
		semaphore_dereference(self->sth_signalsemaphore);

	assert(caller_cont != (void (*)(kern_return_t))0);
	/* Hand the converted wait result to the caller's continuation
	 * (typically thread_syscall_return, back to user space). */
	(*caller_cont)(semaphore_convert_wait_result(wait_result));
}
563
/*
 *	Routine:	semaphore_wait_internal
 *
 *		Decrements the semaphore count by one.  If the count is
 *		negative after the decrement, the calling thread blocks
 *		(possibly at a continuation and/or with a timeout).
 *
 *	Assumptions:
 *		A reference is held on the wait semaphore, and on the
 *		signal semaphore if one is provided.
 */
kern_return_t
semaphore_wait_internal(
	semaphore_t		wait_semaphore,
	semaphore_t		signal_semaphore,
	mach_timespec_t		*wait_timep,
	void			(*caller_cont)(kern_return_t))
{
	boolean_t		nonblocking;
	int			wait_result;
	spl_t			spl_level;
	kern_return_t		kr = KERN_ALREADY_WAITING;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	/*
	 * Decide if we really have to wait.
	 * A {0,0} timeout means "poll": never block.
	 */
	nonblocking = (wait_timep != (mach_timespec_t *)0) ?
		      (wait_timep->tv_sec == 0 && wait_timep->tv_nsec == 0) :
		      FALSE;

	if (!wait_semaphore->active) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (nonblocking) {
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		uint64_t	abstime;
		thread_t	self = current_thread();

		wait_semaphore->count = -1;  /* we don't keep an actual count */
		thread_lock(self);

		/*
		 * If it is a timed wait, calculate the wake up deadline.
		 */
		if (wait_timep != (mach_timespec_t *)0) {
			nanoseconds_to_absolutetime((uint64_t)wait_timep->tv_sec *
					NSEC_PER_SEC + wait_timep->tv_nsec, &abstime);
			clock_absolutetime_interval_to_deadline(abstime, &abstime);
		}
		else
			abstime = 0;	/* no deadline: wait until signalled */

		/* Queue ourselves on the semaphore's wait queue; the actual
		 * block happens below, after all locks are dropped. */
		(void)wait_queue_assert_wait64_locked(
					&wait_semaphore->wait_queue,
					SEMAPHORE_EVENT,
					THREAD_ABORTSAFE, abstime,
					self);
		thread_unlock(self);
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
						      THREAD_NULL,
						      SEMAPHORE_SIGNAL_PREPOST);

		if (signal_kr == KERN_NOT_WAITING)
			signal_kr = KERN_SUCCESS;
		else if (signal_kr == KERN_TERMINATED) {
			/*
			 * Uh!Oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			thread_t self = current_thread();

			/* NOTE(review): this path runs even when no wait was
			 * asserted above (kr != KERN_ALREADY_WAITING), in which
			 * case kr is overwritten below from a wait_result we
			 * never set up — presumably benign, but worth a
			 * second look. */
			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED)
				kr = KERN_TERMINATED;
		}
	}

	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING)
		return kr;

	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of his own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		thread_t self = current_thread();

		/* Stash state for semaphore_wait_continue, which runs
		 * instead of returning here if the stack is discarded. */
		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;
		wait_result = thread_block((thread_continue_t)semaphore_wait_continue);
	}
	else {
		wait_result = thread_block(THREAD_CONTINUE_NULL);
	}

	return (semaphore_convert_wait_result(wait_result));
}
701
702
703/*
704 * Routine: semaphore_wait
705 *
706 * Traditional (non-continuation) interface presented to
707 * in-kernel clients to wait on a semaphore.
708 */
709kern_return_t
710semaphore_wait(
711 semaphore_t semaphore)
712{
713
714 if (semaphore == SEMAPHORE_NULL)
715 return KERN_INVALID_ARGUMENT;
716
717 return(semaphore_wait_internal(semaphore,
718 SEMAPHORE_NULL,
719 (mach_timespec_t *)0,
720 (void (*)(kern_return_t))0));
721}
722
723/*
724 * Trap: semaphore_wait_trap
725 *
726 * Trap version of semaphore wait. Called on behalf of user-level
727 * clients.
728 */
91447636 729
1c79356b
A
730kern_return_t
731semaphore_wait_trap(
91447636
A
732 struct semaphore_wait_trap_args *args)
733{
734 return(semaphore_wait_trap_internal(args->wait_name, thread_syscall_return));
735}
736
737
738
739kern_return_t
740semaphore_wait_trap_internal(
741 mach_port_name_t name,
742 void (*caller_cont)(kern_return_t))
1c79356b
A
743{
744 semaphore_t semaphore;
745 kern_return_t kr;
746
747 kr = port_name_to_semaphore(name, &semaphore);
91447636
A
748 if (kr == KERN_SUCCESS) {
749 kr = semaphore_wait_internal(semaphore,
750 SEMAPHORE_NULL,
751 (mach_timespec_t *)0,
752 caller_cont);
753 semaphore_dereference(semaphore);
754 }
1c79356b
A
755 return kr;
756}
757
758/*
759 * Routine: semaphore_timedwait
760 *
761 * Traditional (non-continuation) interface presented to
762 * in-kernel clients to wait on a semaphore with a timeout.
763 *
764 * A timeout of {0,0} is considered non-blocking.
765 */
766kern_return_t
767semaphore_timedwait(
768 semaphore_t semaphore,
769 mach_timespec_t wait_time)
770{
771 if (semaphore == SEMAPHORE_NULL)
772 return KERN_INVALID_ARGUMENT;
773
774 if(BAD_MACH_TIMESPEC(&wait_time))
775 return KERN_INVALID_VALUE;
776
777 return (semaphore_wait_internal(semaphore,
778 SEMAPHORE_NULL,
779 &wait_time,
780 (void(*)(kern_return_t))0));
781
782}
783
784/*
785 * Trap: semaphore_timedwait_trap
786 *
787 * Trap version of a semaphore_timedwait. The timeout parameter
788 * is passed in two distinct parts and re-assembled on this side
789 * of the trap interface (to accomodate calling conventions that
790 * pass structures as pointers instead of inline in registers without
791 * having to add a copyin).
792 *
793 * A timeout of {0,0} is considered non-blocking.
794 */
795kern_return_t
796semaphore_timedwait_trap(
91447636 797 struct semaphore_timedwait_trap_args *args)
1c79356b 798{
91447636
A
799
800 return(semaphore_timedwait_trap_internal(args->wait_name, args->sec, args->nsec, thread_syscall_return));
801}
802
803
804kern_return_t
805semaphore_timedwait_trap_internal(
806 mach_port_name_t name,
807 unsigned int sec,
808 clock_res_t nsec,
809 void (*caller_cont)(kern_return_t))
810{
811
1c79356b
A
812 semaphore_t semaphore;
813 mach_timespec_t wait_time;
814 kern_return_t kr;
815
816 wait_time.tv_sec = sec;
817 wait_time.tv_nsec = nsec;
818 if(BAD_MACH_TIMESPEC(&wait_time))
819 return KERN_INVALID_VALUE;
820
821 kr = port_name_to_semaphore(name, &semaphore);
91447636
A
822 if (kr == KERN_SUCCESS) {
823 kr = semaphore_wait_internal(semaphore,
824 SEMAPHORE_NULL,
825 &wait_time,
826 caller_cont);
827 semaphore_dereference(semaphore);
828 }
1c79356b
A
829 return kr;
830}
831
832/*
833 * Routine: semaphore_wait_signal
834 *
835 * Atomically register a wait on a semaphore and THEN signal
836 * another. This is the in-kernel entry point that does not
837 * block at a continuation and does not free a signal_semaphore
838 * reference.
839 */
840kern_return_t
841semaphore_wait_signal(
842 semaphore_t wait_semaphore,
843 semaphore_t signal_semaphore)
844{
845 if (wait_semaphore == SEMAPHORE_NULL)
846 return KERN_INVALID_ARGUMENT;
847
848 return(semaphore_wait_internal(wait_semaphore,
849 signal_semaphore,
850 (mach_timespec_t *)0,
851 (void(*)(kern_return_t))0));
852}
853
854/*
855 * Trap: semaphore_wait_signal_trap
856 *
857 * Atomically register a wait on a semaphore and THEN signal
858 * another. This is the trap version from user space.
859 */
860kern_return_t
861semaphore_wait_signal_trap(
91447636
A
862 struct semaphore_wait_signal_trap_args *args)
863{
864 return(semaphore_wait_signal_trap_internal(args->wait_name, args->signal_name, thread_syscall_return));
865}
866
867kern_return_t
868semaphore_wait_signal_trap_internal(
869 mach_port_name_t wait_name,
870 mach_port_name_t signal_name,
871 void (*caller_cont)(kern_return_t))
1c79356b
A
872{
873 semaphore_t wait_semaphore;
874 semaphore_t signal_semaphore;
875 kern_return_t kr;
876
877 kr = port_name_to_semaphore(signal_name, &signal_semaphore);
91447636
A
878 if (kr == KERN_SUCCESS) {
879 kr = port_name_to_semaphore(wait_name, &wait_semaphore);
880 if (kr == KERN_SUCCESS) {
881 kr = semaphore_wait_internal(wait_semaphore,
882 signal_semaphore,
883 (mach_timespec_t *)0,
884 caller_cont);
885 semaphore_dereference(wait_semaphore);
886 }
1c79356b 887 semaphore_dereference(signal_semaphore);
1c79356b 888 }
1c79356b
A
889 return kr;
890}
891
892
893/*
894 * Routine: semaphore_timedwait_signal
895 *
896 * Atomically register a wait on a semaphore and THEN signal
897 * another. This is the in-kernel entry point that does not
898 * block at a continuation.
899 *
900 * A timeout of {0,0} is considered non-blocking.
901 */
902kern_return_t
903semaphore_timedwait_signal(
904 semaphore_t wait_semaphore,
905 semaphore_t signal_semaphore,
906 mach_timespec_t wait_time)
907{
908 if (wait_semaphore == SEMAPHORE_NULL)
909 return KERN_INVALID_ARGUMENT;
910
911 if(BAD_MACH_TIMESPEC(&wait_time))
912 return KERN_INVALID_VALUE;
913
914 return(semaphore_wait_internal(wait_semaphore,
915 signal_semaphore,
916 &wait_time,
917 (void(*)(kern_return_t))0));
918}
919
920/*
921 * Trap: semaphore_timedwait_signal_trap
922 *
923 * Atomically register a timed wait on a semaphore and THEN signal
924 * another. This is the trap version from user space.
925 */
926kern_return_t
927semaphore_timedwait_signal_trap(
91447636
A
928 struct semaphore_timedwait_signal_trap_args *args)
929{
930 return(semaphore_timedwait_signal_trap_internal(args->wait_name, args->signal_name, args->sec, args->nsec, thread_syscall_return));
931}
932
933kern_return_t
934semaphore_timedwait_signal_trap_internal(
935 mach_port_name_t wait_name,
936 mach_port_name_t signal_name,
937 unsigned int sec,
938 clock_res_t nsec,
939 void (*caller_cont)(kern_return_t))
1c79356b
A
940{
941 semaphore_t wait_semaphore;
942 semaphore_t signal_semaphore;
943 mach_timespec_t wait_time;
944 kern_return_t kr;
945
946 wait_time.tv_sec = sec;
947 wait_time.tv_nsec = nsec;
948 if(BAD_MACH_TIMESPEC(&wait_time))
949 return KERN_INVALID_VALUE;
950
951 kr = port_name_to_semaphore(signal_name, &signal_semaphore);
91447636
A
952 if (kr == KERN_SUCCESS) {
953 kr = port_name_to_semaphore(wait_name, &wait_semaphore);
954 if (kr == KERN_SUCCESS) {
955 kr = semaphore_wait_internal(wait_semaphore,
956 signal_semaphore,
957 &wait_time,
958 caller_cont);
959 semaphore_dereference(wait_semaphore);
960 }
1c79356b 961 semaphore_dereference(signal_semaphore);
1c79356b 962 }
1c79356b
A
963 return kr;
964}
965
966
967/*
968 * Routine: semaphore_reference
969 *
970 * Take out a reference on a semaphore. This keeps the data structure
971 * in existence (but the semaphore may be deactivated).
972 */
973void
974semaphore_reference(
975 semaphore_t semaphore)
976{
977 spl_t spl_level;
978
979 spl_level = splsched();
980 semaphore_lock(semaphore);
981
982 semaphore->ref_count++;
983
984 semaphore_unlock(semaphore);
985 splx(spl_level);
986}
987
988/*
989 * Routine: semaphore_dereference
990 *
991 * Release a reference on a semaphore. If this is the last reference,
992 * the semaphore data structure is deallocated.
993 */
994void
995semaphore_dereference(
996 semaphore_t semaphore)
997{
998 int ref_count;
999 spl_t spl_level;
1000
1001 if (semaphore != NULL) {
1002 spl_level = splsched();
1003 semaphore_lock(semaphore);
1004
1005 ref_count = --(semaphore->ref_count);
1006
1007 semaphore_unlock(semaphore);
1008 splx(spl_level);
1009
1010 if (ref_count == 0) {
1011 assert(wait_queue_empty(&semaphore->wait_queue));
91447636 1012 zfree(semaphore_zone, semaphore);
1c79356b
A
1013 }
1014 }
1015}