]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/sync_sema.c
xnu-3789.70.16.tar.gz
[apple/xnu.git] / osfmk / kern / sync_sema.c
1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 *
31 */
32 /*
33 * File: kern/sync_sema.c
34 * Author: Joseph CaraDonna
35 *
36 * Contains RT distributed semaphore synchronization services.
37 */
38
39 #include <mach/mach_types.h>
40 #include <mach/mach_traps.h>
41 #include <mach/kern_return.h>
42 #include <mach/semaphore.h>
43 #include <mach/sync_policy.h>
44 #include <mach/task.h>
45
46 #include <kern/misc_protos.h>
47 #include <kern/sync_sema.h>
48 #include <kern/spl.h>
49 #include <kern/ipc_kobject.h>
50 #include <kern/ipc_sync.h>
51 #include <kern/ipc_tt.h>
52 #include <kern/thread.h>
53 #include <kern/clock.h>
54 #include <ipc/ipc_port.h>
55 #include <ipc/ipc_space.h>
56 #include <kern/host.h>
57 #include <kern/waitq.h>
58 #include <kern/zalloc.h>
59 #include <kern/mach_param.h>
60
61 #include <libkern/OSAtomic.h>
62
63 static unsigned int semaphore_event;
64 #define SEMAPHORE_EVENT CAST_EVENT64_T(&semaphore_event)
65
66 zone_t semaphore_zone;
67 unsigned int semaphore_max;
68
69 /* Forward declarations */
70
71
72 kern_return_t
73 semaphore_wait_trap_internal(
74 mach_port_name_t name,
75 void (*caller_cont)(kern_return_t));
76
77 kern_return_t
78 semaphore_wait_signal_trap_internal(
79 mach_port_name_t wait_name,
80 mach_port_name_t signal_name,
81 void (*caller_cont)(kern_return_t));
82
83 kern_return_t
84 semaphore_timedwait_trap_internal(
85 mach_port_name_t name,
86 unsigned int sec,
87 clock_res_t nsec,
88 void (*caller_cont)(kern_return_t));
89
90 kern_return_t
91 semaphore_timedwait_signal_trap_internal(
92 mach_port_name_t wait_name,
93 mach_port_name_t signal_name,
94 unsigned int sec,
95 clock_res_t nsec,
96 void (*caller_cont)(kern_return_t));
97
98 kern_return_t
99 semaphore_signal_internal_trap(mach_port_name_t sema_name);
100
101 kern_return_t
102 semaphore_signal_internal(
103 semaphore_t semaphore,
104 thread_t thread,
105 int options);
106
107 kern_return_t
108 semaphore_convert_wait_result(
109 int wait_result);
110
111 void
112 semaphore_wait_continue(void);
113
114 static kern_return_t
115 semaphore_wait_internal(
116 semaphore_t wait_semaphore,
117 semaphore_t signal_semaphore,
118 uint64_t deadline,
119 int option,
120 void (*caller_cont)(kern_return_t));
121
122 void
123 kdp_sema_find_owner(
124 struct waitq * waitq,
125 event64_t event,
126 thread_waitinfo_t * waitinfo);
127
128 static __inline__ uint64_t
129 semaphore_deadline(
130 unsigned int sec,
131 clock_res_t nsec)
132 {
133 uint64_t abstime;
134
135 nanoseconds_to_absolutetime((uint64_t)sec * NSEC_PER_SEC + nsec, &abstime);
136 clock_absolutetime_interval_to_deadline(abstime, &abstime);
137
138 return (abstime);
139 }
140
/*
 *	ROUTINE:	semaphore_init		[private]
 *
 *	Initialize the semaphore mechanisms.
 *	Right now, we only need to initialize the semaphore zone.
 */
void
semaphore_init(void)
{
	/* zone sized for semaphore_max entries; each allocation is one semaphore */
	semaphore_zone = zinit(sizeof(struct semaphore),
			semaphore_max * sizeof(struct semaphore),
			sizeof(struct semaphore),
			"semaphores");
	/* Z_NOENCRYPT: zone contents need not be encrypted (kernel metadata only) */
	zone_change(semaphore_zone, Z_NOENCRYPT, TRUE);
}
156
157 /*
158 * Routine: semaphore_create
159 *
160 * Creates a semaphore.
161 * The port representing the semaphore is returned as a parameter.
162 */
163 kern_return_t
164 semaphore_create(
165 task_t task,
166 semaphore_t *new_semaphore,
167 int policy,
168 int value)
169 {
170 semaphore_t s = SEMAPHORE_NULL;
171 kern_return_t kret;
172
173
174 *new_semaphore = SEMAPHORE_NULL;
175 if (task == TASK_NULL || value < 0 || policy > SYNC_POLICY_MAX)
176 return KERN_INVALID_ARGUMENT;
177
178 s = (semaphore_t) zalloc (semaphore_zone);
179
180 if (s == SEMAPHORE_NULL)
181 return KERN_RESOURCE_SHORTAGE;
182
183 kret = waitq_init(&s->waitq, policy | SYNC_POLICY_DISABLE_IRQ); /* also inits lock */
184 if (kret != KERN_SUCCESS) {
185 zfree(semaphore_zone, s);
186 return kret;
187 }
188
189 /*
190 * Initialize the semaphore values.
191 */
192 s->port = IP_NULL;
193 s->ref_count = 1;
194 s->count = value;
195 s->active = TRUE;
196 s->owner = task;
197
198 /*
199 * Associate the new semaphore with the task by adding
200 * the new semaphore to the task's semaphore list.
201 */
202 task_lock(task);
203 enqueue_head(&task->semaphore_list, (queue_entry_t) s);
204 task->semaphores_owned++;
205 task_unlock(task);
206
207 *new_semaphore = s;
208
209 return KERN_SUCCESS;
210 }
211
/*
 *	Routine:	semaphore_destroy_internal
 *
 *	Disassociate a semaphore from its owning task, mark it inactive,
 *	and set any waiting threads running with THREAD_RESTART.
 *
 *	Conditions:
 *			task is locked
 *			semaphore is locked
 *			semaphore is owned by the specified task
 *	Returns:
 *			with semaphore unlocked
 */
static void
semaphore_destroy_internal(
	task_t			task,
	semaphore_t		semaphore)
{
	int			old_count;

	/* unlink semaphore from owning task */
	assert(semaphore->owner == task);
	remqueue((queue_entry_t) semaphore);
	semaphore->owner = TASK_NULL;
	task->semaphores_owned--;

	/*
	 * Deactivate semaphore
	 */
	assert(semaphore->active);
	semaphore->active = FALSE;

	/*
	 * Wakeup blocked threads
	 */
	old_count = semaphore->count;
	semaphore->count = 0;

	/* a negative count means threads are blocked on the waitq */
	if (old_count < 0) {
		/* wake everyone with THREAD_RESTART; WAITQ_UNLOCK drops the lock for us */
		waitq_wakeup64_all_locked(&semaphore->waitq,
					  SEMAPHORE_EVENT,
					  THREAD_RESTART, NULL,
					  WAITQ_ALL_PRIORITIES,
					  WAITQ_UNLOCK);
		/* waitq/semaphore is unlocked */
	} else {
		semaphore_unlock(semaphore);
	}
}
261
/*
 *	Routine:	semaphore_destroy
 *
 *	Destroys a semaphore and consumes the caller's reference on the
 *	semaphore.
 */
kern_return_t
semaphore_destroy(
	task_t			task,
	semaphore_t		semaphore)
{
	spl_t spl_level;

	if (semaphore == SEMAPHORE_NULL)
		return KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		/* still consume the caller's reference before failing */
		semaphore_dereference(semaphore);
		return KERN_INVALID_ARGUMENT;
	}

	/* lock ordering: task lock, then splsched, then semaphore lock */
	task_lock(task);
	spl_level = splsched();
	semaphore_lock(semaphore);

	/* only the owning task may destroy the semaphore */
	if (semaphore->owner != task) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	semaphore_destroy_internal(task, semaphore);
	/* semaphore unlocked */

	splx(spl_level);
	task_unlock(task);

	/* consume the reference the caller handed us */
	semaphore_dereference(semaphore);
	return KERN_SUCCESS;
}
303
/*
 *	Routine:	semaphore_destroy_all
 *
 *	Destroy all the semaphores associated with a given task.
 */
#define SEMASPERSPL 20  /* max number of semaphores to destroy per spl hold */

void
semaphore_destroy_all(
	task_t			task)
{
	uint32_t count;
	spl_t spl_level;

	count = 0;
	task_lock(task);
	while (!queue_empty(&task->semaphore_list)) {
		semaphore_t semaphore;

		semaphore = (semaphore_t) queue_first(&task->semaphore_list);

		/*
		 * Invariant: count == 0 iff interrupts are currently
		 * enabled; disable them for the next batch.
		 */
		if (count == 0)
			spl_level = splsched();
		semaphore_lock(semaphore);

		semaphore_destroy_internal(task, semaphore);
		/* semaphore unlocked */

		/* throttle number of semaphores per interrupt disablement */
		if (++count == SEMASPERSPL) {
			count = 0;
			splx(spl_level);
		}
	}
	/* re-enable interrupts if the final partial batch left them disabled */
	if (count != 0)
		splx(spl_level);

	task_unlock(task);
}
343
/*
 *	Routine:	semaphore_signal_internal
 *
 *		Signals the semaphore as direct.
 *
 *	Options:
 *		SEMAPHORE_SIGNAL_ALL	 - wake every waiter; count resets to 0
 *		SEMAPHORE_SIGNAL_PREPOST - if nobody is waiting, leave the
 *					   semaphore counted up for a later waiter
 *	Assumptions:
 *		Semaphore is unlocked on entry; this routine acquires (and
 *		always releases) the semaphore lock at splsched.
 */
kern_return_t
semaphore_signal_internal(
	semaphore_t		semaphore,
	thread_t		thread,
	int			options)
{
	kern_return_t kr;
	spl_t  spl_level;

	spl_level = splsched();
	semaphore_lock(semaphore);

	/* a destroyed/deactivated semaphore can no longer be signalled */
	if (!semaphore->active) {
		semaphore_unlock(semaphore);
		splx(spl_level);
		return KERN_TERMINATED;
	}

	/* targeted signal: wake only the specified thread, never pre-post */
	if (thread != THREAD_NULL) {
		if (semaphore->count < 0) {
			/* negative count means threads are blocked on the waitq */
			kr = waitq_wakeup64_thread_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					thread,
					THREAD_AWAKENED,
					WAITQ_UNLOCK);
			/* waitq/semaphore is unlocked */
		} else {
			kr = KERN_NOT_WAITING;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	if (options & SEMAPHORE_SIGNAL_ALL) {
		int old_count = semaphore->count;

		kr = KERN_NOT_WAITING;
		if (old_count < 0) {
			semaphore->count = 0;  /* always reset */
			kr = waitq_wakeup64_all_locked(
					&semaphore->waitq,
					SEMAPHORE_EVENT,
					THREAD_AWAKENED, NULL,
					WAITQ_ALL_PRIORITIES,
					WAITQ_UNLOCK);
			/* waitq / semaphore is unlocked */
		} else {
			/* nobody waiting: optionally leave a single pre-post */
			if (options & SEMAPHORE_SIGNAL_PREPOST)
				semaphore->count++;
			kr = KERN_SUCCESS;
			semaphore_unlock(semaphore);
		}
		splx(spl_level);
		return kr;
	}

	/* default case: wake exactly one waiter, if there is one */
	if (semaphore->count < 0) {
		kr = waitq_wakeup64_one_locked(
				&semaphore->waitq,
				SEMAPHORE_EVENT,
				THREAD_AWAKENED, NULL,
				WAITQ_ALL_PRIORITIES,
				WAITQ_KEEP_LOCKED);
		if (kr == KERN_SUCCESS) {
			semaphore_unlock(semaphore);
			splx(spl_level);
			return KERN_SUCCESS;
		} else {
			/* the count claimed waiters but the waitq was empty */
			semaphore->count = 0;  /* all waiters gone */
		}
	}

	if (options & SEMAPHORE_SIGNAL_PREPOST) {
		semaphore->count++;
	}

	semaphore_unlock(semaphore);
	splx(spl_level);
	return KERN_NOT_WAITING;
}
433
434 /*
435 * Routine: semaphore_signal_thread
436 *
437 * If the specified thread is blocked on the semaphore, it is
438 * woken up. If a NULL thread was supplied, then any one
439 * thread is woken up. Otherwise the caller gets KERN_NOT_WAITING
440 * and the semaphore is unchanged.
441 */
442 kern_return_t
443 semaphore_signal_thread(
444 semaphore_t semaphore,
445 thread_t thread)
446 {
447 kern_return_t ret;
448
449 if (semaphore == SEMAPHORE_NULL)
450 return KERN_INVALID_ARGUMENT;
451
452 ret = semaphore_signal_internal(semaphore,
453 thread,
454 SEMAPHORE_OPTION_NONE);
455 return ret;
456 }
457
458 /*
459 * Routine: semaphore_signal_thread_trap
460 *
461 * Trap interface to the semaphore_signal_thread function.
462 */
463 kern_return_t
464 semaphore_signal_thread_trap(
465 struct semaphore_signal_thread_trap_args *args)
466 {
467 mach_port_name_t sema_name = args->signal_name;
468 mach_port_name_t thread_name = args->thread_name;
469 semaphore_t semaphore;
470 thread_t thread;
471 kern_return_t kr;
472
473 /*
474 * MACH_PORT_NULL is not an error. It means that we want to
475 * select any one thread that is already waiting, but not to
476 * pre-post the semaphore.
477 */
478 if (thread_name != MACH_PORT_NULL) {
479 thread = port_name_to_thread(thread_name);
480 if (thread == THREAD_NULL)
481 return KERN_INVALID_ARGUMENT;
482 } else
483 thread = THREAD_NULL;
484
485 kr = port_name_to_semaphore(sema_name, &semaphore);
486 if (kr == KERN_SUCCESS) {
487 kr = semaphore_signal_internal(semaphore,
488 thread,
489 SEMAPHORE_OPTION_NONE);
490 semaphore_dereference(semaphore);
491 }
492 if (thread != THREAD_NULL) {
493 thread_deallocate(thread);
494 }
495 return kr;
496 }
497
498
499
500 /*
501 * Routine: semaphore_signal
502 *
503 * Traditional (in-kernel client and MIG interface) semaphore
504 * signal routine. Most users will access the trap version.
505 *
506 * This interface in not defined to return info about whether
507 * this call found a thread waiting or not. The internal
508 * routines (and future external routines) do. We have to
509 * convert those into plain KERN_SUCCESS returns.
510 */
511 kern_return_t
512 semaphore_signal(
513 semaphore_t semaphore)
514 {
515 kern_return_t kr;
516
517 if (semaphore == SEMAPHORE_NULL)
518 return KERN_INVALID_ARGUMENT;
519
520 kr = semaphore_signal_internal(semaphore,
521 THREAD_NULL,
522 SEMAPHORE_SIGNAL_PREPOST);
523 if (kr == KERN_NOT_WAITING)
524 return KERN_SUCCESS;
525 return kr;
526 }
527
528 /*
529 * Routine: semaphore_signal_trap
530 *
531 * Trap interface to the semaphore_signal function.
532 */
533 kern_return_t
534 semaphore_signal_trap(
535 struct semaphore_signal_trap_args *args)
536 {
537 mach_port_name_t sema_name = args->signal_name;
538
539 return (semaphore_signal_internal_trap(sema_name));
540 }
541
542 kern_return_t
543 semaphore_signal_internal_trap(mach_port_name_t sema_name)
544 {
545 semaphore_t semaphore;
546 kern_return_t kr;
547
548 kr = port_name_to_semaphore(sema_name, &semaphore);
549 if (kr == KERN_SUCCESS) {
550 kr = semaphore_signal_internal(semaphore,
551 THREAD_NULL,
552 SEMAPHORE_SIGNAL_PREPOST);
553 semaphore_dereference(semaphore);
554 if (kr == KERN_NOT_WAITING)
555 kr = KERN_SUCCESS;
556 }
557 return kr;
558 }
559
560 /*
561 * Routine: semaphore_signal_all
562 *
563 * Awakens ALL threads currently blocked on the semaphore.
564 * The semaphore count returns to zero.
565 */
566 kern_return_t
567 semaphore_signal_all(
568 semaphore_t semaphore)
569 {
570 kern_return_t kr;
571
572 if (semaphore == SEMAPHORE_NULL)
573 return KERN_INVALID_ARGUMENT;
574
575 kr = semaphore_signal_internal(semaphore,
576 THREAD_NULL,
577 SEMAPHORE_SIGNAL_ALL);
578 if (kr == KERN_NOT_WAITING)
579 return KERN_SUCCESS;
580 return kr;
581 }
582
583 /*
584 * Routine: semaphore_signal_all_trap
585 *
586 * Trap interface to the semaphore_signal_all function.
587 */
588 kern_return_t
589 semaphore_signal_all_trap(
590 struct semaphore_signal_all_trap_args *args)
591 {
592 mach_port_name_t sema_name = args->signal_name;
593 semaphore_t semaphore;
594 kern_return_t kr;
595
596 kr = port_name_to_semaphore(sema_name, &semaphore);
597 if (kr == KERN_SUCCESS) {
598 kr = semaphore_signal_internal(semaphore,
599 THREAD_NULL,
600 SEMAPHORE_SIGNAL_ALL);
601 semaphore_dereference(semaphore);
602 if (kr == KERN_NOT_WAITING)
603 kr = KERN_SUCCESS;
604 }
605 return kr;
606 }
607
608 /*
609 * Routine: semaphore_convert_wait_result
610 *
611 * Generate the return code after a semaphore wait/block. It
612 * takes the wait result as an input and coverts that to an
613 * appropriate result.
614 */
615 kern_return_t
616 semaphore_convert_wait_result(int wait_result)
617 {
618 switch (wait_result) {
619 case THREAD_AWAKENED:
620 return KERN_SUCCESS;
621
622 case THREAD_TIMED_OUT:
623 return KERN_OPERATION_TIMED_OUT;
624
625 case THREAD_INTERRUPTED:
626 return KERN_ABORTED;
627
628 case THREAD_RESTART:
629 return KERN_TERMINATED;
630
631 default:
632 panic("semaphore_block\n");
633 return KERN_FAILURE;
634 }
635 }
636
/*
 *	Routine:	semaphore_wait_continue
 *
 *	Common continuation routine after waiting on a semaphore.
 *	It returns directly to user space.
 */
void
semaphore_wait_continue(void)
{
	thread_t self = current_thread();
	int wait_result = self->wait_result;
	void (*caller_cont)(kern_return_t) = self->sth_continuation;

	/* drop the semaphore references the wait path stashed on the thread */
	assert(self->sth_waitsemaphore != SEMAPHORE_NULL);
	semaphore_dereference(self->sth_waitsemaphore);
	if (self->sth_signalsemaphore != SEMAPHORE_NULL)
		semaphore_dereference(self->sth_signalsemaphore);

	/* hand the converted wait result to the caller's continuation */
	assert(caller_cont != (void (*)(kern_return_t))0);
	(*caller_cont)(semaphore_convert_wait_result(wait_result));
}
658
/*
 *	Routine:	semaphore_wait_internal
 *
 *		Decrements the semaphore count by one.  If the count is
 *		negative after the decrement, the calling thread blocks
 *		(possibly at a continuation and/or with a timeout).
 *
 *	Assumptions:
 *		A reference is held on the wait semaphore.
 *		A reference is held on the signal semaphore (if any).
 */
static kern_return_t
semaphore_wait_internal(
	semaphore_t		wait_semaphore,
	semaphore_t		signal_semaphore,
	uint64_t		deadline,
	int				option,
	void 			(*caller_cont)(kern_return_t))
{
	int			wait_result;
	spl_t			spl_level;
	kern_return_t		kr = KERN_ALREADY_WAITING;

	spl_level = splsched();
	semaphore_lock(wait_semaphore);

	if (!wait_semaphore->active) {
		kr = KERN_TERMINATED;
	} else if (wait_semaphore->count > 0) {
		/* fast path: a count is available, no need to block */
		wait_semaphore->count--;
		kr = KERN_SUCCESS;
	} else if (option & SEMAPHORE_TIMEOUT_NOBLOCK) {
		/* poll mode: would have to block, report timeout instead */
		kr = KERN_OPERATION_TIMED_OUT;
	} else {
		thread_t	self = current_thread();

		wait_semaphore->count = -1;	/* we don't keep an actual count */

		/* assert our intent to wait; the actual block happens below */
		thread_set_pending_block_hint(self, kThreadWaitSemaphore);
		(void)waitq_assert_wait64_locked(
					&wait_semaphore->waitq,
					SEMAPHORE_EVENT,
					THREAD_ABORTSAFE,
					TIMEOUT_URGENCY_USER_NORMAL,
					deadline, TIMEOUT_NO_LEEWAY,
					self);
	}
	semaphore_unlock(wait_semaphore);
	splx(spl_level);

	/*
	 * wait_semaphore is unlocked so we are free to go ahead and
	 * signal the signal_semaphore (if one was provided).
	 */
	if (signal_semaphore != SEMAPHORE_NULL) {
		kern_return_t signal_kr;

		/*
		 * lock the signal semaphore reference we got and signal it.
		 * This will NOT block (we cannot block after having asserted
		 * our intention to wait above).
		 */
		signal_kr = semaphore_signal_internal(signal_semaphore,
						      THREAD_NULL,
						      SEMAPHORE_SIGNAL_PREPOST);

		if (signal_kr == KERN_NOT_WAITING)
			signal_kr = KERN_SUCCESS;
		else if (signal_kr == KERN_TERMINATED) {
			/*
			 * Uh!Oh!  The semaphore we were to signal died.
			 * We have to get ourselves out of the wait in
			 * case we get stuck here forever (it is assumed
			 * that the semaphore we were posting is gating
			 * the decision by someone else to post the
			 * semaphore we are waiting on).  People will
			 * discover the other dead semaphore soon enough.
			 * If we got out of the wait cleanly (someone
			 * already posted a wakeup to us) then return that
			 * (most important) result.  Otherwise,
			 * return the KERN_TERMINATED status.
			 */
			thread_t self = current_thread();

			clear_wait(self, THREAD_INTERRUPTED);
			kr = semaphore_convert_wait_result(self->wait_result);
			if (kr == KERN_ABORTED)
				kr = KERN_TERMINATED;
		}
	}

	/*
	 * If we had an error, or we didn't really need to wait we can
	 * return now that we have signalled the signal semaphore.
	 */
	if (kr != KERN_ALREADY_WAITING)
		return kr;

	/*
	 * Now, we can block.  If the caller supplied a continuation
	 * pointer of his own for after the block, block with the
	 * appropriate semaphore continuation.  This will gather the
	 * semaphore results, release references on the semaphore(s),
	 * and then call the caller's continuation.
	 */
	if (caller_cont) {
		thread_t self = current_thread();

		/* stash state for semaphore_wait_continue to pick up after the block */
		self->sth_continuation = caller_cont;
		self->sth_waitsemaphore = wait_semaphore;
		self->sth_signalsemaphore = signal_semaphore;
		wait_result = thread_block((thread_continue_t)semaphore_wait_continue);
	}
	else {
		wait_result = thread_block(THREAD_CONTINUE_NULL);
	}

	return (semaphore_convert_wait_result(wait_result));
}
778
779
780 /*
781 * Routine: semaphore_wait
782 *
783 * Traditional (non-continuation) interface presented to
784 * in-kernel clients to wait on a semaphore.
785 */
786 kern_return_t
787 semaphore_wait(
788 semaphore_t semaphore)
789 {
790
791 if (semaphore == SEMAPHORE_NULL)
792 return KERN_INVALID_ARGUMENT;
793
794 return(semaphore_wait_internal(semaphore,
795 SEMAPHORE_NULL,
796 0ULL, SEMAPHORE_OPTION_NONE,
797 (void (*)(kern_return_t))0));
798 }
799
800 kern_return_t
801 semaphore_wait_noblock(
802 semaphore_t semaphore)
803 {
804
805 if (semaphore == SEMAPHORE_NULL)
806 return KERN_INVALID_ARGUMENT;
807
808 return(semaphore_wait_internal(semaphore,
809 SEMAPHORE_NULL,
810 0ULL, SEMAPHORE_TIMEOUT_NOBLOCK,
811 (void (*)(kern_return_t))0));
812 }
813
814 kern_return_t
815 semaphore_wait_deadline(
816 semaphore_t semaphore,
817 uint64_t deadline)
818 {
819
820 if (semaphore == SEMAPHORE_NULL)
821 return KERN_INVALID_ARGUMENT;
822
823 return(semaphore_wait_internal(semaphore,
824 SEMAPHORE_NULL,
825 deadline, SEMAPHORE_OPTION_NONE,
826 (void (*)(kern_return_t))0));
827 }
828
829 /*
830 * Trap: semaphore_wait_trap
831 *
832 * Trap version of semaphore wait. Called on behalf of user-level
833 * clients.
834 */
835
836 kern_return_t
837 semaphore_wait_trap(
838 struct semaphore_wait_trap_args *args)
839 {
840 return(semaphore_wait_trap_internal(args->wait_name, thread_syscall_return));
841 }
842
843
844
845 kern_return_t
846 semaphore_wait_trap_internal(
847 mach_port_name_t name,
848 void (*caller_cont)(kern_return_t))
849 {
850 semaphore_t semaphore;
851 kern_return_t kr;
852
853 kr = port_name_to_semaphore(name, &semaphore);
854 if (kr == KERN_SUCCESS) {
855 kr = semaphore_wait_internal(semaphore,
856 SEMAPHORE_NULL,
857 0ULL, SEMAPHORE_OPTION_NONE,
858 caller_cont);
859 semaphore_dereference(semaphore);
860 }
861 return kr;
862 }
863
864 /*
865 * Routine: semaphore_timedwait
866 *
867 * Traditional (non-continuation) interface presented to
868 * in-kernel clients to wait on a semaphore with a timeout.
869 *
870 * A timeout of {0,0} is considered non-blocking.
871 */
872 kern_return_t
873 semaphore_timedwait(
874 semaphore_t semaphore,
875 mach_timespec_t wait_time)
876 {
877 int option = SEMAPHORE_OPTION_NONE;
878 uint64_t deadline = 0;
879
880 if (semaphore == SEMAPHORE_NULL)
881 return KERN_INVALID_ARGUMENT;
882
883 if(BAD_MACH_TIMESPEC(&wait_time))
884 return KERN_INVALID_VALUE;
885
886 if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0)
887 option = SEMAPHORE_TIMEOUT_NOBLOCK;
888 else
889 deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
890
891 return (semaphore_wait_internal(semaphore,
892 SEMAPHORE_NULL,
893 deadline, option,
894 (void(*)(kern_return_t))0));
895
896 }
897
898 /*
899 * Trap: semaphore_timedwait_trap
900 *
901 * Trap version of a semaphore_timedwait. The timeout parameter
902 * is passed in two distinct parts and re-assembled on this side
903 * of the trap interface (to accomodate calling conventions that
904 * pass structures as pointers instead of inline in registers without
905 * having to add a copyin).
906 *
907 * A timeout of {0,0} is considered non-blocking.
908 */
909 kern_return_t
910 semaphore_timedwait_trap(
911 struct semaphore_timedwait_trap_args *args)
912 {
913
914 return(semaphore_timedwait_trap_internal(args->wait_name, args->sec, args->nsec, thread_syscall_return));
915 }
916
917
918 kern_return_t
919 semaphore_timedwait_trap_internal(
920 mach_port_name_t name,
921 unsigned int sec,
922 clock_res_t nsec,
923 void (*caller_cont)(kern_return_t))
924 {
925 semaphore_t semaphore;
926 mach_timespec_t wait_time;
927 kern_return_t kr;
928
929 wait_time.tv_sec = sec;
930 wait_time.tv_nsec = nsec;
931 if(BAD_MACH_TIMESPEC(&wait_time))
932 return KERN_INVALID_VALUE;
933
934 kr = port_name_to_semaphore(name, &semaphore);
935 if (kr == KERN_SUCCESS) {
936 int option = SEMAPHORE_OPTION_NONE;
937 uint64_t deadline = 0;
938
939 if (sec == 0 && nsec == 0)
940 option = SEMAPHORE_TIMEOUT_NOBLOCK;
941 else
942 deadline = semaphore_deadline(sec, nsec);
943
944 kr = semaphore_wait_internal(semaphore,
945 SEMAPHORE_NULL,
946 deadline, option,
947 caller_cont);
948 semaphore_dereference(semaphore);
949 }
950 return kr;
951 }
952
953 /*
954 * Routine: semaphore_wait_signal
955 *
956 * Atomically register a wait on a semaphore and THEN signal
957 * another. This is the in-kernel entry point that does not
958 * block at a continuation and does not free a signal_semaphore
959 * reference.
960 */
961 kern_return_t
962 semaphore_wait_signal(
963 semaphore_t wait_semaphore,
964 semaphore_t signal_semaphore)
965 {
966 if (wait_semaphore == SEMAPHORE_NULL)
967 return KERN_INVALID_ARGUMENT;
968
969 return(semaphore_wait_internal(wait_semaphore,
970 signal_semaphore,
971 0ULL, SEMAPHORE_OPTION_NONE,
972 (void(*)(kern_return_t))0));
973 }
974
975 /*
976 * Trap: semaphore_wait_signal_trap
977 *
978 * Atomically register a wait on a semaphore and THEN signal
979 * another. This is the trap version from user space.
980 */
981 kern_return_t
982 semaphore_wait_signal_trap(
983 struct semaphore_wait_signal_trap_args *args)
984 {
985 return(semaphore_wait_signal_trap_internal(args->wait_name, args->signal_name, thread_syscall_return));
986 }
987
988 kern_return_t
989 semaphore_wait_signal_trap_internal(
990 mach_port_name_t wait_name,
991 mach_port_name_t signal_name,
992 void (*caller_cont)(kern_return_t))
993 {
994 semaphore_t wait_semaphore;
995 semaphore_t signal_semaphore;
996 kern_return_t kr;
997
998 kr = port_name_to_semaphore(signal_name, &signal_semaphore);
999 if (kr == KERN_SUCCESS) {
1000 kr = port_name_to_semaphore(wait_name, &wait_semaphore);
1001 if (kr == KERN_SUCCESS) {
1002 kr = semaphore_wait_internal(wait_semaphore,
1003 signal_semaphore,
1004 0ULL, SEMAPHORE_OPTION_NONE,
1005 caller_cont);
1006 semaphore_dereference(wait_semaphore);
1007 }
1008 semaphore_dereference(signal_semaphore);
1009 }
1010 return kr;
1011 }
1012
1013
1014 /*
1015 * Routine: semaphore_timedwait_signal
1016 *
1017 * Atomically register a wait on a semaphore and THEN signal
1018 * another. This is the in-kernel entry point that does not
1019 * block at a continuation.
1020 *
1021 * A timeout of {0,0} is considered non-blocking.
1022 */
1023 kern_return_t
1024 semaphore_timedwait_signal(
1025 semaphore_t wait_semaphore,
1026 semaphore_t signal_semaphore,
1027 mach_timespec_t wait_time)
1028 {
1029 int option = SEMAPHORE_OPTION_NONE;
1030 uint64_t deadline = 0;
1031
1032 if (wait_semaphore == SEMAPHORE_NULL)
1033 return KERN_INVALID_ARGUMENT;
1034
1035 if(BAD_MACH_TIMESPEC(&wait_time))
1036 return KERN_INVALID_VALUE;
1037
1038 if (wait_time.tv_sec == 0 && wait_time.tv_nsec == 0)
1039 option = SEMAPHORE_TIMEOUT_NOBLOCK;
1040 else
1041 deadline = semaphore_deadline(wait_time.tv_sec, wait_time.tv_nsec);
1042
1043 return(semaphore_wait_internal(wait_semaphore,
1044 signal_semaphore,
1045 deadline, option,
1046 (void(*)(kern_return_t))0));
1047 }
1048
1049 /*
1050 * Trap: semaphore_timedwait_signal_trap
1051 *
1052 * Atomically register a timed wait on a semaphore and THEN signal
1053 * another. This is the trap version from user space.
1054 */
1055 kern_return_t
1056 semaphore_timedwait_signal_trap(
1057 struct semaphore_timedwait_signal_trap_args *args)
1058 {
1059 return(semaphore_timedwait_signal_trap_internal(args->wait_name, args->signal_name, args->sec, args->nsec, thread_syscall_return));
1060 }
1061
1062 kern_return_t
1063 semaphore_timedwait_signal_trap_internal(
1064 mach_port_name_t wait_name,
1065 mach_port_name_t signal_name,
1066 unsigned int sec,
1067 clock_res_t nsec,
1068 void (*caller_cont)(kern_return_t))
1069 {
1070 semaphore_t wait_semaphore;
1071 semaphore_t signal_semaphore;
1072 mach_timespec_t wait_time;
1073 kern_return_t kr;
1074
1075 wait_time.tv_sec = sec;
1076 wait_time.tv_nsec = nsec;
1077 if(BAD_MACH_TIMESPEC(&wait_time))
1078 return KERN_INVALID_VALUE;
1079
1080 kr = port_name_to_semaphore(signal_name, &signal_semaphore);
1081 if (kr == KERN_SUCCESS) {
1082 kr = port_name_to_semaphore(wait_name, &wait_semaphore);
1083 if (kr == KERN_SUCCESS) {
1084 int option = SEMAPHORE_OPTION_NONE;
1085 uint64_t deadline = 0;
1086
1087 if (sec == 0 && nsec == 0)
1088 option = SEMAPHORE_TIMEOUT_NOBLOCK;
1089 else
1090 deadline = semaphore_deadline(sec, nsec);
1091
1092 kr = semaphore_wait_internal(wait_semaphore,
1093 signal_semaphore,
1094 deadline, option,
1095 caller_cont);
1096 semaphore_dereference(wait_semaphore);
1097 }
1098 semaphore_dereference(signal_semaphore);
1099 }
1100 return kr;
1101 }
1102
1103
/*
 *	Routine:	semaphore_reference
 *
 *	Take out a reference on a semaphore.  This keeps the data structure
 *	in existence (but the semaphore may be deactivated).
 */
void
semaphore_reference(
	semaphore_t		semaphore)
{
	/* atomic increment; no semaphore lock required */
	(void)hw_atomic_add(&semaphore->ref_count, 1);
}
1116
/*
 *	Routine:	semaphore_dereference
 *
 *	Release a reference on a semaphore.  If this is the last reference,
 *	the semaphore data structure is deallocated.
 */
void
semaphore_dereference(
	semaphore_t		semaphore)
{
	uint32_t collisions;
	spl_t spl_level;

	if (semaphore == NULL)
		return;

	/* drop our ref; only the last reference does cleanup below */
	if (hw_atomic_sub(&semaphore->ref_count, 1) != 0)
		return;

	/*
	 * Last ref, clean up the port [if any]
	 * associated with the semaphore, destroy
	 * it (if still active) and then free
	 * the semaphore.
	 */
	ipc_port_t port = semaphore->port;

	if (IP_VALID(port)) {
		assert(!port->ip_srights);
		ipc_port_dealloc_kernel(port);
	}

	/*
	 * Lock the semaphore to lock in the owner task reference.
	 * Then continue to try to lock the task (inverse order).
	 */
	spl_level = splsched();
	semaphore_lock(semaphore);
	for (collisions = 0; semaphore->active; collisions++) {
		task_t task = semaphore->owner;

		/* an active semaphore always has an owning task */
		assert(task != TASK_NULL);

		if (task_lock_try(task)) {
			semaphore_destroy_internal(task, semaphore);
			/* semaphore unlocked */
			splx(spl_level);
			task_unlock(task);
			goto out;
		}

		/* failed to get out-of-order locks */
		semaphore_unlock(semaphore);
		splx(spl_level);
		/* back off (scaled by contention count) before retrying */
		mutex_pause(collisions);
		spl_level = splsched();
		semaphore_lock(semaphore);
	}
	semaphore_unlock(semaphore);
	splx(spl_level);

 out:
	zfree(semaphore_zone, semaphore);
}
1181
/* recover the enclosing semaphore from its embedded waitq */
#define WAITQ_TO_SEMA(wq) ((semaphore_t) ((uintptr_t)(wq) - offsetof(struct semaphore, waitq)))
/*
 *	Routine:	kdp_sema_find_owner
 *
 *	Debugger (kdp) hook: fill in wait info for a thread blocked on a
 *	semaphore - the semaphore's port (as an opaque context value) and
 *	the pid of the owning task, if there is one.
 */
void
kdp_sema_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
{
	semaphore_t sem = WAITQ_TO_SEMA(waitq);
	assert(event == SEMAPHORE_EVENT);
	assert(kdp_is_in_zone(sem, "semaphores"));

	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(sem->port);
	if (sem->owner)
		waitinfo->owner = pid_from_task(sem->owner);
}