/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:		ipc/ipc_mqueue.c
 *
 *	Functions to manipulate IPC message queues.
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <mach/port.h>
#include <mach/message.h>
#include <mach/sync_policy.h>

#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_mig.h>	/* XXX - for mach_msg_receive_continue */
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/waitq.h>

#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_space.h>

#include <vm/vm_map.h>
int ipc_mqueue_full;		/* address is event for queue space */
int ipc_mqueue_rcv;		/* address is event for message arrival */
/* forward declarations */
void ipc_mqueue_receive_results(wait_result_t result);
/*
 *	Routine:	ipc_mqueue_init
 *	Purpose:
 *		Initialize a newly-allocated message queue.
 */
void
ipc_mqueue_init(
	ipc_mqueue_t	mqueue,
	boolean_t	is_set,
	uint64_t	*reserved_link)
{
	if (is_set) {
		waitq_set_init(&mqueue->imq_set_queue,
			       SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST|SYNC_POLICY_DISABLE_IRQ,
			       reserved_link);
	} else {
		waitq_init(&mqueue->imq_wait_queue,
			   SYNC_POLICY_FIFO|SYNC_POLICY_DISABLE_IRQ);
		ipc_kmsg_queue_init(&mqueue->imq_messages);
		mqueue->imq_seqno = 0;
		mqueue->imq_msgcount = 0;
		mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
		mqueue->imq_fullwaiters = FALSE;
	}
}
void
ipc_mqueue_deinit(
	ipc_mqueue_t	mqueue)
{
	boolean_t is_set = imq_is_set(mqueue);

	if (is_set)
		waitq_set_deinit(&mqueue->imq_set_queue);
	else
		waitq_deinit(&mqueue->imq_wait_queue);
}
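/*
 * Lifecycle sketch (illustrative only): the same is_set flag selects both
 * halves of a queue's life.  For a port-set queue, roughly:
 *
 *	ipc_mqueue_init(mq, TRUE, reserved_link);	// inits imq_set_queue
 *	...
 *	ipc_mqueue_deinit(mq);				// deinits imq_set_queue
 *
 * where reserved_link is a linkage reservation the caller obtained from the
 * waitq layer.  Passing is_set == FALSE instead initializes the plain waitq,
 * the kmsg queue, and the seqno/msgcount/qlimit/fullwaiters fields.
 */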
/*
 *	Routine:	imq_reserve_and_lock
 *	Purpose:
 *		Atomically lock an ipc_mqueue_t object and reserve
 *		an appropriate number of prepost linkage objects for
 *		use in wakeup operations.
 */
void
imq_reserve_and_lock(ipc_mqueue_t mq, uint64_t *reserved_prepost, spl_t *spl)
{
	*reserved_prepost = waitq_prepost_reserve(&mq->imq_wait_queue, 0,
						  WAITQ_KEEP_LOCKED, spl);
}
/*
 *	Routine:	imq_release_and_unlock
 *	Purpose:
 *		Unlock an ipc_mqueue_t object, re-enable interrupts,
 *		and release any unused prepost object reservations.
 */
void
imq_release_and_unlock(ipc_mqueue_t mq, uint64_t reserved_prepost, spl_t spl)
{
	assert(imq_held(mq));
	waitq_unlock(&mq->imq_wait_queue);
	splx(spl);
	waitq_prepost_release_reserve(reserved_prepost);
}
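/*
 * Usage sketch (illustrative only): the two helpers above bracket any
 * critical section that may need to prepost on wakeup:
 *
 *	uint64_t reserved_prepost = 0;
 *	spl_t s;
 *
 *	imq_reserve_and_lock(mq, &reserved_prepost, &s);
 *	... wake receivers, consuming reservations as needed ...
 *	imq_release_and_unlock(mq, reserved_prepost, s);
 *
 * Any reservation not consumed by a wakeup inside the critical section is
 * handed back to the waitq layer by imq_release_and_unlock().
 */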
/*
 *	Routine:	ipc_mqueue_member
 *	Purpose:
 *		Indicate whether the (port) mqueue is a member of
 *		this portset's mqueue.  We do this by checking
 *		whether the port mqueue's waitq is a member of
 *		the portset mqueue's waitq set.
 *	Conditions:
 *		The portset's mqueue is not already a member;
 *		this may block while allocating linkage structures.
 */
boolean_t
ipc_mqueue_member(
	ipc_mqueue_t	port_mqueue,
	ipc_mqueue_t	set_mqueue)
{
	struct waitq *port_waitq = &port_mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;

	return waitq_member(port_waitq, set_waitq);
}
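/*
 * Note (illustrative): membership is delegated entirely to the waitq layer,
 * so a caller asking "is this port in this set?" reduces to a single query,
 * e.g. (port and pset here are hypothetical caller state):
 *
 *	if (ipc_mqueue_member(&port->ip_messages, &pset->ips_messages))
 *		... the port's waitq is linked into the set's waitq-set ...
 */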
/*
 *	Routine:	ipc_mqueue_remove
 *	Purpose:
 *		Remove the association between the queue and the specified
 *		set message queue.
 */
kern_return_t
ipc_mqueue_remove(
	ipc_mqueue_t	mqueue,
	ipc_mqueue_t	set_mqueue)
{
	struct waitq *mq_waitq = &mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;

	return waitq_unlink(mq_waitq, set_waitq);
}
/*
 *	Routine:	ipc_mqueue_remove_from_all
 *	Purpose:
 *		Remove the mqueue from all the sets it is a member of.
 */
void
ipc_mqueue_remove_from_all(ipc_mqueue_t mqueue)
{
	struct waitq *mq_waitq = &mqueue->imq_wait_queue;

	waitq_unlink_all(mq_waitq);
}
/*
 *	Routine:	ipc_mqueue_remove_all
 *	Purpose:
 *		Remove all the member queues from the specified set.
 *		Also removes the queue from any containing sets.
 */
void
ipc_mqueue_remove_all(ipc_mqueue_t mqueue)
{
	struct waitq_set *mq_setq = &mqueue->imq_set_queue;

	waitq_set_unlink_all(mq_setq);
}
/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wakeup
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 */
kern_return_t
ipc_mqueue_add(
	ipc_mqueue_t	port_mqueue,
	ipc_mqueue_t	set_mqueue,
	uint64_t	*reserved_link,
	uint64_t	*reserved_prepost)
{
	struct waitq     *port_waitq = &port_mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t       kmsg, next;
	kern_return_t    kr;
	spl_t            s;

	assert(reserved_link && *reserved_link != 0);

	s = splsched();
	imq_lock(port_mqueue);

	/*
	 * The link operation is now under the same lock-hold as
	 * message iteration and thread wakeup, but doesn't have to be...
	 */
	kr = waitq_link(port_waitq, set_waitq, WAITQ_ALREADY_LOCKED, reserved_link);
	if (kr != KERN_SUCCESS) {
		imq_unlock(port_mqueue);
		splx(s);
		return kr;
	}

	/*
	 * Now that the set has been added to the port, there may be
	 * messages queued on the port and threads waiting on the set
	 * waitq.  Lets get them together.
	 */
	kmsgq = &port_mqueue->imq_messages;
	for (kmsg = ipc_kmsg_queue_first(kmsgq);
	     kmsg != IKM_NULL;
	     kmsg = next) {
		next = ipc_kmsg_queue_next(kmsgq, kmsg);

		for (;;) {
			thread_t th;
			mach_msg_size_t msize;
			spl_t th_spl;

			th = waitq_wakeup64_identity_locked(
						port_waitq,
						IPC_MQUEUE_RECEIVE,
						THREAD_AWAKENED, &th_spl,
						reserved_prepost, WAITQ_KEEP_LOCKED);
			/* waitq/mqueue still locked, thread locked */

			if (th == THREAD_NULL)
				goto leave;

			/*
			 * If the receiver waited with a facility not directly
			 * related to Mach messaging, then it isn't prepared to get
			 * handed the message directly.  Just set it running, and
			 * go look for another thread that can.
			 */
			if (th->ith_state != MACH_RCV_IN_PROGRESS) {
				thread_unlock(th);
				splx(th_spl);
				continue;
			}

			/*
			 * Found a receiver. see if they can handle the message
			 * correctly (the message is not too large for them, or
			 * they didn't care to be informed that the message was
			 * too large).  If they can't handle it, take them off
			 * the list and let them go back and figure it out and
			 * just move onto the next.
			 */
			msize = ipc_kmsg_copyout_size(kmsg, th->map);
			if (th->ith_msize <
			    (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(th), th->ith_option))) {
				th->ith_state = MACH_RCV_TOO_LARGE;
				th->ith_msize = msize;
				if (th->ith_option & MACH_RCV_LARGE) {
					/*
					 * let the thread go without the message
					 */
					th->ith_receiver_name = port_mqueue->imq_receiver_name;
					th->ith_kmsg = IKM_NULL;
					th->ith_seqno = 0;
					thread_unlock(th);
					splx(th_spl);
					continue; /* find another thread */
				}
			} else {
				th->ith_state = MACH_MSG_SUCCESS;
			}

			/*
			 * This thread is going to take this message,
			 * so give it to the thread.
			 */
			ipc_kmsg_rmqueue(kmsgq, kmsg);
			ipc_mqueue_release_msgcount(port_mqueue, IMQ_NULL);

			th->ith_kmsg = kmsg;
			th->ith_seqno = port_mqueue->imq_seqno++;
			thread_unlock(th);
			splx(th_spl);
			break; /* go to next message */
		}
	}
leave:
	imq_unlock(port_mqueue);
	splx(s);
	return KERN_SUCCESS;
}
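/*
 * Caller-side sketch (illustrative only; the exact reservation calls are
 * assumptions about the waitq API - see the real callers in ipc_pset.c /
 * mach_port.c for the authoritative pattern).  Both reservations are made
 * before any mqueue lock is taken:
 *
 *	uint64_t wq_link_id = waitq_link_reserve(NULL);
 *	uint64_t wq_reserved_prepost = waitq_prepost_reserve(NULL, 10,
 *							     WAITQ_DONT_LOCK,
 *							     NULL);
 *	kr = ipc_mqueue_add(port_mqueue, set_mqueue,
 *			    &wq_link_id, &wq_reserved_prepost);
 *	waitq_link_release(wq_link_id);
 *	waitq_prepost_release_reserve(wq_reserved_prepost);
 */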
/*
 *	Routine:	ipc_mqueue_changed
 *	Purpose:
 *		Wake up receivers waiting in a message queue.
 *	Conditions:
 *		The message queue is locked.
 */
void
ipc_mqueue_changed(
	ipc_mqueue_t	mqueue)
{
	waitq_wakeup64_all_locked(&mqueue->imq_wait_queue,
				  IPC_MQUEUE_RECEIVE,
				  THREAD_RESTART,
				  NULL,
				  WAITQ_ALL_PRIORITIES,
				  WAITQ_KEEP_LOCKED);
}
/*
 *	Routine:	ipc_mqueue_send
 *	Purpose:
 *		Send a message to a message queue.  The message holds a reference
 *		for the destination port for this message queue in the
 *		msgh_remote_port field.
 *
 *		If unsuccessful, the caller still has possession of
 *		the message and must do something with it.  If successful,
 *		the message is queued, given to a receiver, or destroyed.
 *	Conditions:
 *		The message queue is locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	The message was accepted.
 *		MACH_SEND_TIMED_OUT	Caller still has message.
 *		MACH_SEND_INTERRUPTED	Caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
	ipc_mqueue_t		mqueue,
	ipc_kmsg_t		kmsg,
	mach_msg_option_t	option,
	mach_msg_timeout_t	send_timeout,
	spl_t			s)
{
	wait_result_t wresult;

	/*
	 *  Don't block if:
	 *	1) We're under the queue limit.
	 *	2) Caller used the MACH_SEND_ALWAYS internal option.
	 *	3) Message is sent to a send-once right.
	 */
	if (!imq_full(mqueue) ||
	    (!imq_full_kernel(mqueue) &&
	     ((option & MACH_SEND_ALWAYS) ||
	      (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
	       MACH_MSG_TYPE_PORT_SEND_ONCE)))) {
		mqueue->imq_msgcount++;
		assert(mqueue->imq_msgcount > 0);
		imq_unlock(mqueue);
		splx(s);
	} else {
		thread_t cur_thread = current_thread();
		uint64_t deadline;

		/*
		 * We have to wait for space to be granted to us.
		 */
		if ((option & MACH_SEND_TIMEOUT) && (send_timeout == 0)) {
			imq_unlock(mqueue);
			splx(s);
			return MACH_SEND_TIMED_OUT;
		}
		if (imq_full_kernel(mqueue)) {
			imq_unlock(mqueue);
			splx(s);
			return MACH_SEND_NO_BUFFER;
		}
		mqueue->imq_fullwaiters = TRUE;
		thread_lock(cur_thread);
		if (option & MACH_SEND_TIMEOUT)
			clock_interval_to_deadline(send_timeout, 1000*NSEC_PER_USEC, &deadline);
		else
			deadline = 0;
		wresult = waitq_assert_wait64_locked(
						&mqueue->imq_wait_queue,
						IPC_MQUEUE_FULL,
						THREAD_ABORTSAFE,
						TIMEOUT_URGENCY_USER_NORMAL,
						deadline, TIMEOUT_NO_LEEWAY,
						cur_thread);
		thread_unlock(cur_thread);
		imq_unlock(mqueue);
		splx(s);

		if (wresult == THREAD_WAITING) {
			wresult = thread_block(THREAD_CONTINUE_NULL);
			counter(c_ipc_mqueue_send_block++);
		}

		switch (wresult) {
		case THREAD_AWAKENED:
			/*
			 * we can proceed - inherited msgcount from waker
			 * or the message queue has been destroyed and the msgcount
			 * has been reset to zero (will detect in ipc_mqueue_post()).
			 */
			break;

		case THREAD_TIMED_OUT:
			assert(option & MACH_SEND_TIMEOUT);
			return MACH_SEND_TIMED_OUT;

		case THREAD_INTERRUPTED:
			return MACH_SEND_INTERRUPTED;

		case THREAD_RESTART:
			/* mqueue is being destroyed */
			return MACH_SEND_INVALID_DEST;

		default:
			panic("ipc_mqueue_send");
		}
	}

	ipc_mqueue_post(mqueue, kmsg);
	return MACH_MSG_SUCCESS;
}
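/*
 * Decision sketch (restating the "don't block" conditions above as a single
 * predicate; illustrative only - msgh_remote_bits stands for
 * MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits)):
 *
 *	boolean_t sends_without_blocking =
 *	    !imq_full(mqueue) ||
 *	    (!imq_full_kernel(mqueue) &&
 *	     ((option & MACH_SEND_ALWAYS) ||
 *	      msgh_remote_bits == MACH_MSG_TYPE_PORT_SEND_ONCE));
 *
 * Everything else takes the wait path, which can only return
 * MACH_SEND_TIMED_OUT, MACH_SEND_NO_BUFFER, MACH_SEND_INTERRUPTED, or
 * MACH_SEND_INVALID_DEST before the message is posted.
 */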
/*
 *	Routine:	ipc_mqueue_release_msgcount
 *	Purpose:
 *		Release a message queue reference in the case where we
 *		found a waiter.
 *	Conditions:
 *		The message queue is locked.
 *		The message corresponding to this reference is off the queue.
 *		There is no need to pass reserved preposts because this will
 *		never prepost to anyone.
 */
void
ipc_mqueue_release_msgcount(ipc_mqueue_t port_mq, ipc_mqueue_t set_mq)
{
	(void)set_mq;
	assert(imq_held(port_mq));
	assert(port_mq->imq_msgcount > 1 || ipc_kmsg_queue_empty(&port_mq->imq_messages));

	port_mq->imq_msgcount--;

	if (!imq_full(port_mq) && port_mq->imq_fullwaiters) {
		/*
		 * boost the priority of the awoken thread
		 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
		 * the message queue slot we've just reserved.
		 *
		 * NOTE: this will never prepost
		 */
		if (waitq_wakeup64_one_locked(&port_mq->imq_wait_queue,
					      IPC_MQUEUE_FULL,
					      THREAD_AWAKENED,
					      NULL,
					      WAITQ_PROMOTE_PRIORITY,
					      WAITQ_KEEP_LOCKED) != KERN_SUCCESS) {
			port_mq->imq_fullwaiters = FALSE;
		} else {
			/* gave away our slot - add reference back */
			port_mq->imq_msgcount++;
		}
	}

	if (ipc_kmsg_queue_empty(&port_mq->imq_messages)) {
		/* no more msgs: invalidate the port's prepost object */
		waitq_clear_prepost_locked(&port_mq->imq_wait_queue, NULL);
	}
}
/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *	Conditions:
 *		If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
	register ipc_mqueue_t	mqueue,
	register ipc_kmsg_t	kmsg)
{
	spl_t s;
	uint64_t reserved_prepost = 0;

	/*
	 * While the msg queue is locked, we have control of the
	 * kmsg, so the ref in it for the port is still good.
	 *
	 * Check for a receiver for the message.
	 */
	imq_reserve_and_lock(mqueue, &reserved_prepost, &s);
	for (;;) {
		struct waitq *waitq = &mqueue->imq_wait_queue;
		spl_t th_spl;
		thread_t receiver;
		mach_msg_size_t msize;

		receiver = waitq_wakeup64_identity_locked(waitq,
							  IPC_MQUEUE_RECEIVE,
							  THREAD_AWAKENED,
							  &th_spl,
							  &reserved_prepost,
							  WAITQ_KEEP_LOCKED);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {
			/*
			 * no receivers; queue kmsg if space still reserved.
			 */
			if (mqueue->imq_msgcount > 0) {
				ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
				break;
			}

			/*
			 * Otherwise, the message queue must belong to an inactive
			 * port, so just destroy the message and pretend it was posted.
			 */
			/* clear the waitq boost we may have been given */
			waitq_clear_promotion_locked(waitq, current_thread());
			imq_release_and_unlock(mqueue, reserved_prepost, s);
			ipc_kmsg_destroy(kmsg);
			current_task()->messages_sent++;
			return;
		}

		/*
		 * If the receiver waited with a facility not directly
		 * related to Mach messaging, then it isn't prepared to get
		 * handed the message directly.  Just set it running, and
		 * go look for another thread that can.
		 */
		if (receiver->ith_state != MACH_RCV_IN_PROGRESS) {
			thread_unlock(receiver);
			splx(th_spl);
			continue;
		}

		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too small
		 * the thread we wake up will get that as its status.
		 */
		msize = ipc_kmsg_copyout_size(kmsg, receiver->map);
		if (receiver->ith_msize <
		    (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(receiver), receiver->ith_option))) {
			receiver->ith_msize = msize;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for special too
		 * large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {
			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
			thread_unlock(receiver);
			splx(th_spl);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue, IMQ_NULL);
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_receiver_name = mqueue->imq_receiver_name;
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
		splx(th_spl);
	}

	/* clear the waitq boost we may have been given */
	waitq_clear_promotion_locked(&mqueue->imq_wait_queue, current_thread());
	imq_release_and_unlock(mqueue, reserved_prepost, s);

	current_task()->messages_sent++;
	return;
}
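/*
 * Flow summary (informal): each iteration of the loop above either
 *   1) finds no receiver       -> enqueue the kmsg, or destroy it if the
 *                                 port went inactive, and stop;
 *   2) finds a non-msg waiter  -> set it running and retry;
 *   3) finds a too-small receiver that asked for MACH_RCV_LARGE
 *                              -> hand it only the size indication and retry;
 *   4) finds a usable receiver -> hand it the kmsg and stop.
 */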
void
ipc_mqueue_receive_results(wait_result_t saved_wait_result)
{
	thread_t self = current_thread();
	mach_msg_option_t option = self->ith_option;

	/*
	 * why did we wake up?
	 */
	switch (saved_wait_result) {
	case THREAD_TIMED_OUT:
		self->ith_state = MACH_RCV_TIMED_OUT;
		return;

	case THREAD_INTERRUPTED:
		self->ith_state = MACH_RCV_INTERRUPTED;
		return;

	case THREAD_RESTART:
		/* something bad happened to the port/set */
		self->ith_state = MACH_RCV_PORT_CHANGED;
		return;

	case THREAD_AWAKENED:
		/*
		 * We do not need to go select a message, somebody
		 * handed us one (or a too-large indication).
		 */
		switch (self->ith_state) {
		case MACH_RCV_SCATTER_SMALL:
		case MACH_RCV_TOO_LARGE:
			/*
			 * Somebody tried to give us a too large
			 * message. If we indicated that we cared,
			 * then they only gave us the indication,
			 * otherwise they gave us the indication
			 * AND the message anyway.
			 */
			if (option & MACH_RCV_LARGE) {
				return;
			}
			/* fall through: we were handed the message anyway */

		case MACH_MSG_SUCCESS:
			return;

		default:
			panic("ipc_mqueue_receive_results: strange ith_state");
		}

	default:
		panic("ipc_mqueue_receive_results: strange wait_result");
	}
}
void
ipc_mqueue_receive_continue(
	__unused void *param,
	wait_result_t wresult)
{
	ipc_mqueue_receive_results(wresult);
	mach_msg_receive_continue(); /* hard-coded for now */
}
/*
 *	Routine:	ipc_mqueue_receive
 *	Purpose:
 *		Receive a message from a message queue.
 *
 *		If continuation is non-zero, then we might discard
 *		our kernel stack when we block.  We will continue
 *		after unblocking by executing continuation.
 *
 *		If resume is true, then we are resuming a receive
 *		operation after a blocked receive discarded our stack.
 *	Conditions:
 *		Our caller must hold a reference for the port or port set
 *		to which this queue belongs, to keep the queue
 *		from being deallocated.
 *
 *		The kmsg is returned with clean header fields
 *		and with the circular bit turned off.
 *	Returns:
 *		MACH_MSG_SUCCESS	Message returned in kmsgp.
 *		MACH_RCV_TOO_LARGE	Message size returned in kmsgp.
 *		MACH_RCV_TIMED_OUT	No message obtained.
 *		MACH_RCV_INTERRUPTED	No message obtained.
 *		MACH_RCV_PORT_DIED	Port/set died; no message.
 *		MACH_RCV_PORT_CHANGED	Port moved into set; no msg.
 */
void
ipc_mqueue_receive(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	mach_msg_timeout_t	rcv_timeout,
	int			interruptible)
{
	wait_result_t wresult;
	thread_t self = current_thread();

	wresult = ipc_mqueue_receive_on_thread(mqueue, option, max_size,
					       rcv_timeout, interruptible,
					       self);
	if (wresult == THREAD_NOT_WAITING)
		return;

	if (wresult == THREAD_WAITING) {
		counter((interruptible == THREAD_ABORTSAFE) ?
			c_ipc_mqueue_receive_block_user++ :
			c_ipc_mqueue_receive_block_kernel++);

		if (self->ith_continuation)
			thread_block(ipc_mqueue_receive_continue);
			/* NOTREACHED */

		wresult = thread_block(THREAD_CONTINUE_NULL);
	}
	ipc_mqueue_receive_results(wresult);
}
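/*
 * Continuation note (illustrative; the continuation value shown is a
 * hypothetical choice, not something this file dictates): a caller that can
 * tolerate losing its kernel stack arms self->ith_continuation first, e.g.
 *
 *	self->ith_continuation = some_mach_msg_continuation;
 *	ipc_mqueue_receive(mqueue, option, max_size, timeout, THREAD_ABORTSAFE);
 *
 * With a continuation armed, a blocked receive resumes in
 * ipc_mqueue_receive_continue() rather than returning here.
 */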
static int mqueue_process_prepost_receive(void *ctx, struct waitq *waitq,
					  struct waitq_set *wqset)
{
	ipc_mqueue_t port_mq, *pmq_ptr;

	(void)wqset;
	port_mq = (ipc_mqueue_t)waitq;

	/*
	 * If there are no messages on this queue, skip it and remove
	 * it from the prepost list.
	 */
	if (ipc_kmsg_queue_empty(&port_mq->imq_messages))
		return WQ_ITERATE_INVALIDATE_CONTINUE;

	/*
	 * There are messages waiting on this port.
	 * Instruct the prepost iteration logic to break, but keep the
	 * waitq locked.
	 */
	pmq_ptr = (ipc_mqueue_t *)ctx;
	if (pmq_ptr)
		*pmq_ptr = port_mq;
	return WQ_ITERATE_BREAK_KEEP_LOCKED;
}
wait_result_t
ipc_mqueue_receive_on_thread(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	mach_msg_timeout_t	rcv_timeout,
	int			interruptible,
	thread_t		thread)
{
	wait_result_t wresult;
	uint64_t deadline;
	spl_t s;

	/* no need to reserve anything: we never prepost to anyone */
	s = splsched();
	imq_lock(mqueue);

	if (imq_is_set(mqueue)) {
		ipc_mqueue_t port_mq = IMQ_NULL;
		spl_t set_spl;

		(void)waitq_set_iterate_preposts(&mqueue->imq_set_queue,
						 &port_mq,
						 mqueue_process_prepost_receive,
						 &set_spl);

		if (port_mq != IMQ_NULL) {
			/*
			 * We get here if there is at least one message
			 * waiting on port_mq. We have instructed the prepost
			 * iteration logic to leave both the port_mq and the
			 * set mqueue locked.
			 *
			 * TODO: previously, we would place this port at the
			 *       back of the prepost list...
			 */
			imq_unlock(mqueue);

			/* TODO: if/when port mqueues become non irq safe,
			 *       we won't need this spl, and we should be
			 *       able to call splx(s) (if that's even
			 *       necessary).
			 * For now, we've still disabled interrupts via
			 * imq_reserve_and_lock();
			 */
			splx(set_spl);

			/*
			 * Continue on to handling the message with just
			 * the port mqueue locked.
			 */
			ipc_mqueue_select_on_thread(port_mq, mqueue, option,
						    max_size, thread);
			imq_unlock(port_mq);
			splx(s);
			return THREAD_NOT_WAITING;
		}
	} else {
		ipc_kmsg_queue_t kmsgs;

		/*
		 * Receive on a single port. Just try to get the messages.
		 */
		kmsgs = &mqueue->imq_messages;
		if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
			ipc_mqueue_select_on_thread(mqueue, IMQ_NULL, option,
						    max_size, thread);
			imq_unlock(mqueue);
			splx(s);
			return THREAD_NOT_WAITING;
		}
	}

	/*
	 * Looks like we'll have to block.  The mqueue we will
	 * block on (whether the set's or the local port's) is
	 * still locked.
	 */
	if (option & MACH_RCV_TIMEOUT) {
		if (rcv_timeout == 0) {
			imq_unlock(mqueue);
			splx(s);
			thread->ith_state = MACH_RCV_TIMED_OUT;
			return THREAD_NOT_WAITING;
		}
	}

	thread_lock(thread);
	/* NOTE: need splsched() here if mqueue no longer needs irq disabled */

	thread->ith_state = MACH_RCV_IN_PROGRESS;
	thread->ith_option = option;
	thread->ith_msize = max_size;

	if (option & MACH_RCV_TIMEOUT)
		clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline);
	else
		deadline = 0;

	wresult = waitq_assert_wait64_locked(&mqueue->imq_wait_queue,
					     IPC_MQUEUE_RECEIVE,
					     interruptible,
					     TIMEOUT_URGENCY_USER_NORMAL,
					     deadline, TIMEOUT_NO_LEEWAY,
					     thread);
	/* preposts should be detected above, not here */
	if (wresult == THREAD_AWAKENED)
		panic("ipc_mqueue_receive_on_thread: sleep walking");

	thread_unlock(thread);
	imq_unlock(mqueue);
	splx(s);
	return wresult;
}
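/*
 * Summary (informal): THREAD_NOT_WAITING means a message (or a too-large
 * indication) was already "posted" to the thread via
 * ipc_mqueue_select_on_thread(); any other wait_result_t means the thread
 * was parked on the mqueue's waitq and the caller must block, then interpret
 * the outcome with ipc_mqueue_receive_results().
 */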
/*
 *	Routine:	ipc_mqueue_select_on_thread
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before it had to block.  Pick the message off the queue and
 *		"post" it to thread.
 *	Conditions:
 *		There is a message.
 *		No need to reserve prepost objects - it will never prepost.
 *	Returns:
 *		MACH_MSG_SUCCESS	Actually selected a message for ourselves.
 *		MACH_RCV_TOO_LARGE	May or may not have pulled it, but it is large.
 */
void
ipc_mqueue_select_on_thread(
	ipc_mqueue_t		port_mq,
	ipc_mqueue_t		set_mq,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	thread_t		thread)
{
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	mach_msg_size_t rcv_size;

	/*
	 * Do some sanity checking of our ability to receive
	 * before pulling the message off the queue.
	 */
	kmsg = ipc_kmsg_queue_first(&port_mq->imq_messages);
	assert(kmsg != IKM_NULL);

	/*
	 * If we really can't receive it, but we had the
	 * MACH_RCV_LARGE option set, then don't take it off
	 * the queue, instead return the appropriate error
	 * (and size needed).
	 */
	rcv_size = ipc_kmsg_copyout_size(kmsg, thread->map);
	if (rcv_size + REQUESTED_TRAILER_SIZE(thread_is_64bit(thread), option) > max_size) {
		mr = MACH_RCV_TOO_LARGE;
		if (option & MACH_RCV_LARGE) {
			thread->ith_receiver_name = port_mq->imq_receiver_name;
			thread->ith_kmsg = IKM_NULL;
			thread->ith_msize = rcv_size;
			thread->ith_seqno = 0;
			thread->ith_state = mr;
			return;
		}
	}

	ipc_kmsg_rmqueue_first_macro(&port_mq->imq_messages, kmsg);
	ipc_mqueue_release_msgcount(port_mq, set_mq);
	thread->ith_seqno = port_mq->imq_seqno++;
	thread->ith_kmsg = kmsg;
	thread->ith_state = mr;

	current_task()->messages_received++;
	return;
}
/*
 *	Routine:	ipc_mqueue_peek
 *	Purpose:
 *		Peek at a (non-set) message queue to see if it has a message
 *		matching the sequence number provided (if zero, then the
 *		first message in the queue) and return vital info about the
 *		message.
 *	Conditions:
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue.
 */
unsigned
ipc_mqueue_peek(ipc_mqueue_t mq,
		mach_port_seqno_t *seqnop,
		mach_msg_size_t *msg_sizep,
		mach_msg_id_t *msg_idp,
		mach_msg_max_trailer_t *msg_trailerp)
{
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno, msgoff;
	unsigned res = 0;
	spl_t s;

	assert(!imq_is_set(mq));

	s = splsched();
	imq_lock(mq);

	seqno = 0;
	if (seqnop != NULL)
		seqno = *seqnop;

	if (seqno == 0) {
		seqno = mq->imq_seqno;
		msgoff = 0;
	} else if (seqno >= mq->imq_seqno &&
		   seqno < mq->imq_seqno + mq->imq_msgcount) {
		msgoff = seqno - mq->imq_seqno;
	} else
		goto out;

	/* look for the message that would match that seqno */
	kmsgq = &mq->imq_messages;
	kmsg = ipc_kmsg_queue_first(kmsgq);
	while (msgoff-- && kmsg != IKM_NULL) {
		kmsg = ipc_kmsg_queue_next(kmsgq, kmsg);
	}
	if (kmsg == IKM_NULL)
		goto out;

	/* found one - return the requested info */
	if (seqnop != NULL)
		*seqnop = seqno;
	if (msg_sizep != NULL)
		*msg_sizep = kmsg->ikm_header->msgh_size;
	if (msg_idp != NULL)
		*msg_idp = kmsg->ikm_header->msgh_id;
	if (msg_trailerp != NULL)
		memcpy(msg_trailerp,
		       (mach_msg_max_trailer_t *)((vm_offset_t)kmsg->ikm_header +
						  round_msg(kmsg->ikm_header->msgh_size)),
		       sizeof(mach_msg_max_trailer_t));
	res = 1;

out:
	imq_unlock(mq);
	splx(s);
	return res;
}
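/*
 * Usage sketch (illustrative only; locals are hypothetical): peek at the
 * first queued message without dequeueing it:
 *
 *	mach_port_seqno_t seqno = 0;	// 0 selects the first message
 *	mach_msg_size_t size;
 *	mach_msg_id_t id;
 *
 *	if (ipc_mqueue_peek(mq, &seqno, &size, &id, NULL)) {
 *		// seqno/size/id now describe the message at that position
 *	}
 *
 * A zero return means no message matched the requested sequence number.
 */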
/*
 * peek at the contained port message queues, break prepost iteration as soon
 * as we spot a message on one of the message queues referenced by the set's
 * prepost list.  No need to lock each message queue, as only the head of each
 * queue is checked. If a message wasn't there before we entered here, no need
 * to find it (if we do, great).
 */
static int mqueue_peek_iterator(void *ctx, struct waitq *waitq,
				struct waitq_set *wqset)
{
	ipc_mqueue_t port_mq = (ipc_mqueue_t)waitq;
	ipc_kmsg_queue_t kmsgs = &port_mq->imq_messages;

	(void)ctx;
	(void)wqset;

	if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL)
		return WQ_ITERATE_BREAK; /* break out of the prepost iteration */

	return WQ_ITERATE_CONTINUE;
}
/*
 *	Routine:	ipc_mqueue_set_peek
 *	Purpose:
 *		Peek at a message queue set to see if it has any ports
 *		with messages.
 *	Conditions:
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue.
 */
unsigned
ipc_mqueue_set_peek(ipc_mqueue_t mq)
{
	spl_t s;
	int ret;

	assert(imq_is_set(mq));

	s = splsched();
	imq_lock(mq);

	ret = waitq_set_iterate_preposts(&mq->imq_set_queue, NULL,
					 mqueue_peek_iterator, NULL);

	imq_unlock(mq);
	splx(s);
	return (ret == WQ_ITERATE_BREAK);
}
/*
 *	Routine:	ipc_mqueue_set_gather_member_names
 *	Purpose:
 *		Discover all ports which are members of a given port set.
 *		Because the waitq linkage mechanism was redesigned to save
 *		significant amounts of memory, it no longer keeps back-pointers
 *		from a port set to a port.  Therefore, we must iterate over all
 *		ports within a given IPC space and individually query them to
 *		see if they are members of the given set.  Port names of ports
 *		found to be members of the given set will be gathered into the
 *		provided 'names' array.  Actual returned names are limited to
 *		maxnames entries, but we keep counting the actual number of
 *		members to let the caller decide to retry if necessary.
 *	Conditions:
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue (via port set).
 */
void
ipc_mqueue_set_gather_member_names(
	ipc_space_t		space,
	ipc_mqueue_t		set_mq,
	ipc_entry_num_t		maxnames,
	mach_port_name_t	*names,
	ipc_entry_num_t		*actualp)
{
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	struct waitq_set *wqset;
	ipc_entry_num_t actual = 0;

	assert(set_mq != IMQ_NULL);
	wqset = &set_mq->imq_set_queue;

	assert(space != IS_NULL);
	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		goto out;
	}

	if (!waitq_set_is_valid(wqset)) {
		is_read_unlock(space);
		goto out;
	}

	table = space->is_table;
	tsize = space->is_table_size;
	for (ipc_entry_num_t idx = 0; idx < tsize; idx++) {
		ipc_entry_t entry = &table[idx];

		/* only receive rights can be members of port sets */
		if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) != MACH_PORT_TYPE_NONE) {
			__IGNORE_WCASTALIGN(ipc_port_t port = (ipc_port_t)entry->ie_object);
			ipc_mqueue_t mq = &port->ip_messages;

			assert(IP_VALID(port));
			if (ip_active(port) &&
			    waitq_member(&mq->imq_wait_queue, wqset)) {
				if (actual < maxnames)
					names[actual] = mq->imq_receiver_name;
				actual++;
			}
		}
	}

	is_read_unlock(space);

out:
	*actualp = actual;
}
/*
 *	Routine:	ipc_mqueue_destroy
 *	Purpose:
 *		Destroy a (non-set) message queue.
 *		Set any blocked senders running.
 *		Destroy the kmsgs in the queue.
 *	Conditions:
 *		Receivers were removed when the receive right was "changed".
 */
void
ipc_mqueue_destroy(
	ipc_mqueue_t	mqueue)
{
	ipc_kmsg_queue_t kmqueue;
	ipc_kmsg_t kmsg;
	boolean_t reap = FALSE;
	spl_t s;

	assert(!imq_is_set(mqueue));

	s = splsched();
	imq_lock(mqueue);

	/*
	 * rouse all blocked senders
	 * (don't boost anyone - we're tearing this queue down)
	 */
	mqueue->imq_fullwaiters = FALSE;
	waitq_wakeup64_all_locked(&mqueue->imq_wait_queue,
				  IPC_MQUEUE_FULL,
				  THREAD_RESTART,
				  NULL,
				  WAITQ_ALL_PRIORITIES,
				  WAITQ_KEEP_LOCKED);

	/*
	 * Move messages from the specified queue to the per-thread
	 * clean/drain queue while we have the mqueue lock.
	 */
	kmqueue = &mqueue->imq_messages;
	while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
		boolean_t first;

		first = ipc_kmsg_delayed_destroy(kmsg);
		if (first)
			reap = first;
	}

	/*
	 * Wipe out message count, both for messages about to be
	 * reaped and for reserved space for (previously) woken senders.
	 * This is the indication to them that their reserved space is gone
	 * (the mqueue was destroyed).
	 */
	mqueue->imq_msgcount = 0;

	/* clear out any preposting we may have done */
	waitq_clear_prepost_locked(&mqueue->imq_wait_queue, &s);
	imq_unlock(mqueue);
	splx(s);

	/*
	 * assert that we're destroying a queue that's not a
	 * member of any other queue
	 */
	assert(mqueue->imq_wait_queue.waitq_prepost_id == 0);
	assert(mqueue->imq_wait_queue.waitq_set_id == 0);

	/*
	 * Destroy the messages we enqueued if we aren't nested
	 * inside some other attempt to drain the same queue.
	 */
	if (reap)
		ipc_kmsg_reap_delayed();
}
/*
 *	Routine:	ipc_mqueue_set_qlimit
 *	Purpose:
 *		Changes a message queue limit; the maximum number
 *		of messages which may be queued.
 */
void
ipc_mqueue_set_qlimit(
	ipc_mqueue_t		mqueue,
	mach_port_msgcount_t	qlimit)
{
	spl_t s;

	assert(qlimit <= MACH_PORT_QLIMIT_MAX);

	/* wake up senders allowed by the new qlimit */
	s = splsched();
	imq_lock(mqueue);
	if (qlimit > mqueue->imq_qlimit) {
		mach_port_msgcount_t i, wakeup;

		/* caution: wakeup, qlimit are unsigned */
		wakeup = qlimit - mqueue->imq_qlimit;

		for (i = 0; i < wakeup; i++) {
			/*
			 * boost the priority of the awoken thread
			 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
			 * the message queue slot we've just reserved.
			 *
			 * NOTE: this will never prepost
			 */
			if (waitq_wakeup64_one_locked(&mqueue->imq_wait_queue,
						      IPC_MQUEUE_FULL,
						      THREAD_AWAKENED,
						      NULL,
						      WAITQ_PROMOTE_PRIORITY,
						      WAITQ_KEEP_LOCKED) == KERN_NOT_WAITING) {
				mqueue->imq_fullwaiters = FALSE;
				break;
			}
			mqueue->imq_msgcount++; /* give it to the awakened thread */
		}
	}
	mqueue->imq_qlimit = qlimit;
	imq_unlock(mqueue);
	splx(s);
}
/*
 *	Routine:	ipc_mqueue_set_seqno
 *	Purpose:
 *		Changes an mqueue's sequence number.
 *	Conditions:
 *		Caller holds a reference to the queue's containing object.
 */
void
ipc_mqueue_set_seqno(
	ipc_mqueue_t		mqueue,
	mach_port_seqno_t	seqno)
{
	spl_t s;

	s = splsched();
	imq_lock(mqueue);
	mqueue->imq_seqno = seqno;
	imq_unlock(mqueue);
	splx(s);
}
/*
 *	Routine:	ipc_mqueue_copyin
 *	Purpose:
 *		Convert a name in a space to a message queue.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets a ref
 *		for the object.  This ref ensures the continued existence
 *		of the queue.
 *	Returns:
 *		MACH_MSG_SUCCESS	Found a message queue.
 *		MACH_RCV_INVALID_NAME	The space is dead.
 *		MACH_RCV_INVALID_NAME	The name doesn't denote a right.
 *		MACH_RCV_INVALID_NAME
 *			The denoted right is not receive or port set.
 *		MACH_RCV_IN_SET		Receive right is a member of a set.
 */
mach_msg_return_t
ipc_mqueue_copyin(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_mqueue_t		*mqueuep,
	ipc_object_t		*objectp)
{
	ipc_entry_t entry;
	ipc_object_t object;
	ipc_mqueue_t mqueue;

	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	entry = ipc_entry_lookup(space, name);
	if (entry == IE_NULL) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	object = entry->ie_object;

	if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
		ipc_port_t port;

		__IGNORE_WCASTALIGN(port = (ipc_port_t) object);
		assert(port != IP_NULL);

		ip_lock(port);
		assert(ip_active(port));
		assert(port->ip_receiver_name == name);
		assert(port->ip_receiver == space);
		is_read_unlock(space);
		mqueue = &port->ip_messages;
	} else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
		ipc_pset_t pset;

		__IGNORE_WCASTALIGN(pset = (ipc_pset_t) object);
		assert(pset != IPS_NULL);

		ips_lock(pset);
		assert(ips_active(pset));
		assert(pset->ips_local_name == name);
		is_read_unlock(space);
		mqueue = &pset->ips_messages;
	} else {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	/*
	 * At this point, the object is locked and active,
	 * the space is unlocked, and mqueue is initialized.
	 */
	io_reference(object);
	io_unlock(object);

	*objectp = object;
	*mqueuep = mqueue;
	return MACH_MSG_SUCCESS;
}
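/*
 * Usage sketch (illustrative only; error handling elided, and io_release()
 * is assumed to be the matching release for the io_reference() taken above):
 *
 *	ipc_object_t object;
 *	ipc_mqueue_t mqueue;
 *
 *	mr = ipc_mqueue_copyin(space, name, &mqueue, &object);
 *	if (mr == MACH_MSG_SUCCESS) {
 *		ipc_mqueue_receive(mqueue, option, max_size, timeout,
 *				   THREAD_ABORTSAFE);
 *		io_release(object);
 *	}
 */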