2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * @OSF_FREE_COPYRIGHT@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
59 * File: ipc/ipc_mqueue.c
63 * Functions to manipulate IPC message queues.
66 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
67 * support for mandatory and extensible security protections. This notice
68 * is included in support of clause 2.2 (b) of the Apple Public Source License,
73 #include <mach/port.h>
74 #include <mach/message.h>
75 #include <mach/sync_policy.h>
77 #include <kern/assert.h>
78 #include <kern/counters.h>
79 #include <kern/sched_prim.h>
80 #include <kern/ipc_kobject.h>
81 #include <kern/ipc_mig.h> /* XXX - for mach_msg_receive_continue */
82 #include <kern/misc_protos.h>
83 #include <kern/task.h>
84 #include <kern/thread.h>
85 #include <kern/wait_queue.h>
87 #include <ipc/ipc_mqueue.h>
88 #include <ipc/ipc_kmsg.h>
89 #include <ipc/ipc_port.h>
90 #include <ipc/ipc_pset.h>
91 #include <ipc/ipc_space.h>
94 #include <vm/vm_map.h>
98 #include <security/mac_mach_internal.h>
101 int ipc_mqueue_full
; /* address is event for queue space */
102 int ipc_mqueue_rcv
; /* address is event for message arrival */
104 /* forward declarations */
105 void ipc_mqueue_receive_results(wait_result_t result
);
108 * Routine: ipc_mqueue_init
110 * Initialize a newly-allocated message queue.
118 wait_queue_set_init(&mqueue
->imq_set_queue
, SYNC_POLICY_FIFO
|SYNC_POLICY_PREPOST
);
120 wait_queue_init(&mqueue
->imq_wait_queue
, SYNC_POLICY_FIFO
);
121 ipc_kmsg_queue_init(&mqueue
->imq_messages
);
122 mqueue
->imq_seqno
= 0;
123 mqueue
->imq_msgcount
= 0;
124 mqueue
->imq_qlimit
= MACH_PORT_QLIMIT_DEFAULT
;
125 mqueue
->imq_fullwaiters
= FALSE
;
130 * Routine: ipc_mqueue_member
132 * Indicate whether the (port) mqueue is a member of
133 * this portset's mqueue. We do this by checking
134 * whether the portset mqueue's waitq is an member of
135 * the port's mqueue waitq.
137 * the portset's mqueue is not already a member
138 * this may block while allocating linkage structures.
143 ipc_mqueue_t port_mqueue
,
144 ipc_mqueue_t set_mqueue
)
146 wait_queue_t port_waitq
= &port_mqueue
->imq_wait_queue
;
147 wait_queue_set_t set_waitq
= &set_mqueue
->imq_set_queue
;
149 return (wait_queue_member(port_waitq
, set_waitq
));
154 * Routine: ipc_mqueue_remove
156 * Remove the association between the queue and the specified
163 ipc_mqueue_t set_mqueue
,
164 wait_queue_link_t
*wqlp
)
166 wait_queue_t mq_waitq
= &mqueue
->imq_wait_queue
;
167 wait_queue_set_t set_waitq
= &set_mqueue
->imq_set_queue
;
169 return wait_queue_unlink_nofree(mq_waitq
, set_waitq
, wqlp
);
173 * Routine: ipc_mqueue_remove_from_all
175 * Remove the mqueue from all the sets it is a member of
180 ipc_mqueue_remove_from_all(
184 wait_queue_t mq_waitq
= &mqueue
->imq_wait_queue
;
186 wait_queue_unlink_all_nofree(mq_waitq
, links
);
191 * Routine: ipc_mqueue_remove_all
193 * Remove all the member queues from the specified set.
198 ipc_mqueue_remove_all(
202 wait_queue_set_t mq_setq
= &mqueue
->imq_set_queue
;
204 wait_queue_set_unlink_all_nofree(mq_setq
, links
);
210 * Routine: ipc_mqueue_add
212 * Associate the portset's mqueue with the port's mqueue.
213 * This has to be done so that posting the port will wakeup
214 * a portset waiter. If there are waiters on the portset
215 * mqueue and messages on the port mqueue, try to match them
222 ipc_mqueue_t port_mqueue
,
223 ipc_mqueue_t set_mqueue
,
224 wait_queue_link_t wql
)
226 wait_queue_t port_waitq
= &port_mqueue
->imq_wait_queue
;
227 wait_queue_set_t set_waitq
= &set_mqueue
->imq_set_queue
;
228 ipc_kmsg_queue_t kmsgq
;
229 ipc_kmsg_t kmsg
, next
;
233 kr
= wait_queue_link_noalloc(port_waitq
, set_waitq
, wql
);
234 if (kr
!= KERN_SUCCESS
)
238 * Now that the set has been added to the port, there may be
239 * messages queued on the port and threads waiting on the set
240 * waitq. Lets get them together.
243 imq_lock(port_mqueue
);
244 kmsgq
= &port_mqueue
->imq_messages
;
245 for (kmsg
= ipc_kmsg_queue_first(kmsgq
);
248 next
= ipc_kmsg_queue_next(kmsgq
, kmsg
);
252 mach_msg_size_t msize
;
254 th
= wait_queue_wakeup64_identity_locked(
259 /* waitq/mqueue still locked, thread locked */
261 if (th
== THREAD_NULL
)
265 * If the receiver waited with a facility not directly
266 * related to Mach messaging, then it isn't prepared to get
267 * handed the message directly. Just set it running, and
268 * go look for another thread that can.
270 if (th
->ith_state
!= MACH_RCV_IN_PROGRESS
) {
276 * Found a receiver. see if they can handle the message
277 * correctly (the message is not too large for them, or
278 * they didn't care to be informed that the message was
279 * too large). If they can't handle it, take them off
280 * the list and let them go back and figure it out and
281 * just move onto the next.
283 msize
= ipc_kmsg_copyout_size(kmsg
, th
->map
);
285 (msize
+ REQUESTED_TRAILER_SIZE(thread_is_64bit(th
), th
->ith_option
))) {
286 th
->ith_state
= MACH_RCV_TOO_LARGE
;
287 th
->ith_msize
= msize
;
288 if (th
->ith_option
& MACH_RCV_LARGE
) {
290 * let him go without message
292 th
->ith_receiver_name
= port_mqueue
->imq_receiver_name
;
293 th
->ith_kmsg
= IKM_NULL
;
296 continue; /* find another thread */
299 th
->ith_state
= MACH_MSG_SUCCESS
;
303 * This thread is going to take this message,
306 ipc_kmsg_rmqueue(kmsgq
, kmsg
);
307 ipc_mqueue_release_msgcount(port_mqueue
);
310 th
->ith_seqno
= port_mqueue
->imq_seqno
++;
312 break; /* go to next message */
317 imq_unlock(port_mqueue
);
323 * Routine: ipc_mqueue_changed
325 * Wake up receivers waiting in a message queue.
327 * The message queue is locked.
334 wait_queue_wakeup64_all_locked(
335 &mqueue
->imq_wait_queue
,
338 FALSE
); /* unlock waitq? */
345 * Routine: ipc_mqueue_send
347 * Send a message to a message queue. The message holds a reference
348 * for the destination port for this message queue in the
349 * msgh_remote_port field.
351 * If unsuccessful, the caller still has possession of
352 * the message and must do something with it. If successful,
353 * the message is queued, given to a receiver, or destroyed.
357 * MACH_MSG_SUCCESS The message was accepted.
358 * MACH_SEND_TIMED_OUT Caller still has message.
359 * MACH_SEND_INTERRUPTED Caller still has message.
365 mach_msg_option_t option
,
366 mach_msg_timeout_t send_timeout
,
373 * 1) We're under the queue limit.
374 * 2) Caller used the MACH_SEND_ALWAYS internal option.
375 * 3) Message is sent to a send-once right.
377 if (!imq_full(mqueue
) ||
378 (!imq_full_kernel(mqueue
) &&
379 ((option
& MACH_SEND_ALWAYS
) ||
380 (MACH_MSGH_BITS_REMOTE(kmsg
->ikm_header
->msgh_bits
) ==
381 MACH_MSG_TYPE_PORT_SEND_ONCE
)))) {
382 mqueue
->imq_msgcount
++;
383 assert(mqueue
->imq_msgcount
> 0);
387 thread_t cur_thread
= current_thread();
391 * We have to wait for space to be granted to us.
393 if ((option
& MACH_SEND_TIMEOUT
) && (send_timeout
== 0)) {
396 return MACH_SEND_TIMED_OUT
;
398 if (imq_full_kernel(mqueue
)) {
401 return MACH_SEND_NO_BUFFER
;
403 mqueue
->imq_fullwaiters
= TRUE
;
404 thread_lock(cur_thread
);
405 if (option
& MACH_SEND_TIMEOUT
)
406 clock_interval_to_deadline(send_timeout
, 1000*NSEC_PER_USEC
, &deadline
);
409 wresult
= wait_queue_assert_wait64_locked(
410 &mqueue
->imq_wait_queue
,
413 TIMEOUT_URGENCY_USER_NORMAL
,
416 thread_unlock(cur_thread
);
420 if (wresult
== THREAD_WAITING
) {
421 wresult
= thread_block(THREAD_CONTINUE_NULL
);
422 counter(c_ipc_mqueue_send_block
++);
426 case THREAD_TIMED_OUT
:
427 assert(option
& MACH_SEND_TIMEOUT
);
428 return MACH_SEND_TIMED_OUT
;
430 case THREAD_AWAKENED
:
431 /* we can proceed - inherited msgcount from waker */
432 assert(mqueue
->imq_msgcount
> 0);
435 case THREAD_INTERRUPTED
:
436 return MACH_SEND_INTERRUPTED
;
439 /* mqueue is being destroyed */
440 return MACH_SEND_INVALID_DEST
;
442 panic("ipc_mqueue_send");
446 ipc_mqueue_post(mqueue
, kmsg
);
447 return MACH_MSG_SUCCESS
;
452 * Routine: ipc_mqueue_release_msgcount
454 * Release a message queue reference in the case where we
458 * The message queue is locked.
459 * The message corresponding to this reference is off the queue.
462 ipc_mqueue_release_msgcount(
465 assert(imq_held(mqueue
));
466 assert(mqueue
->imq_msgcount
> 1 || ipc_kmsg_queue_empty(&mqueue
->imq_messages
));
468 mqueue
->imq_msgcount
--;
470 if (!imq_full(mqueue
) && mqueue
->imq_fullwaiters
) {
471 if (wait_queue_wakeup64_one_locked(
472 &mqueue
->imq_wait_queue
,
475 FALSE
) != KERN_SUCCESS
) {
476 mqueue
->imq_fullwaiters
= FALSE
;
478 /* gave away our slot - add reference back */
479 mqueue
->imq_msgcount
++;
485 * Routine: ipc_mqueue_post
487 * Post a message to a waiting receiver or enqueue it. If a
488 * receiver is waiting, we can release our reserved space in
492 * If we need to queue, our space in the message queue is reserved.
496 register ipc_mqueue_t mqueue
,
497 register ipc_kmsg_t kmsg
)
502 * While the msg queue is locked, we have control of the
503 * kmsg, so the ref in it for the port is still good.
505 * Check for a receiver for the message.
510 wait_queue_t waitq
= &mqueue
->imq_wait_queue
;
512 mach_msg_size_t msize
;
514 receiver
= wait_queue_wakeup64_identity_locked(
519 /* waitq still locked, thread locked */
521 if (receiver
== THREAD_NULL
) {
523 * no receivers; queue kmsg
525 assert(mqueue
->imq_msgcount
> 0);
526 ipc_kmsg_enqueue_macro(&mqueue
->imq_messages
, kmsg
);
531 * If the receiver waited with a facility not directly
532 * related to Mach messaging, then it isn't prepared to get
533 * handed the message directly. Just set it running, and
534 * go look for another thread that can.
536 if (receiver
->ith_state
!= MACH_RCV_IN_PROGRESS
) {
537 thread_unlock(receiver
);
543 * We found a waiting thread.
544 * If the message is too large or the scatter list is too small
545 * the thread we wake up will get that as its status.
547 msize
= ipc_kmsg_copyout_size(kmsg
, receiver
->map
);
548 if (receiver
->ith_msize
<
549 (msize
+ REQUESTED_TRAILER_SIZE(thread_is_64bit(receiver
), receiver
->ith_option
))) {
550 receiver
->ith_msize
= msize
;
551 receiver
->ith_state
= MACH_RCV_TOO_LARGE
;
553 receiver
->ith_state
= MACH_MSG_SUCCESS
;
557 * If there is no problem with the upcoming receive, or the
558 * receiver thread didn't specifically ask for special too
559 * large error condition, go ahead and select it anyway.
561 if ((receiver
->ith_state
== MACH_MSG_SUCCESS
) ||
562 !(receiver
->ith_option
& MACH_RCV_LARGE
)) {
564 receiver
->ith_kmsg
= kmsg
;
565 receiver
->ith_seqno
= mqueue
->imq_seqno
++;
566 thread_unlock(receiver
);
568 /* we didn't need our reserved spot in the queue */
569 ipc_mqueue_release_msgcount(mqueue
);
574 * Otherwise, this thread needs to be released to run
575 * and handle its error without getting the message. We
576 * need to go back and pick another one.
578 receiver
->ith_receiver_name
= mqueue
->imq_receiver_name
;
579 receiver
->ith_kmsg
= IKM_NULL
;
580 receiver
->ith_seqno
= 0;
581 thread_unlock(receiver
);
587 current_task()->messages_sent
++;
593 ipc_mqueue_receive_results(wait_result_t saved_wait_result
)
595 thread_t self
= current_thread();
596 mach_msg_option_t option
= self
->ith_option
;
599 * why did we wake up?
601 switch (saved_wait_result
) {
602 case THREAD_TIMED_OUT
:
603 self
->ith_state
= MACH_RCV_TIMED_OUT
;
606 case THREAD_INTERRUPTED
:
607 self
->ith_state
= MACH_RCV_INTERRUPTED
;
611 /* something bad happened to the port/set */
612 self
->ith_state
= MACH_RCV_PORT_CHANGED
;
615 case THREAD_AWAKENED
:
617 * We do not need to go select a message, somebody
618 * handed us one (or a too-large indication).
620 switch (self
->ith_state
) {
621 case MACH_RCV_SCATTER_SMALL
:
622 case MACH_RCV_TOO_LARGE
:
624 * Somebody tried to give us a too large
625 * message. If we indicated that we cared,
626 * then they only gave us the indication,
627 * otherwise they gave us the indication
628 * AND the message anyway.
630 if (option
& MACH_RCV_LARGE
) {
634 case MACH_MSG_SUCCESS
:
638 panic("ipc_mqueue_receive_results: strange ith_state");
642 panic("ipc_mqueue_receive_results: strange wait_result");
647 ipc_mqueue_receive_continue(
648 __unused
void *param
,
649 wait_result_t wresult
)
651 ipc_mqueue_receive_results(wresult
);
652 mach_msg_receive_continue(); /* hard-coded for now */
656 * Routine: ipc_mqueue_receive
658 * Receive a message from a message queue.
660 * If continuation is non-zero, then we might discard
661 * our kernel stack when we block. We will continue
662 * after unblocking by executing continuation.
664 * If resume is true, then we are resuming a receive
665 * operation after a blocked receive discarded our stack.
667 * Our caller must hold a reference for the port or port set
668 * to which this queue belongs, to keep the queue
669 * from being deallocated.
671 * The kmsg is returned with clean header fields
672 * and with the circular bit turned off.
674 * MACH_MSG_SUCCESS Message returned in kmsgp.
675 * MACH_RCV_TOO_LARGE Message size returned in kmsgp.
676 * MACH_RCV_TIMED_OUT No message obtained.
677 * MACH_RCV_INTERRUPTED No message obtained.
678 * MACH_RCV_PORT_DIED Port/set died; no message.
679 * MACH_RCV_PORT_CHANGED Port moved into set; no msg.
686 mach_msg_option_t option
,
687 mach_msg_size_t max_size
,
688 mach_msg_timeout_t rcv_timeout
,
691 wait_result_t wresult
;
692 thread_t self
= current_thread();
694 wresult
= ipc_mqueue_receive_on_thread(mqueue
, option
, max_size
,
695 rcv_timeout
, interruptible
,
697 if (wresult
== THREAD_NOT_WAITING
)
700 if (wresult
== THREAD_WAITING
) {
701 counter((interruptible
== THREAD_ABORTSAFE
) ?
702 c_ipc_mqueue_receive_block_user
++ :
703 c_ipc_mqueue_receive_block_kernel
++);
705 if (self
->ith_continuation
)
706 thread_block(ipc_mqueue_receive_continue
);
709 wresult
= thread_block(THREAD_CONTINUE_NULL
);
711 ipc_mqueue_receive_results(wresult
);
715 ipc_mqueue_receive_on_thread(
717 mach_msg_option_t option
,
718 mach_msg_size_t max_size
,
719 mach_msg_timeout_t rcv_timeout
,
723 ipc_kmsg_queue_t kmsgs
;
724 wait_result_t wresult
;
736 if (imq_is_set(mqueue
)) {
739 q
= &mqueue
->imq_preposts
;
742 * If we are waiting on a portset mqueue, we need to see if
743 * any of the member ports have work for us. Ports that
744 * have (or recently had) messages will be linked in the
745 * prepost queue for the portset. By holding the portset's
746 * mqueue lock during the search, we tie up any attempts by
747 * mqueue_deliver or portset membership changes that may
751 while(!queue_empty(q
)) {
752 wait_queue_link_t wql
;
753 ipc_mqueue_t port_mq
;
755 queue_remove_first(q
, wql
, wait_queue_link_t
, wql_preposts
);
756 assert(!wql_is_preposted(wql
));
759 * This is a lock order violation, so we have to do it
760 * "softly," putting the link back on the prepost list
761 * if it fails (at the tail is fine since the order of
762 * handling messages from different sources in a set is
763 * not guaranteed and we'd like to skip to the next source
764 * if one is available).
766 port_mq
= (ipc_mqueue_t
)wql
->wql_queue
;
767 if (!imq_lock_try(port_mq
)) {
768 queue_enter(q
, wql
, wait_queue_link_t
, wql_preposts
);
774 goto search_set
; /* start again at beginning - SMP */
778 * If there are no messages on this queue, just skip it
779 * (we already removed the link from the set's prepost queue).
781 kmsgs
= &port_mq
->imq_messages
;
782 if (ipc_kmsg_queue_first(kmsgs
) == IKM_NULL
) {
788 * There are messages, so reinsert the link back
789 * at the tail of the preposted queue (for fairness)
790 * while we still have the portset mqueue locked.
792 queue_enter(q
, wql
, wait_queue_link_t
, wql_preposts
);
796 * Continue on to handling the message with just
797 * the port mqueue locked.
799 ipc_mqueue_select_on_thread(port_mq
, option
, max_size
, thread
);
802 if (thread
->task
!= TASK_NULL
&&
803 thread
->ith_kmsg
!= NULL
&&
804 thread
->ith_kmsg
->ikm_sender
!= NULL
) {
805 lh
= thread
->ith_kmsg
->ikm_sender
->label
;
806 tasklabel_lock(thread
->task
);
807 ip_lock(lh
->lh_port
);
808 rc
= mac_port_check_receive(&thread
->task
->maclabel
,
810 ip_unlock(lh
->lh_port
);
811 tasklabel_unlock(thread
->task
);
813 thread
->ith_state
= MACH_RCV_INVALID_DATA
;
817 return THREAD_NOT_WAITING
;
824 * Receive on a single port. Just try to get the messages.
826 kmsgs
= &mqueue
->imq_messages
;
827 if (ipc_kmsg_queue_first(kmsgs
) != IKM_NULL
) {
828 ipc_mqueue_select_on_thread(mqueue
, option
, max_size
, thread
);
831 if (thread
->task
!= TASK_NULL
&&
832 thread
->ith_kmsg
!= NULL
&&
833 thread
->ith_kmsg
->ikm_sender
!= NULL
) {
834 lh
= thread
->ith_kmsg
->ikm_sender
->label
;
835 tasklabel_lock(thread
->task
);
836 ip_lock(lh
->lh_port
);
837 rc
= mac_port_check_receive(&thread
->task
->maclabel
,
839 ip_unlock(lh
->lh_port
);
840 tasklabel_unlock(thread
->task
);
842 thread
->ith_state
= MACH_RCV_INVALID_DATA
;
846 return THREAD_NOT_WAITING
;
851 * Looks like we'll have to block. The mqueue we will
852 * block on (whether the set's or the local port's) is
855 if (option
& MACH_RCV_TIMEOUT
) {
856 if (rcv_timeout
== 0) {
859 thread
->ith_state
= MACH_RCV_TIMED_OUT
;
860 return THREAD_NOT_WAITING
;
865 thread
->ith_state
= MACH_RCV_IN_PROGRESS
;
866 thread
->ith_option
= option
;
867 thread
->ith_msize
= max_size
;
869 if (option
& MACH_RCV_TIMEOUT
)
870 clock_interval_to_deadline(rcv_timeout
, 1000*NSEC_PER_USEC
, &deadline
);
874 wresult
= wait_queue_assert_wait64_locked(&mqueue
->imq_wait_queue
,
877 TIMEOUT_URGENCY_USER_NORMAL
,
880 /* preposts should be detected above, not here */
881 if (wresult
== THREAD_AWAKENED
)
882 panic("ipc_mqueue_receive_on_thread: sleep walking");
884 thread_unlock(thread
);
892 * Routine: ipc_mqueue_select_on_thread
894 * A receiver discovered that there was a message on the queue
895 * before he had to block. Pick the message off the queue and
896 * "post" it to thread.
900 * There is a message.
902 * MACH_MSG_SUCCESS Actually selected a message for ourselves.
903 * MACH_RCV_TOO_LARGE May or may not have pull it, but it is large
906 ipc_mqueue_select_on_thread(
908 mach_msg_option_t option
,
909 mach_msg_size_t max_size
,
913 mach_msg_return_t mr
= MACH_MSG_SUCCESS
;
914 mach_msg_size_t rcv_size
;
917 * Do some sanity checking of our ability to receive
918 * before pulling the message off the queue.
920 kmsg
= ipc_kmsg_queue_first(&mqueue
->imq_messages
);
921 assert(kmsg
!= IKM_NULL
);
924 * If we really can't receive it, but we had the
925 * MACH_RCV_LARGE option set, then don't take it off
926 * the queue, instead return the appropriate error
929 rcv_size
= ipc_kmsg_copyout_size(kmsg
, thread
->map
);
930 if (rcv_size
+ REQUESTED_TRAILER_SIZE(thread_is_64bit(thread
), option
) > max_size
) {
931 mr
= MACH_RCV_TOO_LARGE
;
932 if (option
& MACH_RCV_LARGE
) {
933 thread
->ith_receiver_name
= mqueue
->imq_receiver_name
;
934 thread
->ith_kmsg
= IKM_NULL
;
935 thread
->ith_msize
= rcv_size
;
936 thread
->ith_seqno
= 0;
937 thread
->ith_state
= mr
;
942 ipc_kmsg_rmqueue_first_macro(&mqueue
->imq_messages
, kmsg
);
943 ipc_mqueue_release_msgcount(mqueue
);
944 thread
->ith_seqno
= mqueue
->imq_seqno
++;
945 thread
->ith_kmsg
= kmsg
;
946 thread
->ith_state
= mr
;
948 current_task()->messages_received
++;
953 * Routine: ipc_mqueue_peek
955 * Peek at a (non-set) message queue to see if it has a message
956 * matching the sequence number provided (if zero, then the
957 * first message in the queue) and return vital info about the
961 * Locks may be held by callers, so this routine cannot block.
962 * Caller holds reference on the message queue.
965 ipc_mqueue_peek(ipc_mqueue_t mq
,
966 mach_port_seqno_t
*seqnop
,
967 mach_msg_size_t
*msg_sizep
,
968 mach_msg_id_t
*msg_idp
,
969 mach_msg_max_trailer_t
*msg_trailerp
)
971 ipc_kmsg_queue_t kmsgq
;
973 mach_port_seqno_t seqno
, msgoff
;
977 assert(!imq_is_set(mq
));
982 seqno
= (seqnop
!= NULL
) ? seqno
= *seqnop
: 0;
985 seqno
= mq
->imq_seqno
;
987 } else if (seqno
>= mq
->imq_seqno
&&
988 seqno
< mq
->imq_seqno
+ mq
->imq_msgcount
) {
989 msgoff
= seqno
- mq
->imq_seqno
;
993 /* look for the message that would match that seqno */
994 kmsgq
= &mq
->imq_messages
;
995 kmsg
= ipc_kmsg_queue_first(kmsgq
);
996 while (msgoff
-- && kmsg
!= IKM_NULL
) {
997 kmsg
= ipc_kmsg_queue_next(kmsgq
, kmsg
);
999 if (kmsg
== IKM_NULL
)
1002 /* found one - return the requested info */
1005 if (msg_sizep
!= NULL
)
1006 *msg_sizep
= kmsg
->ikm_header
->msgh_size
;
1007 if (msg_idp
!= NULL
)
1008 *msg_idp
= kmsg
->ikm_header
->msgh_id
;
1009 if (msg_trailerp
!= NULL
)
1010 memcpy(msg_trailerp
,
1011 (mach_msg_max_trailer_t
*)((vm_offset_t
)kmsg
->ikm_header
+
1012 round_msg(kmsg
->ikm_header
->msgh_size
)),
1013 sizeof(mach_msg_max_trailer_t
));
1023 * Routine: ipc_mqueue_set_peek
1025 * Peek at a message queue set to see if it has any ports
1029 * Locks may be held by callers, so this routine cannot block.
1030 * Caller holds reference on the message queue.
1033 ipc_mqueue_set_peek(ipc_mqueue_t mq
)
1035 wait_queue_link_t wql
;
1040 assert(imq_is_set(mq
));
1046 * peek at the contained port message queues, return as soon as
1047 * we spot a message on one of the message queues linked on the
1048 * prepost list. No need to lock each message queue, as only the
1049 * head of each queue is checked. If a message wasn't there before
1050 * we entered here, no need to find it (if we do, great).
1053 q
= &mq
->imq_preposts
;
1054 queue_iterate(q
, wql
, wait_queue_link_t
, wql_preposts
) {
1055 ipc_mqueue_t port_mq
= (ipc_mqueue_t
)wql
->wql_queue
;
1056 ipc_kmsg_queue_t kmsgs
= &port_mq
->imq_messages
;
1058 if (ipc_kmsg_queue_first(kmsgs
) != IKM_NULL
) {
1069 * Routine: ipc_mqueue_set_gather_member_names
1071 * Iterate a message queue set to identify the member port
1072 * names. Actual returned names is limited to maxnames entries,
1073 * but we keep counting the actual number of members to let
1074 * the caller decide to retry if necessary.
1077 * Locks may be held by callers, so this routine cannot block.
1078 * Caller holds reference on the message queue.
1081 ipc_mqueue_set_gather_member_names(
1083 ipc_entry_num_t maxnames
,
1084 mach_port_name_t
*names
,
1085 ipc_entry_num_t
*actualp
)
1087 wait_queue_link_t wql
;
1090 ipc_entry_num_t actual
= 0;
1092 assert(imq_is_set(mq
));
1098 * Iterate over the member ports through the mqueue set links
1099 * capturing as many names as we can.
1101 q
= &mq
->imq_setlinks
;
1102 queue_iterate(q
, wql
, wait_queue_link_t
, wql_setlinks
) {
1103 ipc_mqueue_t port_mq
= (ipc_mqueue_t
)wql
->wql_queue
;
1105 if (actual
< maxnames
)
1106 names
[actual
] = port_mq
->imq_receiver_name
;
1117 * Routine: ipc_mqueue_destroy
1119 * Destroy a (non-set) message queue.
1120 * Set any blocked senders running.
1121 * Destroy the kmsgs in the queue.
1124 * Receivers were removed when the receive right was "changed"
1128 ipc_mqueue_t mqueue
)
1130 ipc_kmsg_queue_t kmqueue
;
1132 boolean_t reap
= FALSE
;
1138 * rouse all blocked senders
1140 mqueue
->imq_fullwaiters
= FALSE
;
1141 wait_queue_wakeup64_all_locked(
1142 &mqueue
->imq_wait_queue
,
1148 * Move messages from the specified queue to the per-thread
1149 * clean/drain queue while we have the mqueue lock.
1151 kmqueue
= &mqueue
->imq_messages
;
1152 while ((kmsg
= ipc_kmsg_dequeue(kmqueue
)) != IKM_NULL
) {
1154 first
= ipc_kmsg_delayed_destroy(kmsg
);
1163 * Destroy the messages we enqueued if we aren't nested
1164 * inside some other attempt to drain the same queue.
1167 ipc_kmsg_reap_delayed();
1171 * Routine: ipc_mqueue_set_qlimit
1173 * Changes a message queue limit; the maximum number
1174 * of messages which may be queued.
1180 ipc_mqueue_set_qlimit(
1181 ipc_mqueue_t mqueue
,
1182 mach_port_msgcount_t qlimit
)
1186 assert(qlimit
<= MACH_PORT_QLIMIT_MAX
);
1188 /* wake up senders allowed by the new qlimit */
1191 if (qlimit
> mqueue
->imq_qlimit
) {
1192 mach_port_msgcount_t i
, wakeup
;
1194 /* caution: wakeup, qlimit are unsigned */
1195 wakeup
= qlimit
- mqueue
->imq_qlimit
;
1197 for (i
= 0; i
< wakeup
; i
++) {
1198 if (wait_queue_wakeup64_one_locked(
1199 &mqueue
->imq_wait_queue
,
1202 FALSE
) == KERN_NOT_WAITING
) {
1203 mqueue
->imq_fullwaiters
= FALSE
;
1206 mqueue
->imq_msgcount
++; /* give it to the awakened thread */
1209 mqueue
->imq_qlimit
= qlimit
;
1215 * Routine: ipc_mqueue_set_seqno
1217 * Changes an mqueue's sequence number.
1219 * Caller holds a reference to the queue's containing object.
1222 ipc_mqueue_set_seqno(
1223 ipc_mqueue_t mqueue
,
1224 mach_port_seqno_t seqno
)
1230 mqueue
->imq_seqno
= seqno
;
1237 * Routine: ipc_mqueue_copyin
1239 * Convert a name in a space to a message queue.
1241 * Nothing locked. If successful, the caller gets a ref for
1242 * for the object. This ref ensures the continued existence of
1245 * MACH_MSG_SUCCESS Found a message queue.
1246 * MACH_RCV_INVALID_NAME The space is dead.
1247 * MACH_RCV_INVALID_NAME The name doesn't denote a right.
1248 * MACH_RCV_INVALID_NAME
1249 * The denoted right is not receive or port set.
1250 * MACH_RCV_IN_SET Receive right is a member of a set.
1256 mach_port_name_t name
,
1257 ipc_mqueue_t
*mqueuep
,
1258 ipc_object_t
*objectp
)
1261 ipc_object_t object
;
1262 ipc_mqueue_t mqueue
;
1264 is_read_lock(space
);
1265 if (!is_active(space
)) {
1266 is_read_unlock(space
);
1267 return MACH_RCV_INVALID_NAME
;
1270 entry
= ipc_entry_lookup(space
, name
);
1271 if (entry
== IE_NULL
) {
1272 is_read_unlock(space
);
1273 return MACH_RCV_INVALID_NAME
;
1276 object
= entry
->ie_object
;
1278 if (entry
->ie_bits
& MACH_PORT_TYPE_RECEIVE
) {
1281 port
= (ipc_port_t
) object
;
1282 assert(port
!= IP_NULL
);
1285 assert(ip_active(port
));
1286 assert(port
->ip_receiver_name
== name
);
1287 assert(port
->ip_receiver
== space
);
1288 is_read_unlock(space
);
1289 mqueue
= &port
->ip_messages
;
1291 } else if (entry
->ie_bits
& MACH_PORT_TYPE_PORT_SET
) {
1294 pset
= (ipc_pset_t
) object
;
1295 assert(pset
!= IPS_NULL
);
1298 assert(ips_active(pset
));
1299 assert(pset
->ips_local_name
== name
);
1300 is_read_unlock(space
);
1302 mqueue
= &pset
->ips_messages
;
1304 is_read_unlock(space
);
1305 return MACH_RCV_INVALID_NAME
;
1309 * At this point, the object is locked and active,
1310 * the space is unlocked, and mqueue is initialized.
1313 io_reference(object
);
1318 return MACH_MSG_SUCCESS
;