/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *      File:   ipc/ipc_mqueue.c
 *
 *      Functions to manipulate IPC message queues.
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <mach/port.h>
#include <mach/message.h>
#include <mach/sync_policy.h>

#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_mig.h>       /* XXX - for mach_msg_receive_continue */
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_space.h>

#include <security/mac_mach_internal.h>
int ipc_mqueue_full;            /* address is event for queue space */
int ipc_mqueue_rcv;             /* address is event for message arrival */

/* forward declarations */
void ipc_mqueue_receive_results(wait_result_t result);
/*
 *      Routine:        ipc_mqueue_init
 *      Purpose:
 *              Initialize a newly-allocated message queue.
 */
void
ipc_mqueue_init(
        ipc_mqueue_t    mqueue,
        boolean_t       is_set)
{
        if (is_set) {
                wait_queue_set_init(&mqueue->imq_set_queue, SYNC_POLICY_FIFO);
        } else {
                wait_queue_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO);
                ipc_kmsg_queue_init(&mqueue->imq_messages);
                mqueue->imq_seqno = 0;
                mqueue->imq_msgcount = 0;
                mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
                mqueue->imq_fullwaiters = FALSE;
        }
}
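
/*
 * Illustrative sketch, not part of the original file: after a port-style
 * ipc_mqueue_init(), the queue starts out empty, unsequenced, and at the
 * default limit.  The helper below merely restates those invariants and is
 * kept under #if 0 as an example; its name is an assumption.
 */
#if 0   /* example only */
static void
ipc_mqueue_assert_fresh(ipc_mqueue_t mqueue)
{
        assert(ipc_kmsg_queue_empty(&mqueue->imq_messages));
        assert(mqueue->imq_msgcount == 0);
        assert(mqueue->imq_seqno == 0);
        assert(mqueue->imq_qlimit == MACH_PORT_QLIMIT_DEFAULT);
        assert(!mqueue->imq_fullwaiters);
}
#endif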
/*
 *      Routine:        ipc_mqueue_member
 *      Purpose:
 *              Indicate whether the (port) mqueue is a member of
 *              this portset's mqueue.  We do this by checking
 *              whether the portset mqueue's waitq is a member of
 *              the port's mqueue waitq.
 *      Conditions:
 *              the portset's mqueue is not already a member
 *              this may block while allocating linkage structures.
 */

boolean_t
ipc_mqueue_member(
        ipc_mqueue_t            port_mqueue,
        ipc_mqueue_t            set_mqueue)
{
        wait_queue_t     port_waitq = &port_mqueue->imq_wait_queue;
        wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;

        return (wait_queue_member(port_waitq, set_waitq));
}
/*
 *      Routine:        ipc_mqueue_remove
 *      Purpose:
 *              Remove the association between the queue and the specified
 *              set message queue.
 */

kern_return_t
ipc_mqueue_remove(
        ipc_mqueue_t     mqueue,
        ipc_mqueue_t     set_mqueue)
{
        wait_queue_t     mq_waitq = &mqueue->imq_wait_queue;
        wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;

        return wait_queue_unlink(mq_waitq, set_waitq);
}
/*
 *      Routine:        ipc_mqueue_remove_from_all
 *      Purpose:
 *              Remove the mqueue from all the sets it is a member of
 */
void
ipc_mqueue_remove_from_all(
        ipc_mqueue_t    mqueue)
{
        wait_queue_t    mq_waitq = &mqueue->imq_wait_queue;

        wait_queue_unlink_all(mq_waitq);
        return;
}
/*
 *      Routine:        ipc_mqueue_remove_all
 *      Purpose:
 *              Remove all the member queues from the specified set.
 */
void
ipc_mqueue_remove_all(
        ipc_mqueue_t    mqueue)
{
        wait_queue_set_t        mq_setq = &mqueue->imq_set_queue;

        wait_queue_set_unlink_all(mq_setq);
        return;
}
/*
 *      Routine:        ipc_mqueue_add
 *      Purpose:
 *              Associate the portset's mqueue with the port's mqueue.
 *              This has to be done so that posting the port will wakeup
 *              a portset waiter.  If there are waiters on the portset
 *              mqueue and messages on the port mqueue, try to match them
 *              up now.
 */
kern_return_t
ipc_mqueue_add(
        ipc_mqueue_t     port_mqueue,
        ipc_mqueue_t     set_mqueue)
{
        wait_queue_t     port_waitq = &port_mqueue->imq_wait_queue;
        wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
        ipc_kmsg_queue_t kmsgq;
        ipc_kmsg_t       kmsg, next;
        kern_return_t    kr;
        spl_t            s;

        kr = wait_queue_link(port_waitq, set_waitq);
        if (kr != KERN_SUCCESS)
                return kr;

        /*
         * Now that the set has been added to the port, there may be
         * messages queued on the port and threads waiting on the set
         * waitq.  Lets get them together.
         */
        s = splsched();
        imq_lock(port_mqueue);
        kmsgq = &port_mqueue->imq_messages;
        for (kmsg = ipc_kmsg_queue_first(kmsgq);
             kmsg != IKM_NULL;
             kmsg = next) {
                next = ipc_kmsg_queue_next(kmsgq, kmsg);

                for (;;) {
                        thread_t th;

                        th = wait_queue_wakeup64_identity_locked(
                                                port_waitq,
                                                IPC_MQUEUE_RECEIVE,
                                                THREAD_AWAKENED,
                                                FALSE);
                        /* waitq/mqueue still locked, thread locked */

                        if (th == THREAD_NULL)
                                goto leave;

                        /*
                         * Found a receiver.  See if they can handle the message
                         * correctly (the message is not too large for them, or
                         * they didn't care to be informed that the message was
                         * too large).  If they can't handle it, take them off
                         * the list and let them go back and figure it out and
                         * just move onto the next.
                         */
                        if (th->ith_msize <
                            kmsg->ikm_header->msgh_size +
                            REQUESTED_TRAILER_SIZE(th->ith_option)) {
                                th->ith_state = MACH_RCV_TOO_LARGE;
                                th->ith_msize = kmsg->ikm_header->msgh_size;
                                if (th->ith_option & MACH_RCV_LARGE) {
                                        /*
                                         * let him go without message
                                         */
                                        th->ith_kmsg = IKM_NULL;
                                        th->ith_seqno = 0;
                                        thread_unlock(th);
                                        continue; /* find another thread */
                                }
                        } else {
                                th->ith_state = MACH_MSG_SUCCESS;
                        }

                        /*
                         * This thread is going to take this message,
                         * so give it to him.
                         */
                        ipc_kmsg_rmqueue(kmsgq, kmsg);
                        ipc_mqueue_release_msgcount(port_mqueue);

                        th->ith_kmsg = kmsg;
                        th->ith_seqno = port_mqueue->imq_seqno++;
                        thread_unlock(th);
                        break;  /* go to next message */
                }
        }
 leave:
        imq_unlock(port_mqueue);
        splx(s);
        return KERN_SUCCESS;
}
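
/*
 * Illustrative sketch, not taken from this file: the membership routines
 * above compose as add -> member -> remove.  Given two already-initialized
 * mqueues (a port's and a set's), hypothetical caller code could look like
 * the fragment below.
 */
#if 0   /* example only */
        if (ipc_mqueue_add(port_mq, set_mq) == KERN_SUCCESS) {
                assert(ipc_mqueue_member(port_mq, set_mq));
                (void) ipc_mqueue_remove(port_mq, set_mq);
        }
#endif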
/*
 *      Routine:        ipc_mqueue_changed
 *      Purpose:
 *              Wake up receivers waiting in a message queue.
 *      Conditions:
 *              The message queue is locked.
 */

void
ipc_mqueue_changed(
        ipc_mqueue_t    mqueue)
{
        wait_queue_wakeup64_all_locked(
                                &mqueue->imq_wait_queue,
                                IPC_MQUEUE_RECEIVE,
                                THREAD_RESTART,
                                FALSE);         /* unlock waitq? */
}
/*
 *      Routine:        ipc_mqueue_send
 *      Purpose:
 *              Send a message to a message queue.  The message holds a reference
 *              for the destination port for this message queue in the
 *              msgh_remote_port field.
 *
 *              If unsuccessful, the caller still has possession of
 *              the message and must do something with it.  If successful,
 *              the message is queued, given to a receiver, or destroyed.
 *      Returns:
 *              MACH_MSG_SUCCESS        The message was accepted.
 *              MACH_SEND_TIMED_OUT     Caller still has message.
 *              MACH_SEND_INTERRUPTED   Caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
        ipc_mqueue_t            mqueue,
        ipc_kmsg_t              kmsg,
        mach_msg_option_t       option,
        mach_msg_timeout_t      send_timeout)
{
        wait_result_t wresult;
        spl_t s;

        /*
         *  Don't block if:
         *      1) We're under the queue limit.
         *      2) Caller used the MACH_SEND_ALWAYS internal option.
         *      3) Message is sent to a send-once right.
         */
        s = splsched();
        imq_lock(mqueue);

        if (!imq_full(mqueue) ||
            (option & MACH_SEND_ALWAYS) ||
            (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
             MACH_MSG_TYPE_PORT_SEND_ONCE)) {
                mqueue->imq_msgcount++;
                assert(mqueue->imq_msgcount > 0);
                imq_unlock(mqueue);
                splx(s);
        } else {
                thread_t cur_thread = current_thread();
                uint64_t deadline;

                /*
                 * We have to wait for space to be granted to us.
                 */
                if ((option & MACH_SEND_TIMEOUT) && (send_timeout == 0)) {
                        imq_unlock(mqueue);
                        splx(s);
                        return MACH_SEND_TIMED_OUT;
                }
                mqueue->imq_fullwaiters = TRUE;
                thread_lock(cur_thread);
                if (option & MACH_SEND_TIMEOUT)
                        clock_interval_to_deadline(send_timeout, 1000*NSEC_PER_USEC, &deadline);
                else
                        deadline = 0;
                wresult = wait_queue_assert_wait64_locked(
                                                &mqueue->imq_wait_queue,
                                                IPC_MQUEUE_FULL,
                                                THREAD_ABORTSAFE, deadline,
                                                cur_thread);
                thread_unlock(cur_thread);
                imq_unlock(mqueue);
                splx(s);

                if (wresult == THREAD_WAITING) {
                        wresult = thread_block(THREAD_CONTINUE_NULL);
                        counter(c_ipc_mqueue_send_block++);
                }

                switch (wresult) {
                case THREAD_TIMED_OUT:
                        assert(option & MACH_SEND_TIMEOUT);
                        return MACH_SEND_TIMED_OUT;

                case THREAD_AWAKENED:
                        /* we can proceed - inherited msgcount from waker */
                        assert(mqueue->imq_msgcount > 0);
                        break;

                case THREAD_INTERRUPTED:
                        return MACH_SEND_INTERRUPTED;

                default:
                        panic("ipc_mqueue_send");
                }
        }

        ipc_mqueue_post(mqueue, kmsg);
        return MACH_MSG_SUCCESS;
}
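
/*
 * Illustrative sketch, not part of the original file: the three
 * "don't block" conditions tested at the top of ipc_mqueue_send(),
 * factored into a hypothetical predicate purely for readability.
 */
#if 0   /* example only */
static boolean_t
ipc_mqueue_send_would_not_block(
        ipc_mqueue_t            mqueue,
        ipc_kmsg_t              kmsg,
        mach_msg_option_t       option)
{
        return (!imq_full(mqueue) ||                            /* under the limit */
                (option & MACH_SEND_ALWAYS) ||                  /* internal override */
                (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
                 MACH_MSG_TYPE_PORT_SEND_ONCE));                /* send-once right */
}
#endif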
/*
 *      Routine:        ipc_mqueue_release_msgcount
 *      Purpose:
 *              Release a message queue reference in the case where we
 *              found a waiter.
 *      Conditions:
 *              The message queue is locked.
 *              The message corresponding to this reference is off the queue.
 */
void
ipc_mqueue_release_msgcount(
        ipc_mqueue_t mqueue)
{
        assert(imq_held(mqueue));
        assert(mqueue->imq_msgcount > 1 || ipc_kmsg_queue_empty(&mqueue->imq_messages));

        mqueue->imq_msgcount--;

        if (!imq_full(mqueue) && mqueue->imq_fullwaiters) {
                if (wait_queue_wakeup64_one_locked(
                                                &mqueue->imq_wait_queue,
                                                IPC_MQUEUE_FULL,
                                                THREAD_AWAKENED,
                                                FALSE) != KERN_SUCCESS) {
                        mqueue->imq_fullwaiters = FALSE;
                } else {
                        /* gave away our slot - add reference back */
                        mqueue->imq_msgcount++;
                }
        }
}
/*
 *      Routine:        ipc_mqueue_post
 *      Purpose:
 *              Post a message to a waiting receiver or enqueue it.  If a
 *              receiver is waiting, we can release our reserved space in
 *              the message queue.
 *
 *      Conditions:
 *              If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
        register ipc_mqueue_t   mqueue,
        register ipc_kmsg_t     kmsg)
{
        spl_t s;

        /*
         *      While the msg queue is locked, we have control of the
         *      kmsg, so the ref in it for the port is still good.
         *
         *      Check for a receiver for the message.
         */
        s = splsched();
        imq_lock(mqueue);
        for (;;) {
                wait_queue_t waitq = &mqueue->imq_wait_queue;
                thread_t receiver;

                receiver = wait_queue_wakeup64_identity_locked(
                                                        waitq,
                                                        IPC_MQUEUE_RECEIVE,
                                                        THREAD_AWAKENED,
                                                        FALSE);
                /* waitq still locked, thread locked */

                if (receiver == THREAD_NULL) {
                        /*
                         * no receivers; queue kmsg
                         */
                        assert(mqueue->imq_msgcount > 0);
                        ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
                        break;
                }

                /*
                 * We found a waiting thread.
                 * If the message is too large or the scatter list is too small
                 * the thread we wake up will get that as its status.
                 */
                if (receiver->ith_msize <
                                (kmsg->ikm_header->msgh_size) +
                                REQUESTED_TRAILER_SIZE(receiver->ith_option)) {
                        receiver->ith_msize = kmsg->ikm_header->msgh_size;
                        receiver->ith_state = MACH_RCV_TOO_LARGE;
                } else {
                        receiver->ith_state = MACH_MSG_SUCCESS;
                }

                /*
                 * If there is no problem with the upcoming receive, or the
                 * receiver thread didn't specifically ask for special too
                 * large error condition, go ahead and select it anyway.
                 */
                if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
                    !(receiver->ith_option & MACH_RCV_LARGE)) {

                        receiver->ith_kmsg = kmsg;
                        receiver->ith_seqno = mqueue->imq_seqno++;
                        thread_unlock(receiver);

                        /* we didn't need our reserved spot in the queue */
                        ipc_mqueue_release_msgcount(mqueue);
                        break;
                }

                /*
                 * Otherwise, this thread needs to be released to run
                 * and handle its error without getting the message.  We
                 * need to go back and pick another one.
                 */
                receiver->ith_kmsg = IKM_NULL;
                receiver->ith_seqno = 0;
                thread_unlock(receiver);
        }

        imq_unlock(mqueue);
        splx(s);

        current_task()->messages_sent++;
        return;
}
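
/*
 * Worked restatement of the size test above (an illustration, not code
 * from this file): a waiting receiver is handed the message only if
 *
 *      ith_msize >= msgh_size + REQUESTED_TRAILER_SIZE(ith_option)
 *
 * Otherwise the receiver is woken with MACH_RCV_TOO_LARGE and, unless it
 * asked for MACH_RCV_LARGE, it still gets the message along with the
 * indication; with MACH_RCV_LARGE it gets only the indication and the
 * loop above looks for another receiver (or queues the kmsg).
 */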
void
ipc_mqueue_receive_results(wait_result_t saved_wait_result)
{
        thread_t                self = current_thread();
        mach_msg_option_t       option = self->ith_option;

        /*
         * why did we wake up?
         */
        switch (saved_wait_result) {
        case THREAD_TIMED_OUT:
                self->ith_state = MACH_RCV_TIMED_OUT;
                return;

        case THREAD_INTERRUPTED:
                self->ith_state = MACH_RCV_INTERRUPTED;
                return;

        case THREAD_RESTART:
                /* something bad happened to the port/set */
                self->ith_state = MACH_RCV_PORT_CHANGED;
                return;

        case THREAD_AWAKENED:
                /*
                 * We do not need to go select a message, somebody
                 * handed us one (or a too-large indication).
                 */
                switch (self->ith_state) {
                case MACH_RCV_SCATTER_SMALL:
                case MACH_RCV_TOO_LARGE:
                        /*
                         * Somebody tried to give us a too large
                         * message. If we indicated that we cared,
                         * then they only gave us the indication,
                         * otherwise they gave us the indication
                         * AND the message anyway.
                         */
                        if (option & MACH_RCV_LARGE) {
                                return;
                        }
                        /* fall through */

                case MACH_MSG_SUCCESS:
                        return;

                default:
                        panic("ipc_mqueue_receive_results: strange ith_state");
                }

        default:
                panic("ipc_mqueue_receive_results: strange wait_result");
        }
}

void
ipc_mqueue_receive_continue(
        __unused void *param,
        wait_result_t wresult)
{
        ipc_mqueue_receive_results(wresult);
        mach_msg_receive_continue();  /* hard-coded for now */
}
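
/*
 * Summary of ipc_mqueue_receive_results(): wakeup reason -> resulting ith_state
 *
 *      THREAD_TIMED_OUT    -> MACH_RCV_TIMED_OUT
 *      THREAD_INTERRUPTED  -> MACH_RCV_INTERRUPTED
 *      THREAD_RESTART      -> MACH_RCV_PORT_CHANGED
 *      THREAD_AWAKENED     -> ith_state was already set by the poster
 *                             (MACH_MSG_SUCCESS or MACH_RCV_TOO_LARGE)
 */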
/*
 *      Routine:        ipc_mqueue_receive
 *      Purpose:
 *              Receive a message from a message queue.
 *
 *              If continuation is non-zero, then we might discard
 *              our kernel stack when we block.  We will continue
 *              after unblocking by executing continuation.
 *
 *              If resume is true, then we are resuming a receive
 *              operation after a blocked receive discarded our stack.
 *      Conditions:
 *              Our caller must hold a reference for the port or port set
 *              to which this queue belongs, to keep the queue
 *              from being deallocated.
 *
 *              The kmsg is returned with clean header fields
 *              and with the circular bit turned off.
 *      Returns:
 *              MACH_MSG_SUCCESS        Message returned in kmsgp.
 *              MACH_RCV_TOO_LARGE      Message size returned in kmsgp.
 *              MACH_RCV_TIMED_OUT      No message obtained.
 *              MACH_RCV_INTERRUPTED    No message obtained.
 *              MACH_RCV_PORT_DIED      Port/set died; no message.
 *              MACH_RCV_PORT_CHANGED   Port moved into set; no msg.
 */

void
ipc_mqueue_receive(
        ipc_mqueue_t            mqueue,
        mach_msg_option_t       option,
        mach_msg_size_t         max_size,
        mach_msg_timeout_t      rcv_timeout,
        int                     interruptible)
{
        ipc_kmsg_queue_t        kmsgs;
        wait_result_t           wresult;
        thread_t                self;
        uint64_t                deadline;
        spl_t                   s;
        ipc_labelh_t            lh;
        task_t                  task;
        int                     rc;

        s = splsched();
        imq_lock(mqueue);
        self = current_thread();

        if (imq_is_set(mqueue)) {
                wait_queue_link_t wql;
                ipc_mqueue_t port_mq;
                queue_t q;

        search_set:
                q = &mqueue->imq_setlinks;

                /*
                 * If we are waiting on a portset mqueue, we need to see if
                 * any of the member ports have work for us.  If so, try to
                 * deliver one of those messages. By holding the portset's
                 * mqueue lock during the search, we tie up any attempts by
                 * mqueue_deliver or portset membership changes that may
                 * cross our path. But this is a lock order violation, so we
                 * have to do it "softly."  If we don't find a message waiting
                 * for us, we will assert our intention to wait while still
                 * holding that lock.  When we release the lock, the deliver/
                 * change will succeed and find us.
                 */
                queue_iterate(q, wql, wait_queue_link_t, wql_setlinks) {
                        port_mq = (ipc_mqueue_t)wql->wql_queue;
                        kmsgs = &port_mq->imq_messages;

                        if (!imq_lock_try(port_mq)) {
                                imq_unlock(mqueue);
                                splx(s);
                                s = splsched();
                                imq_lock(mqueue);
                                goto search_set; /* start again at beginning - SMP */
                        }

                        /*
                         * If there is still a message to be had, we will
                         * try to select it (may not succeed because of size
                         * and options).  In any case, we deliver those
                         * results back to the user.
                         *
                         * We also move the port's linkage to the tail of the
                         * list for this set (fairness). Future versions will
                         * sort by timestamp or priority.
                         */
                        if (ipc_kmsg_queue_first(kmsgs) == IKM_NULL) {
                                imq_unlock(port_mq);
                                continue;
                        }
                        queue_remove(q, wql, wait_queue_link_t, wql_setlinks);
                        queue_enter(q, wql, wait_queue_link_t, wql_setlinks);
                        imq_unlock(mqueue);

                        ipc_mqueue_select(port_mq, option, max_size);
                        imq_unlock(port_mq);

                        if (self->ith_kmsg != NULL &&
                            self->ith_kmsg->ikm_sender != NULL) {
                                lh = self->ith_kmsg->ikm_sender->label;
                                task = current_task();
                                tasklabel_lock(task);
                                ip_lock(lh->lh_port);
                                rc = mac_port_check_receive(&task->maclabel,
                                                            &lh->lh_label);
                                ip_unlock(lh->lh_port);
                                tasklabel_unlock(task);
                                if (rc)
                                        self->ith_state = MACH_RCV_INVALID_DATA;
                        }

                        splx(s);
                        return;
                }
        } else {
                /*
                 * Receive on a single port. Just try to get the messages.
                 */
                kmsgs = &mqueue->imq_messages;
                if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
                        ipc_mqueue_select(mqueue, option, max_size);
                        imq_unlock(mqueue);

                        if (self->ith_kmsg != NULL &&
                            self->ith_kmsg->ikm_sender != NULL) {
                                lh = self->ith_kmsg->ikm_sender->label;
                                task = current_task();
                                tasklabel_lock(task);
                                ip_lock(lh->lh_port);
                                rc = mac_port_check_receive(&task->maclabel,
                                                            &lh->lh_label);
                                ip_unlock(lh->lh_port);
                                tasklabel_unlock(task);
                                if (rc)
                                        self->ith_state = MACH_RCV_INVALID_DATA;
                        }

                        splx(s);
                        return;
                }
        }

        /*
         * Looks like we'll have to block.  The mqueue we will
         * block on (whether the set's or the local port's) is
         * still locked.
         */
        if (option & MACH_RCV_TIMEOUT) {
                if (rcv_timeout == 0) {
                        imq_unlock(mqueue);
                        splx(s);
                        self->ith_state = MACH_RCV_TIMED_OUT;
                        return;
                }
        }

        thread_lock(self);
        self->ith_state = MACH_RCV_IN_PROGRESS;
        self->ith_option = option;
        self->ith_msize = max_size;

        if (option & MACH_RCV_TIMEOUT)
                clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline);
        else
                deadline = 0;

        wresult = wait_queue_assert_wait64_locked(&mqueue->imq_wait_queue,
                                                  IPC_MQUEUE_RECEIVE,
                                                  interruptible, deadline,
                                                  self);
        thread_unlock(self);
        imq_unlock(mqueue);
        splx(s);

        if (wresult == THREAD_WAITING) {
                counter((interruptible == THREAD_ABORTSAFE) ?
                        c_ipc_mqueue_receive_block_user++ :
                        c_ipc_mqueue_receive_block_kernel++);

                if (self->ith_continuation)
                        thread_block(ipc_mqueue_receive_continue);
                        /* NOTREACHED */

                wresult = thread_block(THREAD_CONTINUE_NULL);
        }
        ipc_mqueue_receive_results(wresult);
}
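
/*
 * Illustrative sketch, assuming a simplified caller (this is not the real
 * mach_msg receive path): a kernel caller first resolves a port or port-set
 * name to an mqueue with ipc_mqueue_copyin() (defined later in this file),
 * then calls ipc_mqueue_receive(); results come back through the current
 * thread's ith_state/ith_kmsg/ith_seqno fields rather than a return value.
 * Here space, name, max_size and rcv_timeout stand for the caller's own
 * parameters.
 */
#if 0   /* example only */
        ipc_mqueue_t mqueue;
        ipc_object_t object;

        if (ipc_mqueue_copyin(space, name, &mqueue, &object) == MACH_MSG_SUCCESS) {
                current_thread()->ith_continuation = NULL;
                ipc_mqueue_receive(mqueue, MACH_RCV_TIMEOUT, max_size,
                                   rcv_timeout, THREAD_ABORTSAFE);
                /* inspect current_thread()->ith_state / ith_kmsg here,
                 * then drop the object reference taken by copyin */
        }
#endif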
/*
 *      Routine:        ipc_mqueue_select
 *      Purpose:
 *              A receiver discovered that there was a message on the queue
 *              before he had to block.  Pick the message off the queue and
 *              "post" it to himself.
 *      Conditions:
 *              There is a message.
 *      Returns:
 *              MACH_MSG_SUCCESS        Actually selected a message for ourselves.
 *              MACH_RCV_TOO_LARGE      May or may not have pulled it, but it is large.
 */
void
ipc_mqueue_select(
        ipc_mqueue_t            mqueue,
        mach_msg_option_t       option,
        mach_msg_size_t         max_size)
{
        thread_t self = current_thread();
        ipc_kmsg_t kmsg;
        mach_msg_return_t mr;
        mach_msg_size_t rcv_size;

        mr = MACH_MSG_SUCCESS;

        /*
         * Do some sanity checking of our ability to receive
         * before pulling the message off the queue.
         */
        kmsg = ipc_kmsg_queue_first(&mqueue->imq_messages);
        assert(kmsg != IKM_NULL);

        /*
         * If we really can't receive it, but we had the
         * MACH_RCV_LARGE option set, then don't take it off
         * the queue, instead return the appropriate error
         * (and size needed).
         */
        rcv_size = ipc_kmsg_copyout_size(kmsg, self->map);
        if (rcv_size + REQUESTED_TRAILER_SIZE(option) > max_size) {
                mr = MACH_RCV_TOO_LARGE;
                if (option & MACH_RCV_LARGE) {
                        self->ith_kmsg = IKM_NULL;
                        self->ith_msize = rcv_size;
                        self->ith_state = mr;
                        return;
                }
        }

        ipc_kmsg_rmqueue_first_macro(&mqueue->imq_messages, kmsg);
        ipc_mqueue_release_msgcount(mqueue);
        self->ith_seqno = mqueue->imq_seqno++;
        self->ith_kmsg = kmsg;
        self->ith_state = mr;

        current_task()->messages_received++;
        return;
}
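
/*
 * Worked example of the size check above (illustrative numbers, not taken
 * from the source): with rcv_size = 1020, REQUESTED_TRAILER_SIZE(option) = 8
 * and max_size = 1024, the sum 1028 exceeds max_size, so mr becomes
 * MACH_RCV_TOO_LARGE; if MACH_RCV_LARGE was requested the message stays
 * queued and only the needed size (1020) is reported back in ith_msize.
 */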
/*
 *      Routine:        ipc_mqueue_destroy
 *      Purpose:
 *              Destroy a message queue.  Set any blocked senders running.
 *              Destroy the kmsgs in the queue.
 *      Conditions:
 *              Receivers were removed when the receive right was "changed"
 */
void
ipc_mqueue_destroy(
        ipc_mqueue_t    mqueue)
{
        ipc_kmsg_queue_t kmqueue;
        ipc_kmsg_t kmsg;
        spl_t s;

        s = splsched();
        imq_lock(mqueue);
        /*
         *      rouse all blocked senders
         */
        mqueue->imq_fullwaiters = FALSE;
        wait_queue_wakeup64_all_locked(
                                &mqueue->imq_wait_queue,
                                IPC_MQUEUE_FULL,
                                THREAD_AWAKENED,
                                FALSE);

        kmqueue = &mqueue->imq_messages;

        while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
                imq_unlock(mqueue);
                splx(s);

                ipc_kmsg_destroy_dest(kmsg);

                s = splsched();
                imq_lock(mqueue);
        }
        imq_unlock(mqueue);
        splx(s);
}
/*
 *      Routine:        ipc_mqueue_set_qlimit
 *      Purpose:
 *              Changes a message queue limit; the maximum number
 *              of messages which may be queued.
 */

void
ipc_mqueue_set_qlimit(
        ipc_mqueue_t            mqueue,
        mach_port_msgcount_t    qlimit)
{
        spl_t s;

        assert(qlimit <= MACH_PORT_QLIMIT_MAX);

        /* wake up senders allowed by the new qlimit */
        s = splsched();
        imq_lock(mqueue);
        if (qlimit > mqueue->imq_qlimit) {
                mach_port_msgcount_t i, wakeup;

                /* caution: wakeup, qlimit are unsigned */
                wakeup = qlimit - mqueue->imq_qlimit;

                for (i = 0; i < wakeup; i++) {
                        if (wait_queue_wakeup64_one_locked(
                                                &mqueue->imq_wait_queue,
                                                IPC_MQUEUE_FULL,
                                                THREAD_AWAKENED,
                                                FALSE) == KERN_NOT_WAITING) {
                                mqueue->imq_fullwaiters = FALSE;
                                break;
                        }
                        mqueue->imq_msgcount++;  /* give it to the awakened thread */
                }
        }
        mqueue->imq_qlimit = qlimit;
        imq_unlock(mqueue);
        splx(s);
}
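
/*
 * Worked example of the wakeup arithmetic above (illustrative numbers):
 * raising imq_qlimit from 5 to 8 gives wakeup = 8 - 5 = 3, so at most
 * three blocked senders are awakened, and each one is credited a queue
 * slot via imq_msgcount++ before the new limit is stored.
 */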
/*
 *      Routine:        ipc_mqueue_set_seqno
 *      Purpose:
 *              Changes an mqueue's sequence number.
 *      Conditions:
 *              Caller holds a reference to the queue's containing object.
 */
void
ipc_mqueue_set_seqno(
        ipc_mqueue_t            mqueue,
        mach_port_seqno_t       seqno)
{
        spl_t s;

        s = splsched();
        imq_lock(mqueue);
        mqueue->imq_seqno = seqno;
        imq_unlock(mqueue);
        splx(s);
}
/*
 *      Routine:        ipc_mqueue_copyin
 *      Purpose:
 *              Convert a name in a space to a message queue.
 *      Conditions:
 *              Nothing locked.  If successful, the caller gets a ref
 *              for the object.  This ref ensures the continued existence of
 *              the queue.
 *      Returns:
 *              MACH_MSG_SUCCESS        Found a message queue.
 *              MACH_RCV_INVALID_NAME   The space is dead.
 *              MACH_RCV_INVALID_NAME   The name doesn't denote a right.
 *              MACH_RCV_INVALID_NAME
 *                      The denoted right is not receive or port set.
 *              MACH_RCV_IN_SET         Receive right is a member of a set.
 */

mach_msg_return_t
ipc_mqueue_copyin(
        ipc_space_t             space,
        mach_port_name_t        name,
        ipc_mqueue_t            *mqueuep,
        ipc_object_t            *objectp)
{
        ipc_entry_t entry;
        ipc_object_t object;
        ipc_mqueue_t mqueue;

        is_read_lock(space);
        if (!space->is_active) {
                is_read_unlock(space);
                return MACH_RCV_INVALID_NAME;
        }

        entry = ipc_entry_lookup(space, name);
        if (entry == IE_NULL) {
                is_read_unlock(space);
                return MACH_RCV_INVALID_NAME;
        }

        object = entry->ie_object;

        if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
                ipc_port_t port;

                port = (ipc_port_t) object;
                assert(port != IP_NULL);

                ip_lock(port);
                assert(ip_active(port));
                assert(port->ip_receiver_name == name);
                assert(port->ip_receiver == space);
                is_read_unlock(space);
                mqueue = &port->ip_messages;

        } else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
                ipc_pset_t pset;

                pset = (ipc_pset_t) object;
                assert(pset != IPS_NULL);

                ips_lock(pset);
                assert(ips_active(pset));
                assert(pset->ips_local_name == name);
                is_read_unlock(space);

                mqueue = &pset->ips_messages;
        } else {
                is_read_unlock(space);
                return MACH_RCV_INVALID_NAME;
        }

        /*
         *      At this point, the object is locked and active,
         *      the space is unlocked, and mqueue is initialized.
         */
        io_reference(object);
        io_unlock(object);

        *objectp = object;
        *mqueuep = mqueue;
        return MACH_MSG_SUCCESS;
}