/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License.  The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:		ipc/ipc_mqueue.c
 *
 *	Functions to manipulate IPC message queues.
 */
#include <mach/port.h>
#include <mach/message.h>
#include <mach/sync_policy.h>

#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_mig.h>	/* XXX - for mach_msg_receive_continue */
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_space.h>
int ipc_mqueue_full;		/* address is event for queue space */
int ipc_mqueue_rcv;		/* address is event for message arrival */
/* forward declarations */
void ipc_mqueue_receive_results(wait_result_t result);
/*
 *	Routine:	ipc_mqueue_init
 *	Purpose:
 *		Initialize a newly-allocated message queue.
 */
void
ipc_mqueue_init(
    ipc_mqueue_t    mqueue,
    boolean_t       is_set)
{
    if (is_set) {
        wait_queue_set_init(&mqueue->imq_set_queue, SYNC_POLICY_FIFO);
    } else {
        wait_queue_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO);
        ipc_kmsg_queue_init(&mqueue->imq_messages);
        mqueue->imq_seqno = 0;
        mqueue->imq_msgcount = 0;
        mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
        mqueue->imq_fullwaiters = FALSE;
    }
}
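/*
 *	Illustrative sketch (not part of the original file): the two branches
 *	above correspond to the two kinds of objects that embed an ipc_mqueue.
 *	A port's queue is a real message queue; a port set's queue is only a
 *	wait-queue-set used to collect member ports.  Assuming callers along
 *	the lines of the port and port-set initialization code (the exact
 *	call sites are not shown in this file), usage would look like:
 *
 *		ipc_mqueue_init(&port->ip_messages, FALSE);	// ordinary port
 *		ipc_mqueue_init(&pset->ips_messages, TRUE);	// port set
 */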
/*
 *	Routine:	ipc_mqueue_member
 *	Purpose:
 *		Indicate whether the (port) mqueue is a member of
 *		this portset's mqueue.  We do this by checking
 *		whether the portset mqueue's waitq is a member of
 *		the port's mqueue waitq.
 *	Conditions:
 *		the portset's mqueue is not already a member
 *		this may block while allocating linkage structures.
 */

boolean_t
ipc_mqueue_member(
    ipc_mqueue_t        port_mqueue,
    ipc_mqueue_t        set_mqueue)
{
    wait_queue_t     port_waitq = &port_mqueue->imq_wait_queue;
    wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;

    return (wait_queue_member(port_waitq, set_waitq));
}
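/*
 *	Illustrative sketch (not part of the original file): a caller holding
 *	a port and a port set could use this predicate to avoid redundant
 *	linkage (locking and error handling omitted):
 *
 *		if (!ipc_mqueue_member(&port->ip_messages, &pset->ips_messages))
 *			kr = ipc_mqueue_add(&port->ip_messages, &pset->ips_messages);
 */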
/*
 *	Routine:	ipc_mqueue_remove
 *	Purpose:
 *		Remove the association between the queue and the specified
 *		set message queue.
 */

kern_return_t
ipc_mqueue_remove(
    ipc_mqueue_t    mqueue,
    ipc_mqueue_t    set_mqueue)
{
    wait_queue_t     mq_waitq = &mqueue->imq_wait_queue;
    wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;

    return wait_queue_unlink(mq_waitq, set_waitq);
}
/*
 *	Routine:	ipc_mqueue_remove_from_all
 *	Purpose:
 *		Remove the mqueue from all the sets it is a member of
 */
void
ipc_mqueue_remove_from_all(
    ipc_mqueue_t    mqueue)
{
    wait_queue_t    mq_waitq = &mqueue->imq_wait_queue;

    wait_queue_unlink_all(mq_waitq);
    return;
}
/*
 *	Routine:	ipc_mqueue_remove_all
 *	Purpose:
 *		Remove all the member queues from the specified set.
 */
void
ipc_mqueue_remove_all(
    ipc_mqueue_t    mqueue)
{
    wait_queue_set_t    mq_setq = &mqueue->imq_set_queue;

    wait_queue_set_unlink_all(mq_setq);
    return;
}
/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wakeup
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 */
kern_return_t
ipc_mqueue_add(
    ipc_mqueue_t    port_mqueue,
    ipc_mqueue_t    set_mqueue)
{
    wait_queue_t     port_waitq = &port_mqueue->imq_wait_queue;
    wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
    ipc_kmsg_queue_t kmsgq;
    ipc_kmsg_t       kmsg, next;
    kern_return_t    kr;
    spl_t            s;

    kr = wait_queue_link(port_waitq, set_waitq);
    if (kr != KERN_SUCCESS)
        return kr;

    /*
     * Now that the set has been added to the port, there may be
     * messages queued on the port and threads waiting on the set
     * waitq.  Lets get them together.
     */
    s = splsched();
    imq_lock(port_mqueue);
    kmsgq = &port_mqueue->imq_messages;
    for (kmsg = ipc_kmsg_queue_first(kmsgq);
         kmsg != IKM_NULL;
         kmsg = next) {
        next = ipc_kmsg_queue_next(kmsgq, kmsg);

        for (;;) {
            thread_t th;

            th = wait_queue_wakeup64_identity_locked(
                        port_waitq,
                        IPC_MQUEUE_RECEIVE,
                        THREAD_AWAKENED,
                        FALSE);
            /* waitq/mqueue still locked, thread locked */

            if (th == THREAD_NULL)
                goto leave;

            /*
             * Found a receiver. see if they can handle the message
             * correctly (the message is not too large for them, or
             * they didn't care to be informed that the message was
             * too large).  If they can't handle it, take them off
             * the list and let them go back and figure it out and
             * just move onto the next.
             */
            if (th->ith_msize <
                kmsg->ikm_header->msgh_size +
                REQUESTED_TRAILER_SIZE(th->ith_option)) {
                th->ith_state = MACH_RCV_TOO_LARGE;
                th->ith_msize = kmsg->ikm_header->msgh_size;
                if (th->ith_option & MACH_RCV_LARGE) {
                    /*
                     * let him go without message
                     */
                    th->ith_kmsg = IKM_NULL;
                    th->ith_seqno = 0;
                    thread_unlock(th);
                    continue; /* find another thread */
                }
            } else {
                th->ith_state = MACH_MSG_SUCCESS;
            }

            /*
             * This thread is going to take this message,
             * so give it to him.
             */
            ipc_kmsg_rmqueue(kmsgq, kmsg);
            ipc_mqueue_release_msgcount(port_mqueue);

            th->ith_kmsg = kmsg;
            th->ith_seqno = port_mqueue->imq_seqno++;
            thread_unlock(th);
            break;  /* go to next message */
        }
    }
 leave:
    imq_unlock(port_mqueue);
    splx(s);
    return KERN_SUCCESS;
}
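/*
 *	Illustrative user-space sketch (not part of this file): the matching
 *	done above runs when a receive right is inserted into a port set.
 *	From user space that transition is driven by the standard port-set
 *	calls; error handling omitted:
 *
 *		#include <mach/mach.h>
 *
 *		mach_port_t port, pset;
 *		mach_port_allocate(mach_task_self(),
 *		    MACH_PORT_RIGHT_RECEIVE, &port);
 *		mach_port_allocate(mach_task_self(),
 *		    MACH_PORT_RIGHT_PORT_SET, &pset);
 *		// Link the port into the set; any messages already queued on
 *		// the port may now be handed to threads waiting on the set.
 *		mach_port_move_member(mach_task_self(), port, pset);
 */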
/*
 *	Routine:	ipc_mqueue_changed
 *	Purpose:
 *		Wake up receivers waiting in a message queue.
 *	Conditions:
 *		The message queue is locked.
 */
void
ipc_mqueue_changed(
    ipc_mqueue_t    mqueue)
{
    wait_queue_wakeup64_all_locked(
                &mqueue->imq_wait_queue,
                IPC_MQUEUE_RECEIVE,
                THREAD_RESTART,
                FALSE);		/* unlock waitq? */
}
/*
 *	Routine:	ipc_mqueue_send
 *	Purpose:
 *		Send a message to a message queue.  The message holds a reference
 *		for the destination port for this message queue in the
 *		msgh_remote_port field.
 *
 *		If unsuccessful, the caller still has possession of
 *		the message and must do something with it.  If successful,
 *		the message is queued, given to a receiver, or destroyed.
 *	Returns:
 *		MACH_MSG_SUCCESS	The message was accepted.
 *		MACH_SEND_TIMED_OUT	Caller still has message.
 *		MACH_SEND_INTERRUPTED	Caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
    ipc_mqueue_t        mqueue,
    ipc_kmsg_t          kmsg,
    mach_msg_option_t   option,
    mach_msg_timeout_t  send_timeout)
{
    wait_result_t   wresult;
    uint64_t        deadline;
    spl_t           s;

    /*
     *  Don't block if:
     *	1) We're under the queue limit.
     *	2) Caller used the MACH_SEND_ALWAYS internal option.
     *	3) Message is sent to a send-once right.
     */
    s = splsched();
    imq_lock(mqueue);

    if (!imq_full(mqueue) ||
        (option & MACH_SEND_ALWAYS) ||
        (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
                        MACH_MSG_TYPE_PORT_SEND_ONCE)) {
        mqueue->imq_msgcount++;
        assert(mqueue->imq_msgcount > 0);
        imq_unlock(mqueue);
        splx(s);
    } else {
        thread_t cur_thread = current_thread();

        /*
         * We have to wait for space to be granted to us.
         */
        if ((option & MACH_SEND_TIMEOUT) && (send_timeout == 0)) {
            imq_unlock(mqueue);
            splx(s);
            return MACH_SEND_TIMED_OUT;
        }
        mqueue->imq_fullwaiters = TRUE;
        thread_lock(cur_thread);
        if (option & MACH_SEND_TIMEOUT)
            clock_interval_to_deadline(send_timeout, 1000*NSEC_PER_USEC, &deadline);
        else
            deadline = 0;
        wresult = wait_queue_assert_wait64_locked(
                        &mqueue->imq_wait_queue,
                        IPC_MQUEUE_FULL,
                        THREAD_ABORTSAFE, deadline,
                        cur_thread);
        thread_unlock(cur_thread);
        imq_unlock(mqueue);
        splx(s);

        if (wresult == THREAD_WAITING) {
            wresult = thread_block(THREAD_CONTINUE_NULL);
            counter(c_ipc_mqueue_send_block++);
        }

        switch (wresult) {
        case THREAD_TIMED_OUT:
            assert(option & MACH_SEND_TIMEOUT);
            return MACH_SEND_TIMED_OUT;

        case THREAD_AWAKENED:
            /* we can proceed - inherited msgcount from waker */
            assert(mqueue->imq_msgcount > 0);
            break;

        case THREAD_INTERRUPTED:
            return MACH_SEND_INTERRUPTED;

        default:
            panic("ipc_mqueue_send");
        }
    }

    ipc_mqueue_post(mqueue, kmsg);
    return MACH_MSG_SUCCESS;
}
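/*
 *	Illustrative user-space sketch (not part of this file): the
 *	MACH_SEND_TIMED_OUT path above is what a caller sees when it sends
 *	to a full queue with MACH_SEND_TIMEOUT set.  Assuming a valid send
 *	right `dest` and C99; error handling omitted:
 *
 *		#include <mach/mach.h>
 *
 *		mach_msg_header_t msg = {
 *			.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0),
 *			.msgh_size = sizeof(msg),
 *			.msgh_remote_port = dest,
 *			.msgh_local_port = MACH_PORT_NULL,
 *			.msgh_id = 100,
 *		};
 *		// timeout argument is in milliseconds
 *		kern_return_t kr = mach_msg(&msg,
 *		    MACH_SEND_MSG | MACH_SEND_TIMEOUT,
 *		    sizeof(msg), 0, MACH_PORT_NULL,
 *		    10, MACH_PORT_NULL);
 *		if (kr == MACH_SEND_TIMED_OUT) {
 *			// queue stayed full for 10 ms; we still own the message
 *		}
 */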
/*
 *	Routine:	ipc_mqueue_release_msgcount
 *	Purpose:
 *		Release a message queue reference in the case where we
 *		found a waiter.
 *
 *	Conditions:
 *		The message queue is locked.
 *		The message corresponding to this reference is off the queue.
 */
void
ipc_mqueue_release_msgcount(
    ipc_mqueue_t mqueue)
{
    assert(imq_held(mqueue));
    assert(mqueue->imq_msgcount > 1 || ipc_kmsg_queue_empty(&mqueue->imq_messages));

    mqueue->imq_msgcount--;

    if (!imq_full(mqueue) && mqueue->imq_fullwaiters) {
        if (wait_queue_wakeup64_one_locked(
                        &mqueue->imq_wait_queue,
                        IPC_MQUEUE_FULL,
                        THREAD_AWAKENED,
                        FALSE) != KERN_SUCCESS) {
            mqueue->imq_fullwaiters = FALSE;
        } else {
            /* gave away our slot - add reference back */
            mqueue->imq_msgcount++;
        }
    }
}
/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *
 *	Conditions:
 *		If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
    register ipc_mqueue_t   mqueue,
    register ipc_kmsg_t     kmsg)
{
    spl_t s;

    /*
     *	While the msg queue is locked, we have control of the
     *	kmsg, so the ref in it for the port is still good.
     *
     *	Check for a receiver for the message.
     */
    s = splsched();
    imq_lock(mqueue);
    for (;;) {
        wait_queue_t waitq = &mqueue->imq_wait_queue;
        thread_t receiver;

        receiver = wait_queue_wakeup64_identity_locked(
                            waitq,
                            IPC_MQUEUE_RECEIVE,
                            THREAD_AWAKENED,
                            FALSE);
        /* waitq still locked, thread locked */

        if (receiver == THREAD_NULL) {
            /*
             * no receivers; queue kmsg
             */
            assert(mqueue->imq_msgcount > 0);
            ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
            break;
        }

        /*
         * We found a waiting thread.
         * If the message is too large or the scatter list is too small
         * the thread we wake up will get that as its status.
         */
        if (receiver->ith_msize <
                (kmsg->ikm_header->msgh_size) +
                REQUESTED_TRAILER_SIZE(receiver->ith_option)) {
            receiver->ith_msize = kmsg->ikm_header->msgh_size;
            receiver->ith_state = MACH_RCV_TOO_LARGE;
        } else {
            receiver->ith_state = MACH_MSG_SUCCESS;
        }

        /*
         * If there is no problem with the upcoming receive, or the
         * receiver thread didn't specifically ask for special too
         * large error condition, go ahead and select it anyway.
         */
        if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
            !(receiver->ith_option & MACH_RCV_LARGE)) {

            receiver->ith_kmsg = kmsg;
            receiver->ith_seqno = mqueue->imq_seqno++;
            thread_unlock(receiver);

            /* we didn't need our reserved spot in the queue */
            ipc_mqueue_release_msgcount(mqueue);
            break;
        }

        /*
         * Otherwise, this thread needs to be released to run
         * and handle its error without getting the message.  We
         * need to go back and pick another one.
         */
        receiver->ith_kmsg = IKM_NULL;
        receiver->ith_seqno = 0;
        thread_unlock(receiver);
    }

    imq_unlock(mqueue);
    splx(s);

    current_task()->messages_sent++;
    return;
}
void
ipc_mqueue_receive_results(wait_result_t saved_wait_result)
{
    thread_t            self = current_thread();
    mach_msg_option_t   option = self->ith_option;
    mach_msg_return_t   mr;

    /*
     * why did we wake up?
     */
    switch (saved_wait_result) {
    case THREAD_TIMED_OUT:
        self->ith_state = MACH_RCV_TIMED_OUT;
        return;

    case THREAD_INTERRUPTED:
        self->ith_state = MACH_RCV_INTERRUPTED;
        return;

    case THREAD_RESTART:
        /* something bad happened to the port/set */
        self->ith_state = MACH_RCV_PORT_CHANGED;
        return;

    case THREAD_AWAKENED:
        /*
         * We do not need to go select a message, somebody
         * handed us one (or a too-large indication).
         */
        mr = MACH_MSG_SUCCESS;

        switch (self->ith_state) {
        case MACH_RCV_SCATTER_SMALL:
        case MACH_RCV_TOO_LARGE:
            /*
             * Somebody tried to give us a too large
             * message. If we indicated that we cared,
             * then they only gave us the indication,
             * otherwise they gave us the indication
             * AND the message anyway.
             */
            if (option & MACH_RCV_LARGE) {
                return;
            }
            /* fall through: we were handed the message anyway */

        case MACH_MSG_SUCCESS:
            return;

        default:
            panic("ipc_mqueue_receive_results: strange ith_state");
        }

    default:
        panic("ipc_mqueue_receive_results: strange wait_result");
    }
}
void
ipc_mqueue_receive_continue(
    __unused void *param,
    wait_result_t wresult)
{
    ipc_mqueue_receive_results(wresult);
    mach_msg_receive_continue();  /* hard-coded for now */
}
/*
 *	Routine:	ipc_mqueue_receive
 *	Purpose:
 *		Receive a message from a message queue.
 *
 *		If continuation is non-zero, then we might discard
 *		our kernel stack when we block.  We will continue
 *		after unblocking by executing continuation.
 *
 *		If resume is true, then we are resuming a receive
 *		operation after a blocked receive discarded our stack.
 *	Conditions:
 *		Our caller must hold a reference for the port or port set
 *		to which this queue belongs, to keep the queue
 *		from being deallocated.
 *
 *		The kmsg is returned with clean header fields
 *		and with the circular bit turned off.
 *	Returns:
 *		MACH_MSG_SUCCESS	Message returned in kmsgp.
 *		MACH_RCV_TOO_LARGE	Message size returned in kmsgp.
 *		MACH_RCV_TIMED_OUT	No message obtained.
 *		MACH_RCV_INTERRUPTED	No message obtained.
 *		MACH_RCV_PORT_DIED	Port/set died; no message.
 *		MACH_RCV_PORT_CHANGED	Port moved into set; no msg.
 *
 */

void
ipc_mqueue_receive(
    ipc_mqueue_t        mqueue,
    mach_msg_option_t   option,
    mach_msg_size_t     max_size,
    mach_msg_timeout_t  rcv_timeout,
    int                 interruptible)
{
    ipc_kmsg_queue_t    kmsgs;
    wait_result_t       wresult;
    thread_t            self;
    uint64_t            deadline;
    spl_t               s;

    s = splsched();
    imq_lock(mqueue);

    if (imq_is_set(mqueue)) {
        wait_queue_link_t wql;
        ipc_mqueue_t port_mq;
        queue_t q;

    search_set:
        q = &mqueue->imq_setlinks;

        /*
         * If we are waiting on a portset mqueue, we need to see if
         * any of the member ports have work for us.  If so, try to
         * deliver one of those messages. By holding the portset's
         * mqueue lock during the search, we tie up any attempts by
         * mqueue_deliver or portset membership changes that may
         * cross our path. But this is a lock order violation, so we
         * have to do it "softly."  If we don't find a message waiting
         * for us, we will assert our intention to wait while still
         * holding that lock.  When we release the lock, the deliver/
         * change will succeed and find us.
         */
        queue_iterate(q, wql, wait_queue_link_t, wql_setlinks) {
            port_mq = (ipc_mqueue_t)wql->wql_queue;
            kmsgs = &port_mq->imq_messages;

            if (!imq_lock_try(port_mq)) {
                imq_unlock(mqueue);
                splx(s);
                mutex_pause();
                s = splsched();
                imq_lock(mqueue);
                goto search_set; /* start again at beginning - SMP */
            }

            /*
             * If there is still a message to be had, we will
             * try to select it (may not succeed because of size
             * and options).  In any case, we deliver those
             * results back to the user.
             *
             * We also move the port's linkage to the tail of the
             * list for this set (fairness). Future versions will
             * sort by timestamp or priority.
             */
            if (ipc_kmsg_queue_first(kmsgs) == IKM_NULL) {
                imq_unlock(port_mq);
                continue;
            }
            queue_remove(q, wql, wait_queue_link_t, wql_setlinks);
            queue_enter(q, wql, wait_queue_link_t, wql_setlinks);
            imq_unlock(mqueue);

            ipc_mqueue_select(port_mq, option, max_size);
            imq_unlock(port_mq);
            splx(s);
            return;
        }

    } else {

        /*
         * Receive on a single port. Just try to get the messages.
         */
        kmsgs = &mqueue->imq_messages;
        if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
            ipc_mqueue_select(mqueue, option, max_size);
            imq_unlock(mqueue);
            splx(s);
            return;
        }
    }

    /*
     * Looks like we'll have to block.  The mqueue we will
     * block on (whether the set's or the local port's) is
     * still locked.
     */
    self = current_thread();
    if (option & MACH_RCV_TIMEOUT) {
        if (rcv_timeout == 0) {
            imq_unlock(mqueue);
            splx(s);
            self->ith_state = MACH_RCV_TIMED_OUT;
            return;
        }
    }

    thread_lock(self);
    self->ith_state = MACH_RCV_IN_PROGRESS;
    self->ith_option = option;
    self->ith_msize = max_size;

    if (option & MACH_RCV_TIMEOUT)
        clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline);
    else
        deadline = 0;

    wresult = wait_queue_assert_wait64_locked(&mqueue->imq_wait_queue,
                          IPC_MQUEUE_RECEIVE,
                          interruptible, deadline,
                          self);
    thread_unlock(self);
    imq_unlock(mqueue);
    splx(s);

    if (wresult == THREAD_WAITING) {
        counter((interruptible == THREAD_ABORTSAFE) ?
            c_ipc_mqueue_receive_block_user++ :
            c_ipc_mqueue_receive_block_kernel++);

        if (self->ith_continuation)
            thread_block(ipc_mqueue_receive_continue);
            /* NOTREACHED */

        wresult = thread_block(THREAD_CONTINUE_NULL);
    }
    ipc_mqueue_receive_results(wresult);
}
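/*
 *	Illustrative user-space sketch (not part of this file): the
 *	MACH_RCV_TOO_LARGE / MACH_RCV_LARGE handling above is what lets a
 *	receiver learn that its buffer is too small without losing the
 *	message.  Assuming a receive right `port`, documented MACH_RCV_LARGE
 *	semantics (the needed size is reported back in msgh_size), and C99;
 *	error handling omitted:
 *
 *		#include <mach/mach.h>
 *		#include <stdlib.h>
 *
 *		mach_msg_size_t size = sizeof(mach_msg_header_t) + 64;
 *		mach_msg_header_t *buf = malloc(size);
 *		kern_return_t kr = mach_msg(buf, MACH_RCV_MSG | MACH_RCV_LARGE,
 *		    0, size, port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
 *		if (kr == MACH_RCV_TOO_LARGE) {
 *			// message left queued; retry with a bigger buffer
 *			size = buf->msgh_size + MAX_TRAILER_SIZE;
 *			buf = realloc(buf, size);
 *			kr = mach_msg(buf, MACH_RCV_MSG,
 *			    0, size, port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
 *		}
 */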
/*
 *	Routine:	ipc_mqueue_select
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before he had to block.  Pick the message off the queue and
 *		"post" it to himself.
 *	Conditions:
 *		There is a message.
 *	Returns:
 *		MACH_MSG_SUCCESS	Actually selected a message for ourselves.
 *		MACH_RCV_TOO_LARGE	May or may not have pulled it, but it is large.
 */
void
ipc_mqueue_select(
    ipc_mqueue_t        mqueue,
    mach_msg_option_t   option,
    mach_msg_size_t     max_size)
{
    thread_t self = current_thread();
    ipc_kmsg_t kmsg;
    mach_msg_return_t mr;
    mach_msg_size_t rcv_size;

    mr = MACH_MSG_SUCCESS;

    /*
     * Do some sanity checking of our ability to receive
     * before pulling the message off the queue.
     */
    kmsg = ipc_kmsg_queue_first(&mqueue->imq_messages);
    assert(kmsg != IKM_NULL);

    /*
     * If we really can't receive it, but we had the
     * MACH_RCV_LARGE option set, then don't take it off
     * the queue, instead return the appropriate error
     * (and size needed).
     */
    rcv_size = ipc_kmsg_copyout_size(kmsg, self->map);
    if (rcv_size + REQUESTED_TRAILER_SIZE(option) > max_size) {
        mr = MACH_RCV_TOO_LARGE;
        if (option & MACH_RCV_LARGE) {
            self->ith_kmsg = IKM_NULL;
            self->ith_msize = rcv_size;
            self->ith_seqno = 0;
            self->ith_state = mr;
            return;
        }
    }

    ipc_kmsg_rmqueue_first_macro(&mqueue->imq_messages, kmsg);
    ipc_mqueue_release_msgcount(mqueue);
    self->ith_seqno = mqueue->imq_seqno++;
    self->ith_kmsg = kmsg;
    self->ith_state = mr;

    current_task()->messages_received++;
    return;
}
/*
 *	Routine:	ipc_mqueue_destroy
 *	Purpose:
 *		Destroy a message queue.  Set any blocked senders running.
 *		Destroy the kmsgs in the queue.
 *	Conditions:
 *		Receivers were removed when the receive right was "changed"
 */
void
ipc_mqueue_destroy(
    ipc_mqueue_t    mqueue)
{
    ipc_kmsg_queue_t kmqueue;
    ipc_kmsg_t kmsg;
    spl_t s;

    s = splsched();
    imq_lock(mqueue);
    /*
     *	rouse all blocked senders
     */
    mqueue->imq_fullwaiters = FALSE;
    wait_queue_wakeup64_all_locked(
                &mqueue->imq_wait_queue,
                IPC_MQUEUE_FULL,
                THREAD_AWAKENED,
                FALSE);

    kmqueue = &mqueue->imq_messages;

    while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
        imq_unlock(mqueue);
        splx(s);

        ipc_kmsg_destroy_dest(kmsg);

        s = splsched();
        imq_lock(mqueue);
    }
    imq_unlock(mqueue);
    splx(s);
}
/*
 *	Routine:	ipc_mqueue_set_qlimit
 *	Purpose:
 *		Changes a message queue limit; the maximum number
 *		of messages which may be queued.
 */

void
ipc_mqueue_set_qlimit(
    ipc_mqueue_t            mqueue,
    mach_port_msgcount_t    qlimit)
{
    spl_t s;

    assert(qlimit <= MACH_PORT_QLIMIT_MAX);

    /* wake up senders allowed by the new qlimit */
    s = splsched();
    imq_lock(mqueue);
    if (qlimit > mqueue->imq_qlimit) {
        mach_port_msgcount_t i, wakeup;

        /* caution: wakeup, qlimit are unsigned */
        wakeup = qlimit - mqueue->imq_qlimit;

        for (i = 0; i < wakeup; i++) {
            if (wait_queue_wakeup64_one_locked(
                        &mqueue->imq_wait_queue,
                        IPC_MQUEUE_FULL,
                        THREAD_AWAKENED,
                        FALSE) == KERN_NOT_WAITING) {
                mqueue->imq_fullwaiters = FALSE;
                break;
            }
            mqueue->imq_msgcount++;	/* give it to the awakened thread */
        }
    }
    mqueue->imq_qlimit = qlimit;
    imq_unlock(mqueue);
    splx(s);
}
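/*
 *	Illustrative user-space sketch (not part of this file): the queue
 *	limit changed here is the one user code adjusts with
 *	mach_port_set_attributes() on a receive right `port` it holds,
 *	e.g. to let more messages accumulate before senders block:
 *
 *		#include <mach/mach.h>
 *
 *		mach_port_limits_t limits = { .mpl_qlimit = 16 };
 *		kern_return_t kr = mach_port_set_attributes(mach_task_self(),
 *		    port, MACH_PORT_LIMITS_INFO,
 *		    (mach_port_info_t)&limits, MACH_PORT_LIMITS_INFO_COUNT);
 */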
/*
 *	Routine:	ipc_mqueue_set_seqno
 *	Purpose:
 *		Changes an mqueue's sequence number.
 *	Conditions:
 *		Caller holds a reference to the queue's containing object.
 */
void
ipc_mqueue_set_seqno(
    ipc_mqueue_t        mqueue,
    mach_port_seqno_t   seqno)
{
    spl_t s;

    s = splsched();
    imq_lock(mqueue);
    mqueue->imq_seqno = seqno;
    imq_unlock(mqueue);
    splx(s);
}
/*
 *	Routine:	ipc_mqueue_copyin
 *	Purpose:
 *		Convert a name in a space to a message queue.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets a ref
 *		for the object.  This ref ensures the continued existence of
 *		the queue.
 *	Returns:
 *		MACH_MSG_SUCCESS	Found a message queue.
 *		MACH_RCV_INVALID_NAME	The space is dead.
 *		MACH_RCV_INVALID_NAME	The name doesn't denote a right.
 *		MACH_RCV_INVALID_NAME
 *			The denoted right is not receive or port set.
 *		MACH_RCV_IN_SET		Receive right is a member of a set.
 */

mach_msg_return_t
ipc_mqueue_copyin(
    ipc_space_t         space,
    mach_port_name_t    name,
    ipc_mqueue_t        *mqueuep,
    ipc_object_t        *objectp)
{
    ipc_entry_t entry;
    ipc_object_t object;
    ipc_mqueue_t mqueue;

    is_read_lock(space);
    if (!space->is_active) {
        is_read_unlock(space);
        return MACH_RCV_INVALID_NAME;
    }

    entry = ipc_entry_lookup(space, name);
    if (entry == IE_NULL) {
        is_read_unlock(space);
        return MACH_RCV_INVALID_NAME;
    }

    object = entry->ie_object;

    if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
        ipc_port_t port;

        port = (ipc_port_t) object;
        assert(port != IP_NULL);

        ip_lock(port);
        assert(ip_active(port));
        assert(port->ip_receiver_name == name);
        assert(port->ip_receiver == space);
        is_read_unlock(space);
        mqueue = &port->ip_messages;

    } else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
        ipc_pset_t pset;

        pset = (ipc_pset_t) object;
        assert(pset != IPS_NULL);

        ips_lock(pset);
        assert(ips_active(pset));
        assert(pset->ips_local_name == name);
        is_read_unlock(space);

        mqueue = &pset->ips_messages;
    } else {
        is_read_unlock(space);
        return MACH_RCV_INVALID_NAME;
    }

    /*
     *	At this point, the object is locked and active,
     *	the space is unlocked, and mqueue is initialized.
     */
    io_reference(object);
    io_unlock(object);

    *objectp = object;
    *mqueuep = mqueue;
    return MACH_MSG_SUCCESS;
}
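/*
 *	Illustrative sketch (not part of the original file): a hypothetical
 *	caller of ipc_mqueue_copyin would follow the pattern below.  The
 *	ipc_object_release() used to drop the reference afterwards is our
 *	assumption about the matching release primitive; error handling
 *	abbreviated:
 *
 *		ipc_mqueue_t mqueue;
 *		ipc_object_t object;
 *		mach_msg_return_t mr;
 *
 *		mr = ipc_mqueue_copyin(space, name, &mqueue, &object);
 *		if (mr != MACH_MSG_SUCCESS)
 *			return mr;
 *		// ... receive from mqueue (e.g. via ipc_mqueue_receive()) ...
 *		ipc_object_release(object);	// drop the ref taken by copyin
 */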