/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	ipc/ipc_mqueue.c
 *
 *	Functions to manipulate IPC message queues.
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <mach/port.h>
#include <mach/message.h>
#include <mach/sync_policy.h>

#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_mig.h>	/* XXX - for mach_msg_receive_continue */
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_space.h>

#include <security/mac_mach_internal.h>
int ipc_mqueue_full;		/* address is event for queue space */
int ipc_mqueue_rcv;		/* address is event for message arrival */
/* forward declarations */
void ipc_mqueue_receive_results(wait_result_t result);
/*
 *	Routine:	ipc_mqueue_init
 *	Purpose:
 *		Initialize a newly-allocated message queue.
 */
void
ipc_mqueue_init(
	ipc_mqueue_t	mqueue,
	boolean_t	is_set)
{
	if (is_set) {
		wait_queue_set_init(&mqueue->imq_set_queue, SYNC_POLICY_FIFO);
	} else {
		wait_queue_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO);
		ipc_kmsg_queue_init(&mqueue->imq_messages);
		mqueue->imq_seqno = 0;
		mqueue->imq_msgcount = 0;
		mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
		mqueue->imq_fullwaiters = FALSE;
	}
}
/*
 *	Routine:	ipc_mqueue_member
 *	Purpose:
 *		Indicate whether the (port) mqueue is a member of
 *		this portset's mqueue.  We do this by checking
 *		whether the portset mqueue's waitq is a member of
 *		the port's mqueue waitq.
 *	Conditions:
 *		The portset's mqueue is not already a member;
 *		this may block while allocating linkage structures.
 */
boolean_t
ipc_mqueue_member(
	ipc_mqueue_t		port_mqueue,
	ipc_mqueue_t		set_mqueue)
{
	wait_queue_t	 port_waitq = &port_mqueue->imq_wait_queue;
	wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;

	return (wait_queue_member(port_waitq, set_waitq));
}
/*
 *	Routine:	ipc_mqueue_remove
 *	Purpose:
 *		Remove the association between the queue and the specified
 *		set message queue.
 */
kern_return_t
ipc_mqueue_remove(
	ipc_mqueue_t	mqueue,
	ipc_mqueue_t	set_mqueue)
{
	wait_queue_t	 mq_waitq = &mqueue->imq_wait_queue;
	wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;

	return wait_queue_unlink(mq_waitq, set_waitq);
}
/*
 *	Routine:	ipc_mqueue_remove_from_all
 *	Purpose:
 *		Remove the mqueue from all the sets it is a member of.
 */
void
ipc_mqueue_remove_from_all(
	ipc_mqueue_t	mqueue)
{
	wait_queue_t	mq_waitq = &mqueue->imq_wait_queue;

	wait_queue_unlink_all(mq_waitq);
	return;
}
/*
 *	Routine:	ipc_mqueue_remove_all
 *	Purpose:
 *		Remove all the member queues from the specified set.
 */
void
ipc_mqueue_remove_all(
	ipc_mqueue_t	mqueue)
{
	wait_queue_set_t	mq_setq = &mqueue->imq_set_queue;

	wait_queue_set_unlink_all(mq_setq);
	return;
}
/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wakeup
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 */
kern_return_t
ipc_mqueue_add(
	ipc_mqueue_t	port_mqueue,
	ipc_mqueue_t	set_mqueue)
{
	wait_queue_t	 port_waitq = &port_mqueue->imq_wait_queue;
	wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t	 kmsg, next;
	kern_return_t	 kr;
	spl_t		 s;

	kr = wait_queue_link(port_waitq, set_waitq);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 * Now that the set has been added to the port, there may be
	 * messages queued on the port and threads waiting on the set
	 * waitq.  Lets get them together.
	 */
	s = splsched();
	imq_lock(port_mqueue);
	kmsgq = &port_mqueue->imq_messages;
	for (kmsg = ipc_kmsg_queue_first(kmsgq);
	     kmsg != IKM_NULL;
	     kmsg = next) {
		next = ipc_kmsg_queue_next(kmsgq, kmsg);

		for (;;) {
			thread_t th;

			th = wait_queue_wakeup64_identity_locked(
						port_waitq,
						IPC_MQUEUE_RECEIVE,
						THREAD_AWAKENED,
						FALSE);
			/* waitq/mqueue still locked, thread locked */

			if (th == THREAD_NULL)
				goto leave;

			/*
			 * Found a receiver. See if they can handle the message
			 * correctly (the message is not too large for them, or
			 * they didn't care to be informed that the message was
			 * too large).  If they can't handle it, take them off
			 * the list, let them go back and figure it out, and
			 * just move onto the next.
			 */
			if (th->ith_msize <
			    kmsg->ikm_header->msgh_size +
			    REQUESTED_TRAILER_SIZE(th->ith_option)) {
				th->ith_state = MACH_RCV_TOO_LARGE;
				th->ith_msize = kmsg->ikm_header->msgh_size;
				if (th->ith_option & MACH_RCV_LARGE) {
					/*
					 * let him go without message
					 */
					th->ith_kmsg = IKM_NULL;
					th->ith_seqno = 0;
					thread_unlock(th);
					continue; /* find another thread */
				}
			} else {
				th->ith_state = MACH_MSG_SUCCESS;
			}

			/*
			 * This thread is going to take this message,
			 * so give it to him.
			 */
			ipc_kmsg_rmqueue(kmsgq, kmsg);
			ipc_mqueue_release_msgcount(port_mqueue);

			th->ith_kmsg = kmsg;
			th->ith_seqno = port_mqueue->imq_seqno++;
			thread_unlock(th);
			break;	/* go to next message */
		}
	}
 leave:
	imq_unlock(port_mqueue);
	splx(s);
	return KERN_SUCCESS;
}
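/*
 * Illustrative sketch (not part of the original file): the path above is
 * driven from user space when a task moves a receive right into a port
 * set.  A minimal example, assuming <mach/mach.h> and omitting error
 * handling; "port" and "pset" are placeholder names:
 *
 *	mach_port_t port, pset;
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &pset);
 *	mach_port_insert_member(mach_task_self(), port, pset);
 *
 * The mach_port_insert_member() call is what ultimately links the port's
 * mqueue to the set's mqueue via ipc_mqueue_add().
 */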
/*
 *	Routine:	ipc_mqueue_changed
 *	Purpose:
 *		Wake up receivers waiting in a message queue.
 *	Conditions:
 *		The message queue is locked.
 */
void
ipc_mqueue_changed(
	ipc_mqueue_t		mqueue)
{
	wait_queue_wakeup64_all_locked(
				&mqueue->imq_wait_queue,
				IPC_MQUEUE_RECEIVE,
				THREAD_RESTART,
				FALSE);		/* unlock waitq? */
}
/*
 *	Routine:	ipc_mqueue_send
 *	Purpose:
 *		Send a message to a message queue.  The message holds a reference
 *		for the destination port for this message queue in the
 *		msgh_remote_port field.
 *
 *		If unsuccessful, the caller still has possession of
 *		the message and must do something with it.  If successful,
 *		the message is queued, given to a receiver, or destroyed.
 *	Returns:
 *		MACH_MSG_SUCCESS	The message was accepted.
 *		MACH_SEND_TIMED_OUT	Caller still has message.
 *		MACH_SEND_INTERRUPTED	Caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
	ipc_mqueue_t		mqueue,
	ipc_kmsg_t		kmsg,
	mach_msg_option_t	option,
	mach_msg_timeout_t	send_timeout)
{
	wait_result_t	wresult;
	spl_t		s;

	/*
	 *  Don't block if:
	 *	1) We're under the queue limit.
	 *	2) Caller used the MACH_SEND_ALWAYS internal option.
	 *	3) Message is sent to a send-once right.
	 */
	s = splsched();
	imq_lock(mqueue);

	if (!imq_full(mqueue) ||
	    (!imq_full_kernel(mqueue) &&
	     ((option & MACH_SEND_ALWAYS) ||
	      (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
	       MACH_MSG_TYPE_PORT_SEND_ONCE)))) {
		mqueue->imq_msgcount++;
		assert(mqueue->imq_msgcount > 0);
		imq_unlock(mqueue);
		splx(s);
	} else {
		thread_t cur_thread = current_thread();
		uint64_t deadline;

		/*
		 * We have to wait for space to be granted to us.
		 */
		if ((option & MACH_SEND_TIMEOUT) && (send_timeout == 0)) {
			imq_unlock(mqueue);
			splx(s);
			return MACH_SEND_TIMED_OUT;
		}
		if (imq_full_kernel(mqueue)) {
			imq_unlock(mqueue);
			splx(s);
			return MACH_SEND_NO_BUFFER;
		}
		mqueue->imq_fullwaiters = TRUE;
		thread_lock(cur_thread);
		if (option & MACH_SEND_TIMEOUT)
			clock_interval_to_deadline(send_timeout, 1000*NSEC_PER_USEC, &deadline);
		else
			deadline = 0;
		wresult = wait_queue_assert_wait64_locked(
						&mqueue->imq_wait_queue,
						IPC_MQUEUE_FULL,
						THREAD_ABORTSAFE, deadline,
						cur_thread);
		thread_unlock(cur_thread);
		imq_unlock(mqueue);
		splx(s);

		if (wresult == THREAD_WAITING) {
			wresult = thread_block(THREAD_CONTINUE_NULL);
			counter(c_ipc_mqueue_send_block++);
		}

		switch (wresult) {
		case THREAD_TIMED_OUT:
			assert(option & MACH_SEND_TIMEOUT);
			return MACH_SEND_TIMED_OUT;

		case THREAD_AWAKENED:
			/* we can proceed - inherited msgcount from waker */
			assert(mqueue->imq_msgcount > 0);
			break;

		case THREAD_INTERRUPTED:
			return MACH_SEND_INTERRUPTED;

		default:
			panic("ipc_mqueue_send");
		}
	}

	ipc_mqueue_post(mqueue, kmsg);
	return MACH_MSG_SUCCESS;
}
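/*
 * Illustrative sketch (not part of the original file): the full-queue wait
 * above is what a user-space sender runs into when the destination's queue
 * is at its limit.  A minimal send using MACH_SEND_TIMEOUT, assuming
 * <mach/mach.h>; "dest" is a placeholder send right and error handling is
 * omitted:
 *
 *	mach_msg_header_t msg;
 *	msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
 *	msg.msgh_size = sizeof(msg);
 *	msg.msgh_remote_port = dest;
 *	msg.msgh_local_port = MACH_PORT_NULL;
 *	msg.msgh_id = 100;
 *	kern_return_t kr = mach_msg(&msg, MACH_SEND_MSG | MACH_SEND_TIMEOUT,
 *	    sizeof(msg), 0, MACH_PORT_NULL, 10, MACH_PORT_NULL);
 *
 * If the queue stays full for the 10 ms timeout, kr is MACH_SEND_TIMED_OUT
 * and the caller still owns the message, matching the contract documented
 * above.
 */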
/*
 *	Routine:	ipc_mqueue_release_msgcount
 *	Purpose:
 *		Release a message queue reference in the case where we
 *		found a waiter.
 *	Conditions:
 *		The message queue is locked.
 *		The message corresponding to this reference is off the queue.
 */
void
ipc_mqueue_release_msgcount(
	ipc_mqueue_t mqueue)
{
	assert(imq_held(mqueue));
	assert(mqueue->imq_msgcount > 1 || ipc_kmsg_queue_empty(&mqueue->imq_messages));

	mqueue->imq_msgcount--;

	if (!imq_full(mqueue) && mqueue->imq_fullwaiters) {
		if (wait_queue_wakeup64_one_locked(
						&mqueue->imq_wait_queue,
						IPC_MQUEUE_FULL,
						THREAD_AWAKENED,
						FALSE) != KERN_SUCCESS) {
			mqueue->imq_fullwaiters = FALSE;
		} else {
			/* gave away our slot - add reference back */
			mqueue->imq_msgcount++;
		}
	}
}
/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *	Conditions:
 *		If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
	register ipc_mqueue_t	mqueue,
	register ipc_kmsg_t	kmsg)
{
	spl_t s;

	/*
	 *	While the msg queue is locked, we have control of the
	 *	kmsg, so the ref in it for the port is still good.
	 *
	 *	Check for a receiver for the message.
	 */
	s = splsched();
	imq_lock(mqueue);
	for (;;) {
		wait_queue_t waitq = &mqueue->imq_wait_queue;
		thread_t receiver;

		receiver = wait_queue_wakeup64_identity_locked(
							waitq,
							IPC_MQUEUE_RECEIVE,
							THREAD_AWAKENED,
							FALSE);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {
			/*
			 * no receivers; queue kmsg
			 */
			assert(mqueue->imq_msgcount > 0);
			ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
			break;
		}

		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too small
		 * the thread we wake up will get that as its status.
		 */
		if (receiver->ith_msize <
		    (kmsg->ikm_header->msgh_size) +
		    REQUESTED_TRAILER_SIZE(receiver->ith_option)) {
			receiver->ith_msize = kmsg->ikm_header->msgh_size;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for special too
		 * large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {

			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
			thread_unlock(receiver);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue);
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
	}

	imq_unlock(mqueue);
	splx(s);

	current_task()->messages_sent++;
	return;
}
void
ipc_mqueue_receive_results(wait_result_t saved_wait_result)
{
	thread_t		self = current_thread();
	mach_msg_option_t	option = self->ith_option;

	/*
	 * why did we wake up?
	 */
	switch (saved_wait_result) {
	case THREAD_TIMED_OUT:
		self->ith_state = MACH_RCV_TIMED_OUT;
		return;

	case THREAD_INTERRUPTED:
		self->ith_state = MACH_RCV_INTERRUPTED;
		return;

	case THREAD_RESTART:
		/* something bad happened to the port/set */
		self->ith_state = MACH_RCV_PORT_CHANGED;
		return;

	case THREAD_AWAKENED:
		/*
		 * We do not need to go select a message, somebody
		 * handed us one (or a too-large indication).
		 */
		switch (self->ith_state) {
		case MACH_RCV_SCATTER_SMALL:
		case MACH_RCV_TOO_LARGE:
			/*
			 * Somebody tried to give us a too large
			 * message. If we indicated that we cared,
			 * then they only gave us the indication,
			 * otherwise they gave us the indication
			 * AND the message anyway.
			 */
			if (option & MACH_RCV_LARGE) {
				return;
			}

		case MACH_MSG_SUCCESS:
			return;

		default:
			panic("ipc_mqueue_receive_results: strange ith_state");
		}

	default:
		panic("ipc_mqueue_receive_results: strange wait_result");
	}
}
void
ipc_mqueue_receive_continue(
	__unused void *param,
	wait_result_t wresult)
{
	ipc_mqueue_receive_results(wresult);
	mach_msg_receive_continue();	/* hard-coded for now */
}
/*
 *	Routine:	ipc_mqueue_receive
 *	Purpose:
 *		Receive a message from a message queue.
 *
 *		If continuation is non-zero, then we might discard
 *		our kernel stack when we block.  We will continue
 *		after unblocking by executing continuation.
 *
 *		If resume is true, then we are resuming a receive
 *		operation after a blocked receive discarded our stack.
 *	Conditions:
 *		Our caller must hold a reference for the port or port set
 *		to which this queue belongs, to keep the queue
 *		from being deallocated.
 *
 *		The kmsg is returned with clean header fields
 *		and with the circular bit turned off.
 *	Returns:
 *		MACH_MSG_SUCCESS	Message returned in kmsgp.
 *		MACH_RCV_TOO_LARGE	Message size returned in kmsgp.
 *		MACH_RCV_TIMED_OUT	No message obtained.
 *		MACH_RCV_INTERRUPTED	No message obtained.
 *		MACH_RCV_PORT_DIED	Port/set died; no message.
 *		MACH_RCV_PORT_CHANGED	Port moved into set; no msg.
 */
void
ipc_mqueue_receive(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	mach_msg_timeout_t	rcv_timeout,
	int			interruptible)
{
	ipc_kmsg_queue_t	kmsgs;
	wait_result_t		wresult;
	thread_t		self;
	uint64_t		deadline;
	spl_t			s;
#if CONFIG_MACF_MACH
	ipc_labelh_t		lh;
	task_t			task;
	int			rc;
#endif

	s = splsched();
	imq_lock(mqueue);
	self = current_thread();
	if (imq_is_set(mqueue)) {
		wait_queue_link_t wql;
		ipc_mqueue_t port_mq;
		queue_t q;

	search_set:
		q = &mqueue->imq_setlinks;

		/*
		 * If we are waiting on a portset mqueue, we need to see if
		 * any of the member ports have work for us.  If so, try to
		 * deliver one of those messages. By holding the portset's
		 * mqueue lock during the search, we tie up any attempts by
		 * mqueue_deliver or portset membership changes that may
		 * cross our path. But this is a lock order violation, so we
		 * have to do it "softly."  If we don't find a message waiting
		 * for us, we will assert our intention to wait while still
		 * holding that lock.  When we release the lock, the deliver/
		 * change will succeed and find us.
		 */
		queue_iterate(q, wql, wait_queue_link_t, wql_setlinks) {
			port_mq = (ipc_mqueue_t)wql->wql_queue;
			kmsgs = &port_mq->imq_messages;

			if (!imq_lock_try(port_mq)) {
				imq_unlock(mqueue);
				splx(s);
				mutex_pause(0);
				s = splsched();
				imq_lock(mqueue);
				goto search_set; /* start again at beginning - SMP */
			}

			/*
			 * If there is still a message to be had, we will
			 * try to select it (may not succeed because of size
			 * and options).  In any case, we deliver those
			 * results back to the user.
			 *
			 * We also move the port's linkage to the tail of the
			 * list for this set (fairness). Future versions will
			 * sort by timestamp or priority.
			 */
			if (ipc_kmsg_queue_first(kmsgs) == IKM_NULL) {
				imq_unlock(port_mq);
				continue;
			}
			queue_remove(q, wql, wait_queue_link_t, wql_setlinks);
			queue_enter(q, wql, wait_queue_link_t, wql_setlinks);
			imq_unlock(mqueue);

			ipc_mqueue_select(port_mq, option, max_size);
			imq_unlock(port_mq);
#if CONFIG_MACF_MACH
			if (self->ith_kmsg != NULL &&
			    self->ith_kmsg->ikm_sender != NULL) {
				lh = self->ith_kmsg->ikm_sender->label;
				task = current_task();
				tasklabel_lock(task);
				ip_lock(lh->lh_port);
				rc = mac_port_check_receive(&task->maclabel,
							    &lh->lh_label);
				ip_unlock(lh->lh_port);
				tasklabel_unlock(task);
				if (rc)
					self->ith_state = MACH_RCV_INVALID_DATA;
			}
#endif
			splx(s);
			return;
		}

	} else {
		/*
		 * Receive on a single port. Just try to get the messages.
		 */
		kmsgs = &mqueue->imq_messages;
		if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
			ipc_mqueue_select(mqueue, option, max_size);
			imq_unlock(mqueue);
#if CONFIG_MACF_MACH
			if (self->ith_kmsg != NULL &&
			    self->ith_kmsg->ikm_sender != NULL) {
				lh = self->ith_kmsg->ikm_sender->label;
				task = current_task();
				tasklabel_lock(task);
				ip_lock(lh->lh_port);
				rc = mac_port_check_receive(&task->maclabel,
							    &lh->lh_label);
				ip_unlock(lh->lh_port);
				tasklabel_unlock(task);
				if (rc)
					self->ith_state = MACH_RCV_INVALID_DATA;
			}
#endif
			splx(s);
			return;
		}
	}
	/*
	 * Looks like we'll have to block.  The mqueue we will
	 * block on (whether the set's or the local port's) is
	 * still locked.
	 */
	if (option & MACH_RCV_TIMEOUT) {
		if (rcv_timeout == 0) {
			imq_unlock(mqueue);
			splx(s);
			self->ith_state = MACH_RCV_TIMED_OUT;
			return;
		}
	}

	thread_lock(self);
	self->ith_state = MACH_RCV_IN_PROGRESS;
	self->ith_option = option;
	self->ith_msize = max_size;

	if (option & MACH_RCV_TIMEOUT)
		clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline);
	else
		deadline = 0;

	wresult = wait_queue_assert_wait64_locked(&mqueue->imq_wait_queue,
						  IPC_MQUEUE_RECEIVE,
						  interruptible, deadline,
						  self);
	thread_unlock(self);
	imq_unlock(mqueue);
	splx(s);

	if (wresult == THREAD_WAITING) {
		counter((interruptible == THREAD_ABORTSAFE) ?
			c_ipc_mqueue_receive_block_user++ :
			c_ipc_mqueue_receive_block_kernel++);

		if (self->ith_continuation)
			thread_block(ipc_mqueue_receive_continue);
			/* NOTREACHED */

		wresult = thread_block(THREAD_CONTINUE_NULL);
	}
	ipc_mqueue_receive_results(wresult);
}
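/*
 * Illustrative sketch (not part of the original file): the receive path
 * above, including the MACH_RCV_LARGE and MACH_RCV_TIMEOUT handling, is
 * reached from user space through mach_msg().  A minimal receive, assuming
 * <mach/mach.h>; "port" is a placeholder receive right and error handling
 * is omitted:
 *
 *	struct {
 *		mach_msg_header_t	header;
 *		char			body[256];
 *		mach_msg_trailer_t	trailer;
 *	} buf;
 *	kern_return_t kr = mach_msg(&buf.header,
 *	    MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_TIMEOUT,
 *	    0, sizeof(buf), port, 100, MACH_PORT_NULL);
 *
 * With MACH_RCV_LARGE set, an oversized message stays on the queue and kr
 * is MACH_RCV_TOO_LARGE with the required size reported back; with a
 * 100 ms timeout and no message, kr is MACH_RCV_TIMED_OUT.
 */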
/*
 *	Routine:	ipc_mqueue_select
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before he had to block.  Pick the message off the queue and
 *		"post" it to himself.
 *	Conditions:
 *		mqueue locked.
 *		There is a message.
 *	Returns:
 *		MACH_MSG_SUCCESS	Actually selected a message for ourselves.
 *		MACH_RCV_TOO_LARGE	May or may not have pulled it, but it is large.
 */
void
ipc_mqueue_select(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size)
{
	thread_t self = current_thread();
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr;
	mach_msg_size_t rcv_size;

	mr = MACH_MSG_SUCCESS;

	/*
	 * Do some sanity checking of our ability to receive
	 * before pulling the message off the queue.
	 */
	kmsg = ipc_kmsg_queue_first(&mqueue->imq_messages);
	assert(kmsg != IKM_NULL);

	/*
	 * If we really can't receive it, but we had the
	 * MACH_RCV_LARGE option set, then don't take it off
	 * the queue, instead return the appropriate error
	 * (and size needed).
	 */
	rcv_size = ipc_kmsg_copyout_size(kmsg, self->map);
	if (rcv_size + REQUESTED_TRAILER_SIZE(option) > max_size) {
		mr = MACH_RCV_TOO_LARGE;
		if (option & MACH_RCV_LARGE) {
			self->ith_kmsg = IKM_NULL;
			self->ith_msize = rcv_size;
			self->ith_seqno = 0;
			self->ith_state = mr;
			return;
		}
	}

	ipc_kmsg_rmqueue_first_macro(&mqueue->imq_messages, kmsg);
	ipc_mqueue_release_msgcount(mqueue);
	self->ith_seqno = mqueue->imq_seqno++;
	self->ith_kmsg = kmsg;
	self->ith_state = mr;

	current_task()->messages_received++;
	return;
}
/*
 *	Routine:	ipc_mqueue_destroy
 *	Purpose:
 *		Destroy a message queue.  Set any blocked senders running.
 *		Destroy the kmsgs in the queue.
 *	Conditions:
 *		Receivers were removed when the receive right was "changed".
 */
void
ipc_mqueue_destroy(
	ipc_mqueue_t	mqueue)
{
	ipc_kmsg_queue_t kmqueue;
	ipc_kmsg_t kmsg;
	spl_t s;

	s = splsched();
	imq_lock(mqueue);
	/*
	 *	rouse all blocked senders
	 */
	mqueue->imq_fullwaiters = FALSE;
	wait_queue_wakeup64_all_locked(
				&mqueue->imq_wait_queue,
				IPC_MQUEUE_FULL,
				THREAD_AWAKENED,
				FALSE);

	kmqueue = &mqueue->imq_messages;

	while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
		imq_unlock(mqueue);
		splx(s);

		ipc_kmsg_destroy_dest(kmsg);

		s = splsched();
		imq_lock(mqueue);
	}
	imq_unlock(mqueue);
	splx(s);
}
/*
 *	Routine:	ipc_mqueue_set_qlimit
 *	Purpose:
 *		Changes a message queue limit; the maximum number
 *		of messages which may be queued.
 */
void
ipc_mqueue_set_qlimit(
	ipc_mqueue_t		mqueue,
	mach_port_msgcount_t	qlimit)
{
	spl_t s;

	assert(qlimit <= MACH_PORT_QLIMIT_MAX);

	/* wake up senders allowed by the new qlimit */
	s = splsched();
	imq_lock(mqueue);
	if (qlimit > mqueue->imq_qlimit) {
		mach_port_msgcount_t i, wakeup;

		/* caution: wakeup, qlimit are unsigned */
		wakeup = qlimit - mqueue->imq_qlimit;

		for (i = 0; i < wakeup; i++) {
			if (wait_queue_wakeup64_one_locked(
						&mqueue->imq_wait_queue,
						IPC_MQUEUE_FULL,
						THREAD_AWAKENED,
						FALSE) == KERN_NOT_WAITING) {
				mqueue->imq_fullwaiters = FALSE;
				break;
			}
			mqueue->imq_msgcount++;	/* give it to the awakened thread */
		}
	}
	mqueue->imq_qlimit = qlimit;
	imq_unlock(mqueue);
	splx(s);
}
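/*
 * Illustrative sketch (not part of the original file): user space changes a
 * port's queue limit, which lands in ipc_mqueue_set_qlimit(), through the
 * port-attributes call.  Assuming <mach/mach.h>, with "port" a placeholder
 * receive right and error handling omitted:
 *
 *	mach_port_limits_t limits;
 *	limits.mpl_qlimit = 16;
 *	mach_port_set_attributes(mach_task_self(), port,
 *	    MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits,
 *	    MACH_PORT_LIMITS_INFO_COUNT);
 *
 * Raising the limit above the current value wakes up to (new - old) blocked
 * senders, each inheriting one of the newly available slots, as the loop
 * above shows.
 */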
/*
 *	Routine:	ipc_mqueue_set_seqno
 *	Purpose:
 *		Changes an mqueue's sequence number.
 *	Conditions:
 *		Caller holds a reference to the queue's containing object.
 */
void
ipc_mqueue_set_seqno(
	ipc_mqueue_t		mqueue,
	mach_port_seqno_t	seqno)
{
	spl_t s;
	s = splsched();
	imq_lock(mqueue);
	mqueue->imq_seqno = seqno;
	imq_unlock(mqueue);
	splx(s);
}
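/*
 * Illustrative sketch (not part of the original file): the sequence number
 * is also settable from user space, e.g. to reset it on a receive right.
 * Assuming <mach/mach.h>, with "port" a placeholder receive right:
 *
 *	mach_port_set_seqno(mach_task_self(), port, 0);
 *
 * Messages received on the port afterwards report sequence numbers starting
 * again from the value set here.
 */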
/*
 *	Routine:	ipc_mqueue_copyin
 *	Purpose:
 *		Convert a name in a space to a message queue.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets a ref for
 *		the object.  This ref ensures the continued existence of
 *		the queue.
 *	Returns:
 *		MACH_MSG_SUCCESS	Found a message queue.
 *		MACH_RCV_INVALID_NAME	The space is dead.
 *		MACH_RCV_INVALID_NAME	The name doesn't denote a right.
 *		MACH_RCV_INVALID_NAME
 *			The denoted right is not receive or port set.
 *		MACH_RCV_IN_SET		Receive right is a member of a set.
 */
mach_msg_return_t
ipc_mqueue_copyin(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_mqueue_t		*mqueuep,
	ipc_object_t		*objectp)
{
	ipc_entry_t entry;
	ipc_object_t object;
	ipc_mqueue_t mqueue;

	is_read_lock(space);
	if (!space->is_active) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	entry = ipc_entry_lookup(space, name);
	if (entry == IE_NULL) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	object = entry->ie_object;

	if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
		ipc_port_t port;

		port = (ipc_port_t) object;
		assert(port != IP_NULL);

		ip_lock(port);
		assert(ip_active(port));
		assert(port->ip_receiver_name == name);
		assert(port->ip_receiver == space);
		is_read_unlock(space);
		mqueue = &port->ip_messages;

	} else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
		ipc_pset_t pset;

		pset = (ipc_pset_t) object;
		assert(pset != IPS_NULL);

		ips_lock(pset);
		assert(ips_active(pset));
		assert(pset->ips_local_name == name);
		is_read_unlock(space);

		mqueue = &pset->ips_messages;
	} else {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	/*
	 *	At this point, the object is locked and active,
	 *	the space is unlocked, and mqueue is initialized.
	 */
	io_reference(object);
	io_unlock(object);

	*objectp = object;
	*mqueuep = mqueue;
	return MACH_MSG_SUCCESS;
}