/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	ipc/ipc_mqueue.c
 *
 * Functions to manipulate IPC message queues.
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
73 #include <mach/port.h>
74 #include <mach/message.h>
75 #include <mach/sync_policy.h>
77 #include <kern/assert.h>
78 #include <kern/counters.h>
79 #include <kern/sched_prim.h>
80 #include <kern/ipc_kobject.h>
81 #include <kern/ipc_mig.h> /* XXX - for mach_msg_receive_continue */
82 #include <kern/misc_protos.h>
83 #include <kern/task.h>
84 #include <kern/thread.h>
85 #include <kern/waitq.h>
87 #include <ipc/ipc_mqueue.h>
88 #include <ipc/ipc_kmsg.h>
89 #include <ipc/ipc_port.h>
90 #include <ipc/ipc_pset.h>
91 #include <ipc/ipc_space.h>
94 #include <ipc/flipc.h>
98 #include <vm/vm_map.h>
101 #include <sys/event.h>
extern char *proc_name_address(void *p);

int ipc_mqueue_full;		/* address is event for queue space */
int ipc_mqueue_rcv;		/* address is event for message arrival */
108 /* forward declarations */
109 void ipc_mqueue_receive_results(wait_result_t result
);
110 static void ipc_mqueue_peek_on_thread(
111 ipc_mqueue_t port_mq
,
112 mach_msg_option_t option
,
116 * Routine: ipc_mqueue_init
118 * Initialize a newly-allocated message queue.
124 uint64_t *reserved_link
)
127 waitq_set_init(&mqueue
->imq_set_queue
,
128 SYNC_POLICY_FIFO
|SYNC_POLICY_PREPOST
,
129 reserved_link
, NULL
);
131 waitq_init(&mqueue
->imq_wait_queue
, SYNC_POLICY_FIFO
);
132 ipc_kmsg_queue_init(&mqueue
->imq_messages
);
133 mqueue
->imq_seqno
= 0;
134 mqueue
->imq_msgcount
= 0;
135 mqueue
->imq_qlimit
= MACH_PORT_QLIMIT_DEFAULT
;
136 mqueue
->imq_fullwaiters
= FALSE
;
138 mqueue
->imq_fport
= FPORT_NULL
;
141 klist_init(&mqueue
->imq_klist
);
144 void ipc_mqueue_deinit(
147 boolean_t is_set
= imq_is_set(mqueue
);
150 waitq_set_deinit(&mqueue
->imq_set_queue
);
152 waitq_deinit(&mqueue
->imq_wait_queue
);
156 * Routine: imq_reserve_and_lock
158 * Atomically lock an ipc_mqueue_t object and reserve
159 * an appropriate number of prepost linkage objects for
160 * use in wakeup operations.
165 imq_reserve_and_lock(ipc_mqueue_t mq
, uint64_t *reserved_prepost
)
167 *reserved_prepost
= waitq_prepost_reserve(&mq
->imq_wait_queue
, 0,
174 * Routine: imq_release_and_unlock
176 * Unlock an ipc_mqueue_t object, re-enable interrupts,
177 * and release any unused prepost object reservations.
182 imq_release_and_unlock(ipc_mqueue_t mq
, uint64_t reserved_prepost
)
184 assert(imq_held(mq
));
185 waitq_unlock(&mq
->imq_wait_queue
);
186 waitq_prepost_release_reserve(reserved_prepost
);
191 * Routine: ipc_mqueue_member
193 * Indicate whether the (port) mqueue is a member of
194 * this portset's mqueue. We do this by checking
195 * whether the portset mqueue's waitq is an member of
196 * the port's mqueue waitq.
198 * the portset's mqueue is not already a member
199 * this may block while allocating linkage structures.
204 ipc_mqueue_t port_mqueue
,
205 ipc_mqueue_t set_mqueue
)
207 struct waitq
*port_waitq
= &port_mqueue
->imq_wait_queue
;
208 struct waitq_set
*set_waitq
= &set_mqueue
->imq_set_queue
;
210 return waitq_member(port_waitq
, set_waitq
);
215 * Routine: ipc_mqueue_remove
217 * Remove the association between the queue and the specified
224 ipc_mqueue_t set_mqueue
)
226 struct waitq
*mq_waitq
= &mqueue
->imq_wait_queue
;
227 struct waitq_set
*set_waitq
= &set_mqueue
->imq_set_queue
;
229 return waitq_unlink(mq_waitq
, set_waitq
);
233 * Routine: ipc_mqueue_remove_from_all
235 * Remove the mqueue from all the sets it is a member of
239 * mqueue unlocked and set links deallocated
242 ipc_mqueue_remove_from_all(ipc_mqueue_t mqueue
)
244 struct waitq
*mq_waitq
= &mqueue
->imq_wait_queue
;
249 assert(waitq_valid(mq_waitq
));
250 kr
= waitq_unlink_all_unlock(mq_waitq
);
251 /* mqueue unlocked and set links deallocated */
255 * Routine: ipc_mqueue_remove_all
257 * Remove all the member queues from the specified set.
258 * Also removes the queue from any containing sets.
262 * mqueue unlocked all set links deallocated
265 ipc_mqueue_remove_all(ipc_mqueue_t mqueue
)
267 struct waitq_set
*mq_setq
= &mqueue
->imq_set_queue
;
270 assert(waitqs_is_set(mq_setq
));
271 waitq_set_unlink_all_unlock(mq_setq
);
272 /* mqueue unlocked set links deallocated */
277 * Routine: ipc_mqueue_add
279 * Associate the portset's mqueue with the port's mqueue.
280 * This has to be done so that posting the port will wakeup
281 * a portset waiter. If there are waiters on the portset
282 * mqueue and messages on the port mqueue, try to match them
289 ipc_mqueue_t port_mqueue
,
290 ipc_mqueue_t set_mqueue
,
291 uint64_t *reserved_link
,
292 uint64_t *reserved_prepost
)
294 struct waitq
*port_waitq
= &port_mqueue
->imq_wait_queue
;
295 struct waitq_set
*set_waitq
= &set_mqueue
->imq_set_queue
;
296 ipc_kmsg_queue_t kmsgq
;
297 ipc_kmsg_t kmsg
, next
;
300 assert(reserved_link
&& *reserved_link
!= 0);
302 imq_lock(port_mqueue
);
305 * The link operation is now under the same lock-hold as
306 * message iteration and thread wakeup, but doesn't have to be...
308 kr
= waitq_link(port_waitq
, set_waitq
, WAITQ_ALREADY_LOCKED
, reserved_link
);
309 if (kr
!= KERN_SUCCESS
) {
310 imq_unlock(port_mqueue
);
315 * Now that the set has been added to the port, there may be
316 * messages queued on the port and threads waiting on the set
317 * waitq. Lets get them together.
319 kmsgq
= &port_mqueue
->imq_messages
;
320 for (kmsg
= ipc_kmsg_queue_first(kmsgq
);
323 next
= ipc_kmsg_queue_next(kmsgq
, kmsg
);
327 mach_msg_size_t msize
;
330 th
= waitq_wakeup64_identify_locked(
333 THREAD_AWAKENED
, &th_spl
,
334 reserved_prepost
, WAITQ_ALL_PRIORITIES
,
336 /* waitq/mqueue still locked, thread locked */
338 if (th
== THREAD_NULL
)
342 * If the receiver waited with a facility not directly
343 * related to Mach messaging, then it isn't prepared to get
344 * handed the message directly. Just set it running, and
345 * go look for another thread that can.
347 if (th
->ith_state
!= MACH_RCV_IN_PROGRESS
) {
348 if (th
->ith_state
== MACH_PEEK_IN_PROGRESS
) {
350 * wakeup the peeking thread, but
351 * continue to loop over the threads
352 * waiting on the port's mqueue to see
353 * if there are any actual receivers
355 ipc_mqueue_peek_on_thread(port_mqueue
,
365 * Found a receiver. see if they can handle the message
366 * correctly (the message is not too large for them, or
367 * they didn't care to be informed that the message was
368 * too large). If they can't handle it, take them off
369 * the list and let them go back and figure it out and
370 * just move onto the next.
372 msize
= ipc_kmsg_copyout_size(kmsg
, th
->map
);
374 (msize
+ REQUESTED_TRAILER_SIZE(thread_is_64bit(th
), th
->ith_option
))) {
375 th
->ith_state
= MACH_RCV_TOO_LARGE
;
376 th
->ith_msize
= msize
;
377 if (th
->ith_option
& MACH_RCV_LARGE
) {
379 * let him go without message
381 th
->ith_receiver_name
= port_mqueue
->imq_receiver_name
;
382 th
->ith_kmsg
= IKM_NULL
;
386 continue; /* find another thread */
389 th
->ith_state
= MACH_MSG_SUCCESS
;
393 * This thread is going to take this message,
396 ipc_kmsg_rmqueue(kmsgq
, kmsg
);
398 mach_node_t node
= kmsg
->ikm_node
;
400 ipc_mqueue_release_msgcount(port_mqueue
, IMQ_NULL
);
403 th
->ith_seqno
= port_mqueue
->imq_seqno
++;
407 if (MACH_NODE_VALID(node
) && FPORT_VALID(port_mqueue
->imq_fport
))
408 flipc_msg_ack(node
, port_mqueue
, TRUE
);
410 break; /* go to next message */
414 imq_unlock(port_mqueue
);
419 * Routine: ipc_mqueue_changed
421 * Wake up receivers waiting in a message queue.
423 * The message queue is locked.
430 /* Indicate that this message queue is vanishing */
431 knote_vanish(&mqueue
->imq_klist
);
433 waitq_wakeup64_all_locked(&mqueue
->imq_wait_queue
,
437 WAITQ_ALL_PRIORITIES
,
445 * Routine: ipc_mqueue_send
447 * Send a message to a message queue. The message holds a reference
448 * for the destination port for this message queue in the
449 * msgh_remote_port field.
451 * If unsuccessful, the caller still has possession of
452 * the message and must do something with it. If successful,
453 * the message is queued, given to a receiver, or destroyed.
457 * MACH_MSG_SUCCESS The message was accepted.
458 * MACH_SEND_TIMED_OUT Caller still has message.
459 * MACH_SEND_INTERRUPTED Caller still has message.
465 mach_msg_option_t option
,
466 mach_msg_timeout_t send_timeout
)
472 * 1) We're under the queue limit.
473 * 2) Caller used the MACH_SEND_ALWAYS internal option.
474 * 3) Message is sent to a send-once right.
476 if (!imq_full(mqueue
) ||
477 (!imq_full_kernel(mqueue
) &&
478 ((option
& MACH_SEND_ALWAYS
) ||
479 (MACH_MSGH_BITS_REMOTE(kmsg
->ikm_header
->msgh_bits
) ==
480 MACH_MSG_TYPE_PORT_SEND_ONCE
)))) {
481 mqueue
->imq_msgcount
++;
482 assert(mqueue
->imq_msgcount
> 0);
485 thread_t cur_thread
= current_thread();
489 * We have to wait for space to be granted to us.
491 if ((option
& MACH_SEND_TIMEOUT
) && (send_timeout
== 0)) {
493 return MACH_SEND_TIMED_OUT
;
495 if (imq_full_kernel(mqueue
)) {
497 return MACH_SEND_NO_BUFFER
;
499 mqueue
->imq_fullwaiters
= TRUE
;
501 if (option
& MACH_SEND_TIMEOUT
)
502 clock_interval_to_deadline(send_timeout
, 1000*NSEC_PER_USEC
, &deadline
);
506 thread_set_pending_block_hint(cur_thread
, kThreadWaitPortSend
);
507 wresult
= waitq_assert_wait64_locked(
508 &mqueue
->imq_wait_queue
,
511 TIMEOUT_URGENCY_USER_NORMAL
,
512 deadline
, TIMEOUT_NO_LEEWAY
,
517 if (wresult
== THREAD_WAITING
) {
518 wresult
= thread_block(THREAD_CONTINUE_NULL
);
519 counter(c_ipc_mqueue_send_block
++);
524 case THREAD_AWAKENED
:
526 * we can proceed - inherited msgcount from waker
527 * or the message queue has been destroyed and the msgcount
528 * has been reset to zero (will detect in ipc_mqueue_post()).
532 case THREAD_TIMED_OUT
:
533 assert(option
& MACH_SEND_TIMEOUT
);
534 return MACH_SEND_TIMED_OUT
;
536 case THREAD_INTERRUPTED
:
537 return MACH_SEND_INTERRUPTED
;
540 /* mqueue is being destroyed */
541 return MACH_SEND_INVALID_DEST
;
543 panic("ipc_mqueue_send");
547 ipc_mqueue_post(mqueue
, kmsg
, option
);
548 return MACH_MSG_SUCCESS
;
552 * Routine: ipc_mqueue_override_send
554 * Set an override qos on the first message in the queue
555 * (if the queue is full). This is a send-possible override
556 * that will go away as soon as we drain a message from the
560 * The message queue is not locked.
561 * The caller holds a reference on the message queue.
563 extern void ipc_mqueue_override_send(
565 mach_msg_priority_t override
)
567 boolean_t __unused full_queue_empty
= FALSE
;
570 assert(imq_valid(mqueue
));
571 assert(!imq_is_set(mqueue
));
573 if (imq_full(mqueue
)) {
574 ipc_kmsg_t first
= ipc_kmsg_queue_first(&mqueue
->imq_messages
);
576 if (first
&& ipc_kmsg_override_qos(&mqueue
->imq_messages
, first
, override
))
577 KNOTE(&mqueue
->imq_klist
, 0);
579 full_queue_empty
= TRUE
;
583 #if DEVELOPMENT || DEBUG
584 if (full_queue_empty
) {
585 ipc_port_t port
= ip_from_mq(mqueue
);
587 if (ip_active(port
) && !port
->ip_tempowner
&&
588 port
->ip_receiver_name
&& port
->ip_receiver
&&
589 port
->ip_receiver
!= ipc_space_kernel
) {
590 dst_pid
= task_pid(port
->ip_receiver
->is_task
);
597 * Routine: ipc_mqueue_release_msgcount
599 * Release a message queue reference in the case where we
603 * The message queue is locked.
604 * The message corresponding to this reference is off the queue.
605 * There is no need to pass reserved preposts because this will
606 * never prepost to anyone
609 ipc_mqueue_release_msgcount(ipc_mqueue_t port_mq
, ipc_mqueue_t set_mq
)
612 assert(imq_held(port_mq
));
613 assert(port_mq
->imq_msgcount
> 1 || ipc_kmsg_queue_empty(&port_mq
->imq_messages
));
615 port_mq
->imq_msgcount
--;
617 if (!imq_full(port_mq
) && port_mq
->imq_fullwaiters
) {
619 * boost the priority of the awoken thread
620 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
621 * the message queue slot we've just reserved.
623 * NOTE: this will never prepost
625 if (waitq_wakeup64_one_locked(&port_mq
->imq_wait_queue
,
629 WAITQ_PROMOTE_PRIORITY
,
630 WAITQ_KEEP_LOCKED
) != KERN_SUCCESS
) {
631 port_mq
->imq_fullwaiters
= FALSE
;
633 /* gave away our slot - add reference back */
634 port_mq
->imq_msgcount
++;
638 if (ipc_kmsg_queue_empty(&port_mq
->imq_messages
)) {
639 /* no more msgs: invalidate the port's prepost object */
640 waitq_clear_prepost_locked(&port_mq
->imq_wait_queue
);
645 * Routine: ipc_mqueue_post
647 * Post a message to a waiting receiver or enqueue it. If a
648 * receiver is waiting, we can release our reserved space in
653 * If we need to queue, our space in the message queue is reserved.
659 mach_msg_option_t __unused option
)
661 uint64_t reserved_prepost
= 0;
662 boolean_t destroy_msg
= FALSE
;
664 ipc_kmsg_trace_send(kmsg
, option
);
667 * While the msg queue is locked, we have control of the
668 * kmsg, so the ref in it for the port is still good.
670 * Check for a receiver for the message.
672 imq_reserve_and_lock(mqueue
, &reserved_prepost
);
674 /* we may have raced with port destruction! */
675 if (!imq_valid(mqueue
)) {
681 struct waitq
*waitq
= &mqueue
->imq_wait_queue
;
684 mach_msg_size_t msize
;
686 receiver
= waitq_wakeup64_identify_locked(waitq
,
691 WAITQ_ALL_PRIORITIES
,
693 /* waitq still locked, thread locked */
695 if (receiver
== THREAD_NULL
) {
698 * no receivers; queue kmsg if space still reserved
699 * Reservations are cancelled when the port goes inactive.
700 * note that this will enqueue the message for any
701 * "peeking" receivers.
703 * Also, post the knote to wake up any threads waiting
704 * on that style of interface if this insertion is of
705 * note (first insertion, or adjusted override qos all
706 * the way to the head of the queue).
708 * This is just for ports. portset knotes are stay-active,
709 * and their threads get awakened through the !MACH_RCV_IN_PROGRESS
712 if (mqueue
->imq_msgcount
> 0) {
713 if (ipc_kmsg_enqueue_qos(&mqueue
->imq_messages
, kmsg
))
714 KNOTE(&mqueue
->imq_klist
, 0);
719 * Otherwise, the message queue must belong to an inactive
720 * port, so just destroy the message and pretend it was posted.
727 * If a thread is attempting a "peek" into the message queue
728 * (MACH_PEEK_IN_PROGRESS), then we enqueue the message and set the
729 * thread running. A successful peek is essentially the same as
730 * message delivery since the peeking thread takes responsibility
731 * for delivering the message and (eventually) removing it from
732 * the mqueue. Only one thread can successfully use the peek
733 * facility on any given port, so we exit the waitq loop after
734 * encountering such a thread.
736 if (receiver
->ith_state
== MACH_PEEK_IN_PROGRESS
&& mqueue
->imq_msgcount
> 0) {
737 ipc_kmsg_enqueue_qos(&mqueue
->imq_messages
, kmsg
);
738 ipc_mqueue_peek_on_thread(mqueue
, receiver
->ith_option
, receiver
);
739 thread_unlock(receiver
);
741 break; /* Message was posted, so break out of loop */
745 * If the receiver waited with a facility not directly related
746 * to Mach messaging, then it isn't prepared to get handed the
747 * message directly. Just set it running, and go look for
748 * another thread that can.
750 if (receiver
->ith_state
!= MACH_RCV_IN_PROGRESS
) {
751 thread_unlock(receiver
);
758 * We found a waiting thread.
759 * If the message is too large or the scatter list is too small
760 * the thread we wake up will get that as its status.
762 msize
= ipc_kmsg_copyout_size(kmsg
, receiver
->map
);
763 if (receiver
->ith_rsize
<
764 (msize
+ REQUESTED_TRAILER_SIZE(thread_is_64bit(receiver
), receiver
->ith_option
))) {
765 receiver
->ith_msize
= msize
;
766 receiver
->ith_state
= MACH_RCV_TOO_LARGE
;
768 receiver
->ith_state
= MACH_MSG_SUCCESS
;
772 * If there is no problem with the upcoming receive, or the
773 * receiver thread didn't specifically ask for special too
774 * large error condition, go ahead and select it anyway.
776 if ((receiver
->ith_state
== MACH_MSG_SUCCESS
) ||
777 !(receiver
->ith_option
& MACH_RCV_LARGE
)) {
778 receiver
->ith_kmsg
= kmsg
;
779 receiver
->ith_seqno
= mqueue
->imq_seqno
++;
781 mach_node_t node
= kmsg
->ikm_node
;
783 thread_unlock(receiver
);
786 /* we didn't need our reserved spot in the queue */
787 ipc_mqueue_release_msgcount(mqueue
, IMQ_NULL
);
790 if (MACH_NODE_VALID(node
) && FPORT_VALID(mqueue
->imq_fport
))
791 flipc_msg_ack(node
, mqueue
, TRUE
);
797 * Otherwise, this thread needs to be released to run
798 * and handle its error without getting the message. We
799 * need to go back and pick another one.
801 receiver
->ith_receiver_name
= mqueue
->imq_receiver_name
;
802 receiver
->ith_kmsg
= IKM_NULL
;
803 receiver
->ith_seqno
= 0;
804 thread_unlock(receiver
);
809 /* clear the waitq boost we may have been given */
810 waitq_clear_promotion_locked(&mqueue
->imq_wait_queue
, current_thread());
811 imq_release_and_unlock(mqueue
, reserved_prepost
);
813 ipc_kmsg_destroy(kmsg
);
815 current_task()->messages_sent
++;
821 ipc_mqueue_receive_results(wait_result_t saved_wait_result
)
823 thread_t self
= current_thread();
824 mach_msg_option_t option
= self
->ith_option
;
827 * why did we wake up?
829 switch (saved_wait_result
) {
830 case THREAD_TIMED_OUT
:
831 self
->ith_state
= MACH_RCV_TIMED_OUT
;
834 case THREAD_INTERRUPTED
:
835 self
->ith_state
= MACH_RCV_INTERRUPTED
;
839 /* something bad happened to the port/set */
840 self
->ith_state
= MACH_RCV_PORT_CHANGED
;
843 case THREAD_AWAKENED
:
845 * We do not need to go select a message, somebody
846 * handed us one (or a too-large indication).
848 switch (self
->ith_state
) {
849 case MACH_RCV_SCATTER_SMALL
:
850 case MACH_RCV_TOO_LARGE
:
852 * Somebody tried to give us a too large
853 * message. If we indicated that we cared,
854 * then they only gave us the indication,
855 * otherwise they gave us the indication
856 * AND the message anyway.
858 if (option
& MACH_RCV_LARGE
) {
862 case MACH_MSG_SUCCESS
:
863 case MACH_PEEK_READY
:
867 panic("ipc_mqueue_receive_results: strange ith_state");
871 panic("ipc_mqueue_receive_results: strange wait_result");
876 ipc_mqueue_receive_continue(
877 __unused
void *param
,
878 wait_result_t wresult
)
880 ipc_mqueue_receive_results(wresult
);
881 mach_msg_receive_continue(); /* hard-coded for now */
885 * Routine: ipc_mqueue_receive
887 * Receive a message from a message queue.
890 * Our caller must hold a reference for the port or port set
891 * to which this queue belongs, to keep the queue
892 * from being deallocated.
894 * The kmsg is returned with clean header fields
895 * and with the circular bit turned off through the ith_kmsg
896 * field of the thread's receive continuation state.
898 * MACH_MSG_SUCCESS Message returned in ith_kmsg.
899 * MACH_RCV_TOO_LARGE Message size returned in ith_msize.
900 * MACH_RCV_TIMED_OUT No message obtained.
901 * MACH_RCV_INTERRUPTED No message obtained.
902 * MACH_RCV_PORT_DIED Port/set died; no message.
903 * MACH_RCV_PORT_CHANGED Port moved into set; no msg.
910 mach_msg_option_t option
,
911 mach_msg_size_t max_size
,
912 mach_msg_timeout_t rcv_timeout
,
915 wait_result_t wresult
;
916 thread_t self
= current_thread();
919 wresult
= ipc_mqueue_receive_on_thread(mqueue
, option
, max_size
,
920 rcv_timeout
, interruptible
,
922 /* mqueue unlocked */
923 if (wresult
== THREAD_NOT_WAITING
)
926 if (wresult
== THREAD_WAITING
) {
927 counter((interruptible
== THREAD_ABORTSAFE
) ?
928 c_ipc_mqueue_receive_block_user
++ :
929 c_ipc_mqueue_receive_block_kernel
++);
931 if (self
->ith_continuation
)
932 thread_block(ipc_mqueue_receive_continue
);
935 wresult
= thread_block(THREAD_CONTINUE_NULL
);
937 ipc_mqueue_receive_results(wresult
);
940 static int mqueue_process_prepost_receive(void *ctx
, struct waitq
*waitq
,
941 struct waitq_set
*wqset
)
943 ipc_mqueue_t port_mq
, *pmq_ptr
;
946 port_mq
= (ipc_mqueue_t
)waitq
;
949 * If there are no messages on this queue, skip it and remove
950 * it from the prepost list
952 if (ipc_kmsg_queue_empty(&port_mq
->imq_messages
))
953 return WQ_ITERATE_INVALIDATE_CONTINUE
;
956 * There are messages waiting on this port.
957 * Instruct the prepost iteration logic to break, but keep the
960 pmq_ptr
= (ipc_mqueue_t
*)ctx
;
963 return WQ_ITERATE_BREAK_KEEP_LOCKED
;
967 * Routine: ipc_mqueue_receive_on_thread
969 * Receive a message from a message queue using a specified thread.
970 * If no message available, assert_wait on the appropriate waitq.
973 * Assumes thread is self.
974 * Called with mqueue locked.
975 * Returns with mqueue unlocked.
976 * May have assert-waited. Caller must block in those cases.
979 ipc_mqueue_receive_on_thread(
981 mach_msg_option_t option
,
982 mach_msg_size_t max_size
,
983 mach_msg_timeout_t rcv_timeout
,
987 wait_result_t wresult
;
990 /* called with mqueue locked */
992 /* no need to reserve anything: we never prepost to anyone */
994 if (!imq_valid(mqueue
)) {
995 /* someone raced us to destroy this mqueue/port! */
998 * ipc_mqueue_receive_results updates the thread's ith_state
999 * TODO: differentiate between rights being moved and
1000 * rights/ports being destroyed (21885327)
1002 return THREAD_RESTART
;
1005 if (imq_is_set(mqueue
)) {
1006 ipc_mqueue_t port_mq
= IMQ_NULL
;
1008 (void)waitq_set_iterate_preposts(&mqueue
->imq_set_queue
,
1010 mqueue_process_prepost_receive
);
1012 if (port_mq
!= IMQ_NULL
) {
1014 * We get here if there is at least one message
1015 * waiting on port_mq. We have instructed the prepost
1016 * iteration logic to leave both the port_mq and the
1017 * set mqueue locked.
1019 * TODO: previously, we would place this port at the
1020 * back of the prepost list...
1025 * Continue on to handling the message with just
1026 * the port mqueue locked.
1028 if (option
& MACH_PEEK_MSG
)
1029 ipc_mqueue_peek_on_thread(port_mq
, option
, thread
);
1031 ipc_mqueue_select_on_thread(port_mq
, mqueue
, option
,
1034 imq_unlock(port_mq
);
1035 return THREAD_NOT_WAITING
;
1037 } else if (imq_is_queue(mqueue
)) {
1038 ipc_kmsg_queue_t kmsgs
;
1041 * Receive on a single port. Just try to get the messages.
1043 kmsgs
= &mqueue
->imq_messages
;
1044 if (ipc_kmsg_queue_first(kmsgs
) != IKM_NULL
) {
1045 if (option
& MACH_PEEK_MSG
)
1046 ipc_mqueue_peek_on_thread(mqueue
, option
, thread
);
1048 ipc_mqueue_select_on_thread(mqueue
, IMQ_NULL
, option
,
1051 return THREAD_NOT_WAITING
;
1054 panic("Unknown mqueue type 0x%x: likely memory corruption!\n",
1055 mqueue
->imq_wait_queue
.waitq_type
);
1059 * Looks like we'll have to block. The mqueue we will
1060 * block on (whether the set's or the local port's) is
1063 if (option
& MACH_RCV_TIMEOUT
) {
1064 if (rcv_timeout
== 0) {
1066 thread
->ith_state
= MACH_RCV_TIMED_OUT
;
1067 return THREAD_NOT_WAITING
;
1071 thread
->ith_option
= option
;
1072 thread
->ith_rsize
= max_size
;
1073 thread
->ith_msize
= 0;
1075 if (option
& MACH_PEEK_MSG
)
1076 thread
->ith_state
= MACH_PEEK_IN_PROGRESS
;
1078 thread
->ith_state
= MACH_RCV_IN_PROGRESS
;
1080 if (option
& MACH_RCV_TIMEOUT
)
1081 clock_interval_to_deadline(rcv_timeout
, 1000*NSEC_PER_USEC
, &deadline
);
1085 thread_set_pending_block_hint(thread
, kThreadWaitPortReceive
);
1086 wresult
= waitq_assert_wait64_locked(&mqueue
->imq_wait_queue
,
1089 TIMEOUT_URGENCY_USER_NORMAL
,
1093 /* preposts should be detected above, not here */
1094 if (wresult
== THREAD_AWAKENED
)
1095 panic("ipc_mqueue_receive_on_thread: sleep walking");
1104 * Routine: ipc_mqueue_peek_on_thread
1106 * A receiver discovered that there was a message on the queue
1107 * before he had to block. Tell a thread about the message queue,
1108 * but don't pick off any messages.
1111 * at least one message on port_mq's message queue
1113 * Returns: (on thread->ith_state)
1114 * MACH_PEEK_READY ith_peekq contains a message queue
1117 ipc_mqueue_peek_on_thread(
1118 ipc_mqueue_t port_mq
,
1119 mach_msg_option_t option
,
1123 assert(option
& MACH_PEEK_MSG
);
1124 assert(ipc_kmsg_queue_first(&port_mq
->imq_messages
) != IKM_NULL
);
1127 * Take a reference on the mqueue's associated port:
1128 * the peeking thread will be responsible to release this reference
1129 * using ip_release_mq()
1131 ip_reference_mq(port_mq
);
1132 thread
->ith_peekq
= port_mq
;
1133 thread
->ith_state
= MACH_PEEK_READY
;
1137 * Routine: ipc_mqueue_select_on_thread
1139 * A receiver discovered that there was a message on the queue
1140 * before he had to block. Pick the message off the queue and
1141 * "post" it to thread.
1144 * thread not locked.
1145 * There is a message.
1146 * No need to reserve prepost objects - it will never prepost
1149 * MACH_MSG_SUCCESS Actually selected a message for ourselves.
1150 * MACH_RCV_TOO_LARGE May or may not have pull it, but it is large
1153 ipc_mqueue_select_on_thread(
1154 ipc_mqueue_t port_mq
,
1155 ipc_mqueue_t set_mq
,
1156 mach_msg_option_t option
,
1157 mach_msg_size_t max_size
,
1161 mach_msg_return_t mr
= MACH_MSG_SUCCESS
;
1162 mach_msg_size_t msize
;
1165 * Do some sanity checking of our ability to receive
1166 * before pulling the message off the queue.
1168 kmsg
= ipc_kmsg_queue_first(&port_mq
->imq_messages
);
1169 assert(kmsg
!= IKM_NULL
);
1172 * If we really can't receive it, but we had the
1173 * MACH_RCV_LARGE option set, then don't take it off
1174 * the queue, instead return the appropriate error
1175 * (and size needed).
1177 msize
= ipc_kmsg_copyout_size(kmsg
, thread
->map
);
1178 if (msize
+ REQUESTED_TRAILER_SIZE(thread_is_64bit(thread
), option
) > max_size
) {
1179 mr
= MACH_RCV_TOO_LARGE
;
1180 if (option
& MACH_RCV_LARGE
) {
1181 thread
->ith_receiver_name
= port_mq
->imq_receiver_name
;
1182 thread
->ith_kmsg
= IKM_NULL
;
1183 thread
->ith_msize
= msize
;
1184 thread
->ith_seqno
= 0;
1185 thread
->ith_state
= mr
;
1190 ipc_kmsg_rmqueue(&port_mq
->imq_messages
, kmsg
);
1192 if (MACH_NODE_VALID(kmsg
->ikm_node
) && FPORT_VALID(port_mq
->imq_fport
))
1193 flipc_msg_ack(kmsg
->ikm_node
, port_mq
, TRUE
);
1195 ipc_mqueue_release_msgcount(port_mq
, set_mq
);
1196 thread
->ith_seqno
= port_mq
->imq_seqno
++;
1197 thread
->ith_kmsg
= kmsg
;
1198 thread
->ith_state
= mr
;
1200 current_task()->messages_received
++;
1205 * Routine: ipc_mqueue_peek_locked
1207 * Peek at a (non-set) message queue to see if it has a message
1208 * matching the sequence number provided (if zero, then the
1209 * first message in the queue) and return vital info about the
1213 * The ipc_mqueue_t is locked by callers.
1214 * Other locks may be held by callers, so this routine cannot block.
1215 * Caller holds reference on the message queue.
1218 ipc_mqueue_peek_locked(ipc_mqueue_t mq
,
1219 mach_port_seqno_t
* seqnop
,
1220 mach_msg_size_t
* msg_sizep
,
1221 mach_msg_id_t
* msg_idp
,
1222 mach_msg_max_trailer_t
* msg_trailerp
,
1225 ipc_kmsg_queue_t kmsgq
;
1227 mach_port_seqno_t seqno
, msgoff
;
1230 assert(!imq_is_set(mq
));
1237 seqno
= mq
->imq_seqno
;
1239 } else if (seqno
>= mq
->imq_seqno
&&
1240 seqno
< mq
->imq_seqno
+ mq
->imq_msgcount
) {
1241 msgoff
= seqno
- mq
->imq_seqno
;
1245 /* look for the message that would match that seqno */
1246 kmsgq
= &mq
->imq_messages
;
1247 kmsg
= ipc_kmsg_queue_first(kmsgq
);
1248 while (msgoff
-- && kmsg
!= IKM_NULL
) {
1249 kmsg
= ipc_kmsg_queue_next(kmsgq
, kmsg
);
1251 if (kmsg
== IKM_NULL
)
1254 /* found one - return the requested info */
1257 if (msg_sizep
!= NULL
)
1258 *msg_sizep
= kmsg
->ikm_header
->msgh_size
;
1259 if (msg_idp
!= NULL
)
1260 *msg_idp
= kmsg
->ikm_header
->msgh_id
;
1261 if (msg_trailerp
!= NULL
)
1262 memcpy(msg_trailerp
,
1263 (mach_msg_max_trailer_t
*)((vm_offset_t
)kmsg
->ikm_header
+
1264 round_msg(kmsg
->ikm_header
->msgh_size
)),
1265 sizeof(mach_msg_max_trailer_t
));
1277 * Routine: ipc_mqueue_peek
1279 * Peek at a (non-set) message queue to see if it has a message
1280 * matching the sequence number provided (if zero, then the
1281 * first message in the queue) and return vital info about the
1285 * The ipc_mqueue_t is unlocked.
1286 * Locks may be held by callers, so this routine cannot block.
1287 * Caller holds reference on the message queue.
1290 ipc_mqueue_peek(ipc_mqueue_t mq
,
1291 mach_port_seqno_t
* seqnop
,
1292 mach_msg_size_t
* msg_sizep
,
1293 mach_msg_id_t
* msg_idp
,
1294 mach_msg_max_trailer_t
* msg_trailerp
,
1301 res
= ipc_mqueue_peek_locked(mq
, seqnop
, msg_sizep
, msg_idp
,
1302 msg_trailerp
, kmsgp
);
1309 * Routine: ipc_mqueue_release_peek_ref
1311 * Release the reference on an mqueue's associated port which was
1312 * granted to a thread in ipc_mqueue_peek_on_thread (on the
1313 * MACH_PEEK_MSG thread wakeup path).
1316 * The ipc_mqueue_t should be locked on entry.
1317 * The ipc_mqueue_t will be _unlocked_ on return
1318 * (and potentially invalid!)
1321 void ipc_mqueue_release_peek_ref(ipc_mqueue_t mq
)
1323 assert(!imq_is_set(mq
));
1324 assert(imq_held(mq
));
1327 * clear any preposts this mq may have generated
1328 * (which would cause subsequent immediate wakeups)
1330 waitq_clear_prepost_locked(&mq
->imq_wait_queue
);
1335 * release the port reference: we need to do this outside the lock
1336 * because we might be holding the last port reference!
/*
 * NOTE(review): extraction-damaged region — braces and a few interior lines
 * (e.g. unused-parameter voidings of ctx/wqset) are missing.  Tokens
 * preserved byte-for-byte; comments only.
 */
1342 * peek at the contained port message queues, break prepost iteration as soon
1343 * as we spot a message on one of the message queues referenced by the set's
1344 * prepost list. No need to lock each message queue, as only the head of each
1345 * queue is checked. If a message wasn't there before we entered here, no need
1346 * to find it (if we do, great).
1348 static int mqueue_peek_iterator(void *ctx
, struct waitq
*waitq
,
1349 struct waitq_set
*wqset
)
/*
 * Direct cast from the iterated waitq to its containing ipc_mqueue —
 * presumably the waitq is the mqueue's first member; verify in the
 * ipc_mqueue definition.
 */
1351 ipc_mqueue_t port_mq
= (ipc_mqueue_t
)waitq
;
1352 ipc_kmsg_queue_t kmsgs
= &port_mq
->imq_messages
;
/* Unlocked head check: any queued kmsg ends the prepost iteration early. */
1357 if (ipc_kmsg_queue_first(kmsgs
) != IKM_NULL
)
1358 return WQ_ITERATE_BREAK
; /* break out of the prepost iteration */
1360 return WQ_ITERATE_CONTINUE
;
/*
 * NOTE(review): extraction-damaged region — the return type, locals,
 * lock/unlock lines, and the early-return body of the validity check are
 * missing from this extract.  Tokens preserved byte-for-byte; comments only.
 */
1364 * Routine: ipc_mqueue_set_peek
1366 * Peek at a message queue set to see if it has any ports
1370 * Locks may be held by callers, so this routine cannot block.
1371 * Caller holds reference on the message queue.
1374 ipc_mqueue_set_peek(ipc_mqueue_t mq
)
1381 * We may have raced with port destruction where the mqueue is marked
1382 * as invalid. In that case, even though we don't have messages, we
1383 * have an end-of-life event to deliver.
1385 if (!imq_is_valid(mq
))
/*
 * Walk the set's prepost list; mqueue_peek_iterator returns
 * WQ_ITERATE_BREAK as soon as a member queue has a message at its head.
 */
1388 ret
= waitq_set_iterate_preposts(&mq
->imq_set_queue
, NULL
,
1389 mqueue_peek_iterator
);
/* True exactly when the iterator broke out, i.e. a message was spotted. */
1393 return (ret
== WQ_ITERATE_BREAK
);
/*
 * NOTE(review): extraction-damaged region — the `space` parameter, the
 * early-return bodies, the `actual` increment, and the final `*actualp`
 * store are missing from this extract.  Tokens preserved byte-for-byte;
 * comments only (one spelling fix in the header comment).
 */
1397 * Routine: ipc_mqueue_set_gather_member_names
1399 * Discover all ports which are members of a given port set.
1400 * Because the waitq linkage mechanism was redesigned to save
1401 * significant amounts of memory, it no longer keeps back-pointers
1402 * from a port set to a port. Therefore, we must iterate over all
1403 * ports within a given IPC space and individually query them to
1404 * see if they are members of the given set. Port names of ports
1405 * found to be members of the given set will be gathered into the
1406 * provided 'names' array. Actual returned names are limited to
1407 * maxnames entries, but we keep counting the actual number of
1408 * members to let the caller decide to retry if necessary.
1411 * Locks may be held by callers, so this routine cannot block.
1412 * Caller holds reference on the message queue (via port set).
1415 ipc_mqueue_set_gather_member_names(
1417 ipc_mqueue_t set_mq
,
1418 ipc_entry_num_t maxnames
,
1419 mach_port_name_t
*names
,
1420 ipc_entry_num_t
*actualp
)
1423 ipc_entry_num_t tsize
;
1424 struct waitq_set
*wqset
;
1425 ipc_entry_num_t actual
= 0;
1427 assert(set_mq
!= IMQ_NULL
);
1428 wqset
= &set_mq
->imq_set_queue
;
/* Bail out (read-unlocking the space) if the space or set is already dead. */
1430 assert(space
!= IS_NULL
);
1431 is_read_lock(space
);
1432 if (!is_active(space
)) {
1433 is_read_unlock(space
);
1437 if (!waitq_set_is_valid(wqset
)) {
1438 is_read_unlock(space
);
/*
 * Linear scan of the space's entire entry table under the read lock;
 * membership is tested per-port via waitq_member().
 */
1442 table
= space
->is_table
;
1443 tsize
= space
->is_table_size
;
1444 for (ipc_entry_num_t idx
= 0; idx
< tsize
; idx
++) {
1445 ipc_entry_t entry
= &table
[idx
];
1447 /* only receive rights can be members of port sets */
1448 if ((entry
->ie_bits
& MACH_PORT_TYPE_RECEIVE
) != MACH_PORT_TYPE_NONE
) {
1449 __IGNORE_WCASTALIGN(ipc_port_t port
= (ipc_port_t
)entry
->ie_object
);
1450 ipc_mqueue_t mq
= &port
->ip_messages
;
1452 assert(IP_VALID(port
));
1453 if (ip_active(port
) &&
1454 waitq_member(&mq
->imq_wait_queue
, wqset
)) {
/* Record the member's name only while room remains in the caller's array. */
1455 if (actual
< maxnames
)
1456 names
[actual
] = mq
->imq_receiver_name
;
1462 is_read_unlock(space
);
/*
 * NOTE(review): extraction-damaged region — the return type, `kmsg`/`first`
 * declarations, wakeup-call arguments, delayed-destroy bookkeeping, and the
 * `return reap` are missing from this extract.  Tokens preserved
 * byte-for-byte; comments only.
 */
1470 * Routine: ipc_mqueue_destroy_locked
1472 * Destroy a (non-set) message queue.
1473 * Set any blocked senders running.
1474 * Destroy the kmsgs in the queue.
1477 * Receivers were removed when the receive right was "changed"
1480 ipc_mqueue_destroy_locked(ipc_mqueue_t mqueue
)
1482 ipc_kmsg_queue_t kmqueue
;
1484 boolean_t reap
= FALSE
;
/* Only port (non-set) mqueues may be destroyed through this path. */
1486 assert(!imq_is_set(mqueue
));
1489 * rouse all blocked senders
1490 * (don't boost anyone - we're tearing this queue down)
1493 mqueue
->imq_fullwaiters
= FALSE
;
1494 waitq_wakeup64_all_locked(&mqueue
->imq_wait_queue
,
1498 WAITQ_ALL_PRIORITIES
,
1502 * Move messages from the specified queue to the per-thread
1503 * clean/drain queue while we have the mqueue lock.
1505 kmqueue
= &mqueue
->imq_messages
;
1506 while ((kmsg
= ipc_kmsg_dequeue(kmqueue
)) != IKM_NULL
) {
/* FLIPC path: ack remote-node messages before they are destroyed. */
1508 if (MACH_NODE_VALID(kmsg
->ikm_node
) && FPORT_VALID(mqueue
->imq_fport
))
1509 flipc_msg_ack(kmsg
->ikm_node
, mqueue
, TRUE
);
1512 first
= ipc_kmsg_delayed_destroy(kmsg
);
1518 * Wipe out message count, both for messages about to be
1519 * reaped and for reserved space for (previously) woken senders.
1520 * This is the indication to them that their reserved space is gone
1521 * (the mqueue was destroyed).
1523 mqueue
->imq_msgcount
= 0;
1525 /* invalidate the waitq for subsequent mqueue operations */
1526 waitq_invalidate_locked(&mqueue
->imq_wait_queue
);
1528 /* clear out any preposting we may have done */
1529 waitq_clear_prepost_locked(&mqueue
->imq_wait_queue
);
1532 * assert that we are destroying / invalidating a queue that's
1533 * not a member of any other queue.
1535 assert(mqueue
->imq_preposts
== 0);
1536 assert(mqueue
->imq_in_pset
== 0);
/*
 * NOTE(review): extraction-damaged region — the return type, locking lines,
 * several wakeup-call arguments, and the loop's `break` on KERN_NOT_WAITING
 * are missing from this extract.  Tokens preserved byte-for-byte; comments
 * only.
 */
1542 * Routine: ipc_mqueue_set_qlimit
1544 * Changes a message queue limit; the maximum number
1545 * of messages which may be queued.
1551 ipc_mqueue_set_qlimit(
1552 ipc_mqueue_t mqueue
,
1553 mach_port_msgcount_t qlimit
)
1556 assert(qlimit
<= MACH_PORT_QLIMIT_MAX
);
1558 /* wake up senders allowed by the new qlimit */
/* Only a limit increase can unblock senders; compute how many slots opened. */
1560 if (qlimit
> mqueue
->imq_qlimit
) {
1561 mach_port_msgcount_t i
, wakeup
;
1563 /* caution: wakeup, qlimit are unsigned */
1564 wakeup
= qlimit
- mqueue
->imq_qlimit
;
1566 for (i
= 0; i
< wakeup
; i
++) {
1568 * boost the priority of the awoken thread
1569 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
1570 * the message queue slot we've just reserved.
1572 * NOTE: this will never prepost
1574 if (waitq_wakeup64_one_locked(&mqueue
->imq_wait_queue
,
1578 WAITQ_PROMOTE_PRIORITY
,
/* KERN_NOT_WAITING: no blocked sender remains — stop reserving slots. */
1579 WAITQ_KEEP_LOCKED
) == KERN_NOT_WAITING
) {
1580 mqueue
->imq_fullwaiters
= FALSE
;
1583 mqueue
->imq_msgcount
++; /* give it to the awakened thread */
1586 mqueue
->imq_qlimit
= qlimit
;
/*
 * NOTE(review): extraction-damaged region — the return type, braces, and
 * the lock/unlock lines around the assignment are missing from this
 * extract.  Tokens preserved byte-for-byte; comments only.
 */
1591 * Routine: ipc_mqueue_set_seqno
1593 * Changes an mqueue's sequence number.
1595 * Caller holds a reference to the queue's containing object.
1598 ipc_mqueue_set_seqno(
1599 ipc_mqueue_t mqueue
,
1600 mach_port_seqno_t seqno
)
/* Overwrite the queue's sequence number with the caller-supplied value. */
1603 mqueue
->imq_seqno
= seqno
;
/*
 * NOTE(review): extraction-damaged region — the `space` parameter, the
 * `entry`/`port`/`pset` declarations, object lock/unlock calls, closing
 * braces, and the final `*objectp`/`*mqueuep` stores are missing from this
 * extract.  Tokens preserved byte-for-byte; comments only (one
 * duplicated-word fix in the header comment).
 */
1609 * Routine: ipc_mqueue_copyin
1611 * Convert a name in a space to a message queue.
1613 * Nothing locked. If successful, the caller gets a ref for
1614 * the object. This ref ensures the continued existence of
1617 * MACH_MSG_SUCCESS Found a message queue.
1618 * MACH_RCV_INVALID_NAME The space is dead.
1619 * MACH_RCV_INVALID_NAME The name doesn't denote a right.
1620 * MACH_RCV_INVALID_NAME
1621 * The denoted right is not receive or port set.
1622 * MACH_RCV_IN_SET Receive right is a member of a set.
1628 mach_port_name_t name
,
1629 ipc_mqueue_t
*mqueuep
,
1630 ipc_object_t
*objectp
)
1633 ipc_object_t object
;
1634 ipc_mqueue_t mqueue
;
/* A dead space cannot resolve any name. */
1636 is_read_lock(space
);
1637 if (!is_active(space
)) {
1638 is_read_unlock(space
);
1639 return MACH_RCV_INVALID_NAME
;
/* Look the name up in the space's entry table; no entry means no right. */
1642 entry
= ipc_entry_lookup(space
, name
);
1643 if (entry
== IE_NULL
) {
1644 is_read_unlock(space
);
1645 return MACH_RCV_INVALID_NAME
;
1648 object
= entry
->ie_object
;
/* Receive right: the message queue is the port's embedded ip_messages. */
1650 if (entry
->ie_bits
& MACH_PORT_TYPE_RECEIVE
) {
1653 __IGNORE_WCASTALIGN(port
= (ipc_port_t
) object
);
1654 assert(port
!= IP_NULL
);
1657 assert(ip_active(port
));
1658 assert(port
->ip_receiver_name
== name
);
1659 assert(port
->ip_receiver
== space
);
1660 is_read_unlock(space
);
1661 mqueue
= &port
->ip_messages
;
/* Port set: the message queue is the set's embedded ips_messages. */
1663 } else if (entry
->ie_bits
& MACH_PORT_TYPE_PORT_SET
) {
1666 __IGNORE_WCASTALIGN(pset
= (ipc_pset_t
) object
);
1667 assert(pset
!= IPS_NULL
);
1670 assert(ips_active(pset
));
1671 is_read_unlock(space
);
1673 mqueue
= &pset
->ips_messages
;
/* Any other right kind (send, send-once, dead name) is not receivable. */
1675 is_read_unlock(space
);
1676 return MACH_RCV_INVALID_NAME
;
1680 * At this point, the object is locked and active,
1681 * the space is unlocked, and mqueue is initialized.
/* Take the object reference promised to the caller in the header comment. */
1684 io_reference(object
);
1689 return MACH_MSG_SUCCESS
;