/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	ipc/ipc_mqueue.c
 *
 *	Functions to manipulate IPC message queues.
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <mach/port.h>
#include <mach/message.h>
#include <mach/sync_policy.h>

#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_mig.h>       /* XXX - for mach_msg_receive_continue */
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/waitq.h>

#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_space.h>

#if MACH_FLIPC
#include <ipc/flipc.h>
#endif

#include <vm/vm_map.h>

#include <sys/event.h>
extern char *proc_name_address(void *p);

int ipc_mqueue_full;            /* address is event for queue space */
int ipc_mqueue_rcv;             /* address is event for message arrival */

/* forward declarations */
static void ipc_mqueue_receive_results(wait_result_t result);
static void ipc_mqueue_peek_on_thread(
	ipc_mqueue_t        port_mq,
	mach_msg_option_t   option,
	thread_t            thread);
/*
 *	Routine:	ipc_mqueue_init
 *	Purpose:
 *		Initialize a newly-allocated message queue.
 */
void
ipc_mqueue_init(
	ipc_mqueue_t            mqueue,
	ipc_mqueue_kind_t       kind)
{
	switch (kind) {
	case IPC_MQUEUE_KIND_SET:
		waitq_set_init(&mqueue->imq_set_queue,
		    SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST,
		    NULL, NULL);
		break;
	case IPC_MQUEUE_KIND_NONE: /* cheat: we really should have "no" mqueue */
	case IPC_MQUEUE_KIND_PORT:
		waitq_init(&mqueue->imq_wait_queue,
		    SYNC_POLICY_FIFO | SYNC_POLICY_TURNSTILE_PROXY);
		ipc_kmsg_queue_init(&mqueue->imq_messages);
		mqueue->imq_seqno = 0;
		mqueue->imq_msgcount = 0;
		mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
		mqueue->imq_context = 0;
		mqueue->imq_fullwaiters = FALSE;
#if MACH_FLIPC
		mqueue->imq_fport = FPORT_NULL;
#endif
		break;
	}
	klist_init(&mqueue->imq_klist);
}
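
/*
 * Illustrative sketch (not part of the original source): a newly allocated
 * port or port set initializes its embedded mqueue with the matching kind:
 *
 *	ipc_mqueue_init(&port->ip_messages, IPC_MQUEUE_KIND_PORT);
 *	ipc_mqueue_init(&pset->ips_messages, IPC_MQUEUE_KIND_SET);
 *
 * ip_messages/ips_messages are the embedded queues this file manipulates
 * throughout (see ipc_mqueue_copyin at the bottom of the file).
 */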
void
ipc_mqueue_deinit(
	ipc_mqueue_t            mqueue)
{
	boolean_t is_set = imq_is_set(mqueue);

	if (is_set) {
		waitq_set_deinit(&mqueue->imq_set_queue);
	} else {
		waitq_deinit(&mqueue->imq_wait_queue);
	}
}
/*
 *	Routine:	imq_reserve_and_lock
 *	Purpose:
 *		Atomically lock an ipc_mqueue_t object and reserve
 *		an appropriate number of prepost linkage objects for
 *		use in wakeup operations.
 */
void
imq_reserve_and_lock(ipc_mqueue_t mq, uint64_t *reserved_prepost)
{
	*reserved_prepost = waitq_prepost_reserve(&mq->imq_wait_queue, 0,
	    WAITQ_KEEP_LOCKED);
}
/*
 *	Routine:	imq_release_and_unlock
 *	Purpose:
 *		Unlock an ipc_mqueue_t object, re-enable interrupts,
 *		and release any unused prepost object reservations.
 */
void
imq_release_and_unlock(ipc_mqueue_t mq, uint64_t reserved_prepost)
{
	assert(imq_held(mq));
	waitq_unlock(&mq->imq_wait_queue);
	waitq_prepost_release_reserve(reserved_prepost);
}
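
/*
 * Illustrative pairing (not part of the original source): wakeup paths
 * that may prepost bracket their work with the two helpers above:
 *
 *	uint64_t reserved_prepost = 0;
 *	imq_reserve_and_lock(mqueue, &reserved_prepost);
 *	... wake threads, possibly consuming prepost reservations ...
 *	imq_release_and_unlock(mqueue, reserved_prepost);
 *
 * ipc_mqueue_post() below follows exactly this pattern.
 */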
/*
 *	Routine:	ipc_mqueue_member
 *	Purpose:
 *		Indicate whether the (port) mqueue is a member of
 *		this portset's mqueue.  We do this by checking
 *		whether the portset mqueue's waitq is a member of
 *		the port's mqueue waitq.
 *	Conditions:
 *		If the portset's mqueue is not already a member,
 *		this may block while allocating linkage structures.
 */
boolean_t
ipc_mqueue_member(
	ipc_mqueue_t            port_mqueue,
	ipc_mqueue_t            set_mqueue)
{
	struct waitq *port_waitq = &port_mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;

	return waitq_member(port_waitq, set_waitq);
}
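
/*
 * Illustrative use (not part of the original source): membership can be
 * queried before tearing an association down, e.g.:
 *
 *	if (ipc_mqueue_member(port_mq, set_mq)) {
 *		(void)ipc_mqueue_remove(port_mq, set_mq);
 *	}
 */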
/*
 *	Routine:	ipc_mqueue_remove
 *	Purpose:
 *		Remove the association between the queue and the specified
 *		set message queue.
 */
kern_return_t
ipc_mqueue_remove(
	ipc_mqueue_t            mqueue,
	ipc_mqueue_t            set_mqueue)
{
	struct waitq *mq_waitq = &mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;

	return waitq_unlink(mq_waitq, set_waitq);
}
/*
 *	Routine:	ipc_mqueue_remove_from_all
 *	Purpose:
 *		Remove the mqueue from all the sets it is a member of
 *	Returns:
 *		mqueue unlocked and set links deallocated
 */
void
ipc_mqueue_remove_from_all(ipc_mqueue_t mqueue)
{
	struct waitq *mq_waitq = &mqueue->imq_wait_queue;
	kern_return_t kr;

	imq_lock(mqueue);

	assert(waitq_valid(mq_waitq));
	kr = waitq_unlink_all_unlock(mq_waitq);
	/* mqueue unlocked and set links deallocated */
}
/*
 *	Routine:	ipc_mqueue_remove_all
 *	Purpose:
 *		Remove all the member queues from the specified set.
 *		Also removes the queue from any containing sets.
 *	Returns:
 *		mqueue unlocked, all set links deallocated
 */
void
ipc_mqueue_remove_all(ipc_mqueue_t mqueue)
{
	struct waitq_set *mq_setq = &mqueue->imq_set_queue;

	imq_lock(mqueue);
	assert(waitqs_is_set(mq_setq));
	waitq_set_unlink_all_unlock(mq_setq);
	/* mqueue unlocked, set links deallocated */
}
/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wakeup
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 */
kern_return_t
ipc_mqueue_add(
	ipc_mqueue_t            port_mqueue,
	ipc_mqueue_t            set_mqueue,
	uint64_t                *reserved_link,
	uint64_t                *reserved_prepost)
{
	struct waitq     *port_waitq = &port_mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t       kmsg, next;
	kern_return_t    kr;

	assert(reserved_link && *reserved_link != 0);
	assert(waitqs_is_linked(set_waitq));

	imq_lock(port_mqueue);

	/*
	 * The link operation is now under the same lock-hold as
	 * message iteration and thread wakeup, but doesn't have to be...
	 */
	kr = waitq_link(port_waitq, set_waitq, WAITQ_ALREADY_LOCKED, reserved_link);
	if (kr != KERN_SUCCESS) {
		imq_unlock(port_mqueue);
		return kr;
	}

	/*
	 * Now that the set has been added to the port, there may be
	 * messages queued on the port and threads waiting on the set
	 * waitq.  Lets get them together.
	 */
	kmsgq = &port_mqueue->imq_messages;
	for (kmsg = ipc_kmsg_queue_first(kmsgq);
	    kmsg != IKM_NULL;
	    kmsg = next) {
		next = ipc_kmsg_queue_next(kmsgq, kmsg);

		for (;;) {
			thread_t th;
			mach_msg_size_t msize;
			spl_t th_spl;

			th = waitq_wakeup64_identify_locked(
				port_waitq,
				IPC_MQUEUE_RECEIVE,
				THREAD_AWAKENED, &th_spl,
				reserved_prepost, WAITQ_ALL_PRIORITIES,
				WAITQ_KEEP_LOCKED);
			/* waitq/mqueue still locked, thread locked */

			if (th == THREAD_NULL) {
				goto leave;
			}

			/*
			 * If the receiver waited with a facility not directly
			 * related to Mach messaging, then it isn't prepared to get
			 * handed the message directly.  Just set it running, and
			 * go look for another thread that can.
			 */
			if (th->ith_state != MACH_RCV_IN_PROGRESS) {
				if (th->ith_state == MACH_PEEK_IN_PROGRESS) {
					/*
					 * wakeup the peeking thread, but
					 * continue to loop over the threads
					 * waiting on the port's mqueue to see
					 * if there are any actual receivers
					 */
					ipc_mqueue_peek_on_thread(port_mqueue,
					    th->ith_option, th);
				}
				thread_unlock(th);
				splx(th_spl);
				continue;
			}

			/*
			 * Found a receiver. see if they can handle the message
			 * correctly (the message is not too large for them, or
			 * they didn't care to be informed that the message was
			 * too large).  If they can't handle it, take them off
			 * the list and let them go back and figure it out and
			 * just move onto the next.
			 */
			msize = ipc_kmsg_copyout_size(kmsg, th->map);
			if (th->ith_rsize <
			    (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(th), th->ith_option))) {
				th->ith_state = MACH_RCV_TOO_LARGE;
				th->ith_msize = msize;
				if (th->ith_option & MACH_RCV_LARGE) {
					/*
					 * let him go without message
					 */
					th->ith_receiver_name = port_mqueue->imq_receiver_name;
					th->ith_kmsg = IKM_NULL;
					th->ith_seqno = 0;
					thread_unlock(th);
					splx(th_spl);
					continue; /* find another thread */
				}
			} else {
				th->ith_state = MACH_MSG_SUCCESS;
			}

			/*
			 * This thread is going to take this message,
			 * so give it to him.
			 */
			ipc_kmsg_rmqueue(kmsgq, kmsg);
#if MACH_FLIPC
			mach_node_t node = kmsg->ikm_node;
#endif
			ipc_mqueue_release_msgcount(port_mqueue, IMQ_NULL);

			th->ith_kmsg = kmsg;
			th->ith_seqno = port_mqueue->imq_seqno++;
			thread_unlock(th);
			splx(th_spl);
#if MACH_FLIPC
			if (MACH_NODE_VALID(node) && FPORT_VALID(port_mqueue->imq_fport)) {
				flipc_msg_ack(node, port_mqueue, TRUE);
			}
#endif
			break;  /* go to next message */
		}
	}
leave:
	imq_unlock(port_mqueue);
	return KERN_SUCCESS;
}
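
/*
 * Illustrative note (not part of the original source): the asserts above
 * require the caller to reserve waitq linkage and prepost objects before
 * calling. A sketch of such a caller, assuming the kern/waitq.h
 * reservation API (waitq_link_reserve/waitq_prepost_reserve and their
 * release counterparts):
 *
 *	uint64_t reserved_link = waitq_link_reserve(NULL);
 *	uint64_t reserved_prepost = waitq_prepost_reserve(NULL, 10,
 *	    WAITQ_DONT_LOCK);
 *	kr = ipc_mqueue_add(port_mq, set_mq,
 *	    &reserved_link, &reserved_prepost);
 *	waitq_link_release(reserved_link);
 *	waitq_prepost_release_reserve(reserved_prepost);
 */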
/*
 *	Routine:	ipc_mqueue_has_klist
 *	Purpose:
 *		Returns whether the given mqueue imq_klist field can be used as a klist.
 */
static bool
ipc_mqueue_has_klist(ipc_mqueue_t mqueue)
{
	ipc_object_t object = imq_to_object(mqueue);
	if (io_otype(object) != IOT_PORT) {
		return true;
	}
	ipc_port_t port = ip_from_mq(mqueue);
	if (port->ip_specialreply) {
		return false;
	}
	return port->ip_sync_link_state == PORT_SYNC_LINK_ANY;
}
/*
 *	Routine:	ipc_mqueue_changed
 *	Purpose:
 *		Wake up receivers waiting in a message queue.
 *	Conditions:
 *		The message queue is locked.
 */
void
ipc_mqueue_changed(
	ipc_space_t             space,
	ipc_mqueue_t            mqueue)
{
	if (ipc_mqueue_has_klist(mqueue) && SLIST_FIRST(&mqueue->imq_klist)) {
		/*
		 * Indicate that this message queue is vanishing.
		 *
		 * When this is called, the associated receive right may be in flight
		 * between two tasks: the one it used to live in, and the one that armed
		 * a port destroyed notification for it.
		 *
		 * The new process may want to register the port it gets back with an
		 * EVFILT_MACHPORT filter again, and may have sync IPC on this
		 * port pending already, in which case we want the imq_klist field to be
		 * reusable for nefarious purposes.
		 *
		 * Fortunately, we really don't need this linkage anymore after this
		 * point as EV_VANISHED / EV_EOF will be the last thing delivered ever.
		 *
		 * Note: we don't have the space lock here, however, this covers the
		 *       case of when a task is terminating the space, triggering
		 *       several knote_vanish() calls.
		 *
		 *       We don't need the lock to observe that the space is inactive as
		 *       we just deactivated it on the same thread.
		 *
		 *       We still need to call knote_vanish() so that the knote is
		 *       marked with EV_VANISHED or EV_EOF so that the detach step
		 *       in filt_machportdetach is skipped correctly.
		 */
		knote_vanish(&mqueue->imq_klist, is_active(space));
	}

	if (io_otype(imq_to_object(mqueue)) == IOT_PORT) {
		ipc_port_adjust_sync_link_state_locked(ip_from_mq(mqueue), PORT_SYNC_LINK_ANY, NULL);
	} else {
		klist_init(&mqueue->imq_klist);
	}

	waitq_wakeup64_all_locked(&mqueue->imq_wait_queue,
	    IPC_MQUEUE_RECEIVE,
	    THREAD_RESTART,
	    NULL,
	    WAITQ_ALL_PRIORITIES,
	    WAITQ_KEEP_LOCKED);
}
/*
 *	Routine:	ipc_mqueue_send
 *	Purpose:
 *		Send a message to a message queue.  The message holds a reference
 *		for the destination port for this message queue in the
 *		msgh_remote_port field.
 *
 *		If unsuccessful, the caller still has possession of
 *		the message and must do something with it.  If successful,
 *		the message is queued, given to a receiver, or destroyed.
 *	Returns:
 *		MACH_MSG_SUCCESS	The message was accepted.
 *		MACH_SEND_TIMED_OUT	Caller still has message.
 *		MACH_SEND_INTERRUPTED	Caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
	ipc_mqueue_t        mqueue,
	ipc_kmsg_t          kmsg,
	mach_msg_option_t   option,
	mach_msg_timeout_t  send_timeout)
{
	int wresult;

	/*
	 *  Don't block if:
	 *	1) We're under the queue limit.
	 *	2) Caller used the MACH_SEND_ALWAYS internal option.
	 *	3) Message is sent to a send-once right.
	 */
	if (!imq_full(mqueue) ||
	    (!imq_full_kernel(mqueue) &&
	    ((option & MACH_SEND_ALWAYS) ||
	    (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
	    MACH_MSG_TYPE_PORT_SEND_ONCE)))) {
		mqueue->imq_msgcount++;
		assert(mqueue->imq_msgcount > 0);
		imq_unlock(mqueue);
	} else {
		thread_t cur_thread = current_thread();
		ipc_port_t port = ip_from_mq(mqueue);
		struct turnstile *send_turnstile = TURNSTILE_NULL;
		uint64_t deadline;

		/*
		 * We have to wait for space to be granted to us.
		 */
		if ((option & MACH_SEND_TIMEOUT) && (send_timeout == 0)) {
			imq_unlock(mqueue);
			return MACH_SEND_TIMED_OUT;
		}
		if (imq_full_kernel(mqueue)) {
			imq_unlock(mqueue);
			return MACH_SEND_NO_BUFFER;
		}
		mqueue->imq_fullwaiters = TRUE;

		if (option & MACH_SEND_TIMEOUT) {
			clock_interval_to_deadline(send_timeout, 1000 * NSEC_PER_USEC, &deadline);
		} else {
			deadline = 0;
		}

		thread_set_pending_block_hint(cur_thread, kThreadWaitPortSend);

		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_DELAYED_UPDATE);

		wresult = waitq_assert_wait64_leeway(
			&send_turnstile->ts_waitq,
			IPC_MQUEUE_FULL,
			THREAD_ABORTSAFE,
			TIMEOUT_URGENCY_USER_NORMAL,
			deadline,
			TIMEOUT_NO_LEEWAY);

		imq_unlock(mqueue);
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);

		if (wresult == THREAD_WAITING) {
			wresult = thread_block(THREAD_CONTINUE_NULL);
			counter(c_ipc_mqueue_send_block++);
		}

		/* Call turnstile complete with interlock held */
		imq_lock(mqueue);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
		imq_unlock(mqueue);

		/* Call cleanup after dropping the interlock */
		turnstile_cleanup();

		switch (wresult) {
		case THREAD_AWAKENED:
			/*
			 * we can proceed - inherited msgcount from waker
			 * or the message queue has been destroyed and the msgcount
			 * has been reset to zero (will detect in ipc_mqueue_post()).
			 */
			break;

		case THREAD_TIMED_OUT:
			assert(option & MACH_SEND_TIMEOUT);
			return MACH_SEND_TIMED_OUT;

		case THREAD_INTERRUPTED:
			return MACH_SEND_INTERRUPTED;

		case THREAD_RESTART:
			/* mqueue is being destroyed */
			return MACH_SEND_INVALID_DEST;
		default:
			panic("ipc_mqueue_send");
		}
	}

	ipc_mqueue_post(mqueue, kmsg, option);
	return MACH_MSG_SUCCESS;
}
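
/*
 * Illustrative caller-side handling (not part of the original source): on
 * MACH_SEND_TIMED_OUT / MACH_SEND_INTERRUPTED the caller still owns the
 * kmsg and must dispose of it:
 *
 *	mr = ipc_mqueue_send(mqueue, kmsg, option, send_timeout);
 *	if (mr != MACH_MSG_SUCCESS) {
 *		ipc_kmsg_destroy(kmsg);	(or hand it back to the sender)
 *	}
 *
 * On MACH_MSG_SUCCESS the message was queued, given to a receiver, or
 * destroyed, and must not be touched again.
 */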
/*
 *	Routine:	ipc_mqueue_override_send
 *	Purpose:
 *		Set an override qos on the first message in the queue
 *		(if the queue is full). This is a send-possible override
 *		that will go away as soon as we drain a message from the
 *		queue.
 *	Conditions:
 *		The message queue is not locked.
 *		The caller holds a reference on the message queue.
 */
void
ipc_mqueue_override_send(
	ipc_mqueue_t        mqueue,
	mach_msg_qos_t      qos_ovr)
{
	boolean_t __unused full_queue_empty = FALSE;

	imq_lock(mqueue);
	assert(imq_valid(mqueue));
	assert(!imq_is_set(mqueue));

	if (imq_full(mqueue)) {
		ipc_kmsg_t first = ipc_kmsg_queue_first(&mqueue->imq_messages);

		if (first && ipc_kmsg_override_qos(&mqueue->imq_messages, first, qos_ovr)) {
			ipc_object_t object = imq_to_object(mqueue);
			assert(io_otype(object) == IOT_PORT);
			ipc_port_t port = ip_object_to_port(object);
			if (ip_active(port) &&
			    port->ip_receiver_name != MACH_PORT_NULL &&
			    is_active(port->ip_receiver) &&
			    ipc_mqueue_has_klist(mqueue)) {
				KNOTE(&mqueue->imq_klist, 0);
			}
		}
		if (!first) {
			full_queue_empty = TRUE;
		}
	}
	imq_unlock(mqueue);

#if DEVELOPMENT || DEBUG
	if (full_queue_empty) {
		ipc_port_t port = ip_from_mq(mqueue);
		int dst_pid = 0;
		if (ip_active(port) && !port->ip_tempowner &&
		    port->ip_receiver_name && port->ip_receiver &&
		    port->ip_receiver != ipc_space_kernel) {
			dst_pid = task_pid(port->ip_receiver->is_task);
		}
	}
#endif
}
/*
 *	Routine:	ipc_mqueue_release_msgcount
 *	Purpose:
 *		Release a message queue reference in the case where we
 *		found a waiter.
 *	Conditions:
 *		The message queue is locked.
 *		The message corresponding to this reference is off the queue.
 *		There is no need to pass reserved preposts because this will
 *		never prepost to anyone
 */
void
ipc_mqueue_release_msgcount(ipc_mqueue_t port_mq, ipc_mqueue_t set_mq)
{
	struct turnstile *send_turnstile = port_send_turnstile(ip_from_mq(port_mq));

	assert(imq_held(port_mq));
	assert(port_mq->imq_msgcount > 1 || ipc_kmsg_queue_empty(&port_mq->imq_messages));

	port_mq->imq_msgcount--;

	if (!imq_full(port_mq) && port_mq->imq_fullwaiters &&
	    send_turnstile != TURNSTILE_NULL) {
		/*
		 * boost the priority of the awoken thread
		 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
		 * the message queue slot we've just reserved.
		 *
		 * NOTE: this will never prepost
		 *
		 * The wakeup happens on a turnstile waitq
		 * which will wakeup the highest priority waiter.
		 * A potential downside of this would be starving low
		 * priority senders if there is a constant churn of
		 * high priority threads trying to send to this port.
		 */
		if (waitq_wakeup64_one(&send_turnstile->ts_waitq,
		    IPC_MQUEUE_FULL,
		    THREAD_AWAKENED,
		    WAITQ_PROMOTE_PRIORITY) != KERN_SUCCESS) {
			port_mq->imq_fullwaiters = FALSE;
		} else {
			/* gave away our slot - add reference back */
			port_mq->imq_msgcount++;
		}
	}

	if (ipc_kmsg_queue_empty(&port_mq->imq_messages)) {
		/* no more msgs: invalidate the port's prepost object */
		waitq_clear_prepost_locked(&port_mq->imq_wait_queue);
	}
}
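
/*
 * Illustrative accounting (not part of the original source): imq_msgcount
 * counts queued messages plus slots already promised to woken senders.
 * With qlimit 5 and imq_msgcount 5, releasing one message normally drops
 * the count to 4; but if a full-waiter is successfully woken above, the
 * freed slot is handed to that sender and the count stays at 5 until the
 * sender actually enqueues.
 */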
/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *	Conditions:
 *		If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
	ipc_mqueue_t            mqueue,
	ipc_kmsg_t              kmsg,
	mach_msg_option_t       __unused option)
{
	uint64_t reserved_prepost = 0;
	boolean_t destroy_msg = FALSE;

	ipc_kmsg_trace_send(kmsg, option);

	/*
	 *	While the msg queue is locked, we have control of the
	 *	kmsg, so the ref in it for the port is still good.
	 *
	 *	Check for a receiver for the message.
	 */
	imq_reserve_and_lock(mqueue, &reserved_prepost);

	/* we may have raced with port destruction! */
	if (!imq_valid(mqueue)) {
		destroy_msg = TRUE;
		goto out_unlock;
	}

	for (;;) {
		struct waitq *waitq = &mqueue->imq_wait_queue;
		spl_t th_spl;
		thread_t receiver;
		mach_msg_size_t msize;

		receiver = waitq_wakeup64_identify_locked(waitq,
		    IPC_MQUEUE_RECEIVE,
		    THREAD_AWAKENED,
		    &th_spl,
		    &reserved_prepost,
		    WAITQ_ALL_PRIORITIES,
		    WAITQ_KEEP_LOCKED);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {
			/*
			 * no receivers; queue kmsg if space still reserved
			 * Reservations are cancelled when the port goes inactive.
			 * note that this will enqueue the message for any
			 * "peeking" receivers.
			 *
			 * Also, post the knote to wake up any threads waiting
			 * on that style of interface if this insertion is of
			 * note (first insertion, or adjusted override qos all
			 * the way to the head of the queue).
			 *
			 * This is just for ports. portset knotes are stay-active,
			 * and their threads get awakened through the !MACH_RCV_IN_PROGRESS
			 * state handling below.
			 */
			if (mqueue->imq_msgcount > 0) {
				if (ipc_kmsg_enqueue_qos(&mqueue->imq_messages, kmsg)) {
					/* if the space is dead there is no point calling KNOTE */
					ipc_object_t object = imq_to_object(mqueue);
					assert(io_otype(object) == IOT_PORT);
					ipc_port_t port = ip_object_to_port(object);
					if (ip_active(port) &&
					    port->ip_receiver_name != MACH_PORT_NULL &&
					    is_active(port->ip_receiver) &&
					    ipc_mqueue_has_klist(mqueue)) {
						KNOTE(&mqueue->imq_klist, 0);
					}
				}
				break;
			}

			/*
			 * Otherwise, the message queue must belong to an inactive
			 * port, so just destroy the message and pretend it was posted.
			 */
			destroy_msg = TRUE;
			goto out_unlock;
		}

		/*
		 * If a thread is attempting a "peek" into the message queue
		 * (MACH_PEEK_IN_PROGRESS), then we enqueue the message and set the
		 * thread running.  A successful peek is essentially the same as
		 * message delivery since the peeking thread takes responsibility
		 * for delivering the message and (eventually) removing it from
		 * the mqueue.  Only one thread can successfully use the peek
		 * facility on any given port, so we exit the waitq loop after
		 * encountering such a thread.
		 */
		if (receiver->ith_state == MACH_PEEK_IN_PROGRESS && mqueue->imq_msgcount > 0) {
			ipc_kmsg_enqueue_qos(&mqueue->imq_messages, kmsg);
			ipc_mqueue_peek_on_thread(mqueue, receiver->ith_option, receiver);
			thread_unlock(receiver);
			splx(th_spl);
			break;  /* Message was posted, so break out of loop */
		}

		/*
		 * If the receiver waited with a facility not directly related
		 * to Mach messaging, then it isn't prepared to get handed the
		 * message directly.  Just set it running, and go look for
		 * another thread that can.
		 */
		if (receiver->ith_state != MACH_RCV_IN_PROGRESS) {
			thread_unlock(receiver);
			splx(th_spl);
			continue;
		}

		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too small
		 * the thread we wake up will get that as its status.
		 */
		msize = ipc_kmsg_copyout_size(kmsg, receiver->map);
		if (receiver->ith_rsize <
		    (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(receiver), receiver->ith_option))) {
			receiver->ith_msize = msize;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for special too
		 * large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {
			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
#if MACH_FLIPC
			mach_node_t node = kmsg->ikm_node;
#endif
			thread_unlock(receiver);
			splx(th_spl);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue, IMQ_NULL);

#if MACH_FLIPC
			if (MACH_NODE_VALID(node) && FPORT_VALID(mqueue->imq_fport)) {
				flipc_msg_ack(node, mqueue, TRUE);
			}
#endif
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_receiver_name = mqueue->imq_receiver_name;
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
		splx(th_spl);
	}

out_unlock:
	/* clear the waitq boost we may have been given */
	waitq_clear_promotion_locked(&mqueue->imq_wait_queue, current_thread());
	imq_release_and_unlock(mqueue, reserved_prepost);
	if (destroy_msg) {
		ipc_kmsg_destroy(kmsg);
	}

	current_task()->messages_sent++;
}
static void
ipc_mqueue_receive_results(wait_result_t saved_wait_result)
{
	thread_t self = current_thread();
	mach_msg_option_t option = self->ith_option;

	/*
	 * why did we wake up?
	 */
	switch (saved_wait_result) {
	case THREAD_TIMED_OUT:
		self->ith_state = MACH_RCV_TIMED_OUT;
		return;

	case THREAD_INTERRUPTED:
		self->ith_state = MACH_RCV_INTERRUPTED;
		return;

	case THREAD_RESTART:
		/* something bad happened to the port/set */
		self->ith_state = MACH_RCV_PORT_CHANGED;
		return;

	case THREAD_AWAKENED:
		/*
		 * We do not need to go select a message, somebody
		 * handed us one (or a too-large indication).
		 */
		switch (self->ith_state) {
		case MACH_RCV_SCATTER_SMALL:
		case MACH_RCV_TOO_LARGE:
			/*
			 * Somebody tried to give us a too large
			 * message. If we indicated that we cared,
			 * then they only gave us the indication,
			 * otherwise they gave us the indication
			 * AND the message anyway.
			 */
			if (option & MACH_RCV_LARGE) {
				return;
			}
		/* FALLTHROUGH */
		case MACH_MSG_SUCCESS:
			return;
		case MACH_PEEK_READY:
			return;

		default:
			panic("ipc_mqueue_receive_results: strange ith_state");
		}

	default:
		panic("ipc_mqueue_receive_results: strange wait_result");
	}
}
void
ipc_mqueue_receive_continue(
	__unused void *param,
	wait_result_t wresult)
{
	ipc_mqueue_receive_results(wresult);
	mach_msg_receive_continue();  /* hard-coded for now */
}
/*
 *	Routine:	ipc_mqueue_receive
 *	Purpose:
 *		Receive a message from a message queue.
 *	Conditions:
 *		Our caller must hold a reference for the port or port set
 *		to which this queue belongs, to keep the queue
 *		from being deallocated.
 *
 *		The kmsg is returned with clean header fields
 *		and with the circular bit turned off through the ith_kmsg
 *		field of the thread's receive continuation state.
 *	Returns:
 *		MACH_MSG_SUCCESS	Message returned in ith_kmsg.
 *		MACH_RCV_TOO_LARGE	Message size returned in ith_msize.
 *		MACH_RCV_TIMED_OUT	No message obtained.
 *		MACH_RCV_INTERRUPTED	No message obtained.
 *		MACH_RCV_PORT_DIED	Port/set died; no message.
 *		MACH_RCV_PORT_CHANGED	Port moved into set; no msg.
 */
void
ipc_mqueue_receive(
	ipc_mqueue_t            mqueue,
	mach_msg_option_t       option,
	mach_msg_size_t         max_size,
	mach_msg_timeout_t      rcv_timeout,
	int                     interruptible)
{
	wait_result_t wresult;
	thread_t self = current_thread();

	imq_lock(mqueue);
	wresult = ipc_mqueue_receive_on_thread(mqueue, option, max_size,
	    rcv_timeout, interruptible,
	    self);
	/* mqueue unlocked */
	if (wresult == THREAD_NOT_WAITING) {
		return;
	}

	if (wresult == THREAD_WAITING) {
		counter((interruptible == THREAD_ABORTSAFE) ?
		    c_ipc_mqueue_receive_block_user++ :
		    c_ipc_mqueue_receive_block_kernel++);

		if (self->ith_continuation) {
			thread_block(ipc_mqueue_receive_continue);
			/* NOTREACHED */
		}

		wresult = thread_block(THREAD_CONTINUE_NULL);
	}
	ipc_mqueue_receive_results(wresult);
}
static int
mqueue_process_prepost_receive(void *ctx, struct waitq *waitq,
    struct waitq_set *wqset)
{
	ipc_mqueue_t port_mq, *pmq_ptr;

	(void)wqset;
	port_mq = (ipc_mqueue_t)waitq;

	/*
	 * If there are no messages on this queue, skip it and remove
	 * it from the prepost list
	 */
	if (ipc_kmsg_queue_empty(&port_mq->imq_messages)) {
		return WQ_ITERATE_INVALIDATE_CONTINUE;
	}

	/*
	 * There are messages waiting on this port.
	 * Instruct the prepost iteration logic to break, but keep the
	 * waitq locked.
	 */
	pmq_ptr = (ipc_mqueue_t *)ctx;
	if (pmq_ptr) {
		*pmq_ptr = port_mq;
	}
	return WQ_ITERATE_BREAK_KEEP_LOCKED;
}
/*
 *	Routine:	ipc_mqueue_receive_on_thread
 *	Purpose:
 *		Receive a message from a message queue using a specified thread.
 *		If no message available, assert_wait on the appropriate waitq.
 *	Conditions:
 *		Assumes thread is self.
 *		Called with mqueue locked.
 *		Returns with mqueue unlocked.
 *		May have assert-waited. Caller must block in those cases.
 */
wait_result_t
ipc_mqueue_receive_on_thread(
	ipc_mqueue_t            mqueue,
	mach_msg_option_t       option,
	mach_msg_size_t         max_size,
	mach_msg_timeout_t      rcv_timeout,
	int                     interruptible,
	thread_t                thread)
{
	wait_result_t           wresult;
	uint64_t                deadline;
	struct turnstile        *rcv_turnstile = TURNSTILE_NULL;

	/* called with mqueue locked */

	/* no need to reserve anything: we never prepost to anyone */

	if (!imq_valid(mqueue)) {
		/* someone raced us to destroy this mqueue/port! */
		imq_unlock(mqueue);
		/*
		 * ipc_mqueue_receive_results updates the thread's ith_state
		 * TODO: differentiate between rights being moved and
		 * rights/ports being destroyed (21885327)
		 */
		return THREAD_RESTART;
	}

	if (imq_is_set(mqueue)) {
		ipc_mqueue_t port_mq = IMQ_NULL;

		(void)waitq_set_iterate_preposts(&mqueue->imq_set_queue,
		    &port_mq,
		    mqueue_process_prepost_receive);

		if (port_mq != IMQ_NULL) {
			/*
			 * We get here if there is at least one message
			 * waiting on port_mq. We have instructed the prepost
			 * iteration logic to leave both the port_mq and the
			 * set mqueue locked.
			 *
			 * TODO: previously, we would place this port at the
			 *       back of the prepost list...
			 */
			imq_unlock(mqueue);

			/*
			 * Continue on to handling the message with just
			 * the port mqueue locked.
			 */
			if (option & MACH_PEEK_MSG) {
				ipc_mqueue_peek_on_thread(port_mq, option, thread);
			} else {
				ipc_mqueue_select_on_thread(port_mq, mqueue, option,
				    max_size, thread);
			}

			imq_unlock(port_mq);
			return THREAD_NOT_WAITING;
		}
	} else if (imq_is_queue(mqueue) || imq_is_turnstile_proxy(mqueue)) {
		ipc_kmsg_queue_t kmsgs;

		/*
		 * Receive on a single port. Just try to get the messages.
		 */
		kmsgs = &mqueue->imq_messages;
		if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
			if (option & MACH_PEEK_MSG) {
				ipc_mqueue_peek_on_thread(mqueue, option, thread);
			} else {
				ipc_mqueue_select_on_thread(mqueue, IMQ_NULL, option,
				    max_size, thread);
			}
			imq_unlock(mqueue);
			return THREAD_NOT_WAITING;
		}
	} else {
		panic("Unknown mqueue type 0x%x: likely memory corruption!\n",
		    mqueue->imq_wait_queue.waitq_type);
	}

	/*
	 * Looks like we'll have to block.  The mqueue we will
	 * block on (whether the set's or the local port's) is
	 * still locked.
	 */
	if (option & MACH_RCV_TIMEOUT) {
		if (rcv_timeout == 0) {
			imq_unlock(mqueue);
			thread->ith_state = MACH_RCV_TIMED_OUT;
			return THREAD_NOT_WAITING;
		}
	}

	thread->ith_option = option;
	thread->ith_rsize = max_size;
	thread->ith_msize = 0;

	if (option & MACH_PEEK_MSG) {
		thread->ith_state = MACH_PEEK_IN_PROGRESS;
	} else {
		thread->ith_state = MACH_RCV_IN_PROGRESS;
	}

	if (option & MACH_RCV_TIMEOUT) {
		clock_interval_to_deadline(rcv_timeout, 1000 * NSEC_PER_USEC, &deadline);
	} else {
		deadline = 0;
	}

	/*
	 * Threads waiting on a reply port (not portset)
	 * will wait on its receive turnstile.
	 *
	 * Donate waiting thread's turnstile and
	 * setup inheritor for special reply port.
	 * Based on the state of the special reply
	 * port, the inheritor would be the send
	 * turnstile of the connection port on which
	 * the send of sync ipc would happen or
	 * workloop's turnstile who would reply to
	 * the sync ipc message.
	 *
	 * Pass in mqueue wait in waitq_assert_wait to
	 * support port set wakeup. The mqueue waitq of port
	 * will be converted to a turnstile waitq
	 * in waitq_assert_wait instead of global waitqs.
	 */
	if (imq_is_turnstile_proxy(mqueue)) {
		ipc_port_t port = ip_from_mq(mqueue);
		rcv_turnstile = turnstile_prepare((uintptr_t)port,
		    port_rcv_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		ipc_port_recv_update_inheritor(port, rcv_turnstile,
		    TURNSTILE_DELAYED_UPDATE);
	}

	thread_set_pending_block_hint(thread, kThreadWaitPortReceive);
	wresult = waitq_assert_wait64_locked(&mqueue->imq_wait_queue,
	    IPC_MQUEUE_RECEIVE,
	    interruptible,
	    TIMEOUT_URGENCY_USER_NORMAL,
	    deadline,
	    TIMEOUT_NO_LEEWAY,
	    thread);
	/* preposts should be detected above, not here */
	if (wresult == THREAD_AWAKENED) {
		panic("ipc_mqueue_receive_on_thread: sleep walking");
	}

	imq_unlock(mqueue);

	/* Check if it's a port mqueue and if it needs to call turnstile_update_inheritor_complete */
	if (rcv_turnstile != TURNSTILE_NULL) {
		turnstile_update_inheritor_complete(rcv_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
	}
	/* It's the caller's responsibility to call turnstile_complete to get the turnstile back */

	return wresult;
}
/*
 *	Routine:	ipc_mqueue_peek_on_thread
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before he had to block. Tell a thread about the message queue,
 *		but don't pick off any messages.
 *	Conditions:
 *		port_mq locked
 *		at least one message on port_mq's message queue
 *
 *	Returns: (on thread->ith_state)
 *		MACH_PEEK_READY		ith_peekq contains a message queue
 */
void
ipc_mqueue_peek_on_thread(
	ipc_mqueue_t        port_mq,
	mach_msg_option_t   option,
	thread_t            thread)
{
	assert(option & MACH_PEEK_MSG);
	assert(ipc_kmsg_queue_first(&port_mq->imq_messages) != IKM_NULL);

	/*
	 * Take a reference on the mqueue's associated port:
	 * the peeking thread will be responsible to release this reference
	 * using ip_release_mq()
	 */
	ip_reference_mq(port_mq);
	thread->ith_peekq = port_mq;
	thread->ith_state = MACH_PEEK_READY;
}
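
/*
 * Illustrative flow (not part of the original source): a thread woken with
 * ith_state == MACH_PEEK_READY consumes the reference roughly as:
 *
 *	ipc_mqueue_t peekq = self->ith_peekq;
 *	... inspect messages, e.g. via ipc_mqueue_peek() ...
 *	imq_lock(peekq);
 *	ipc_mqueue_release_peek_ref(peekq);
 *
 * See ipc_mqueue_release_peek_ref() below for the exact unlock/release
 * contract.
 */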
/*
 *	Routine:	ipc_mqueue_select_on_thread
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before he had to block.  Pick the message off the queue and
 *		"post" it to thread.
 *	Conditions:
 *		mqueue locked.
 *		thread not locked.
 *		There is a message.
 *		No need to reserve prepost objects - it will never prepost
 *
 *	Returns:
 *		MACH_MSG_SUCCESS	Actually selected a message for ourselves.
 *		MACH_RCV_TOO_LARGE	May or may not have pulled it, but it is large
 */
void
ipc_mqueue_select_on_thread(
	ipc_mqueue_t        port_mq,
	ipc_mqueue_t        set_mq,
	mach_msg_option_t   option,
	mach_msg_size_t     max_size,
	thread_t            thread)
{
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	mach_msg_size_t msize;

	/*
	 * Do some sanity checking of our ability to receive
	 * before pulling the message off the queue.
	 */
	kmsg = ipc_kmsg_queue_first(&port_mq->imq_messages);
	assert(kmsg != IKM_NULL);

	/*
	 * If we really can't receive it, but we had the
	 * MACH_RCV_LARGE option set, then don't take it off
	 * the queue, instead return the appropriate error
	 * (and size needed).
	 */
	msize = ipc_kmsg_copyout_size(kmsg, thread->map);
	if (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option) > max_size) {
		mr = MACH_RCV_TOO_LARGE;
		if (option & MACH_RCV_LARGE) {
			thread->ith_receiver_name = port_mq->imq_receiver_name;
			thread->ith_kmsg = IKM_NULL;
			thread->ith_msize = msize;
			thread->ith_seqno = 0;
			thread->ith_state = mr;
			return;
		}
	}

	ipc_kmsg_rmqueue(&port_mq->imq_messages, kmsg);
#if MACH_FLIPC
	if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port_mq->imq_fport)) {
		flipc_msg_ack(kmsg->ikm_node, port_mq, TRUE);
	}
#endif
	ipc_mqueue_release_msgcount(port_mq, set_mq);
	thread->ith_seqno = port_mq->imq_seqno++;
	thread->ith_kmsg = kmsg;
	thread->ith_state = mr;

	current_task()->messages_received++;
}
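
/*
 * Illustrative outcome (not part of the original source): with
 * MACH_RCV_LARGE, an oversized message stays queued and the receiver
 * only learns the size it needs:
 *
 *	thread->ith_state == MACH_RCV_TOO_LARGE
 *	thread->ith_msize == msize		(size needed for copyout)
 *	thread->ith_kmsg  == IKM_NULL		(message left on the queue)
 *
 * Without MACH_RCV_LARGE the message is pulled off anyway and the receive
 * completes with MACH_RCV_TOO_LARGE status.
 */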
/*
 *	Routine:	ipc_mqueue_peek_locked
 *	Purpose:
 *		Peek at a (non-set) message queue to see if it has a message
 *		matching the sequence number provided (if zero, then the
 *		first message in the queue) and return vital info about the
 *		message.
 *	Conditions:
 *		The ipc_mqueue_t is locked by callers.
 *		Other locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue.
 */
unsigned
ipc_mqueue_peek_locked(ipc_mqueue_t mq,
    mach_port_seqno_t * seqnop,
    mach_msg_size_t * msg_sizep,
    mach_msg_id_t * msg_idp,
    mach_msg_max_trailer_t * msg_trailerp,
    ipc_kmsg_t *kmsgp)
{
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno, msgoff;
	unsigned res = 0;

	assert(!imq_is_set(mq));

	seqno = 0;
	if (seqnop != NULL) {
		seqno = *seqnop;
	}

	if (seqno == 0) {
		seqno = mq->imq_seqno;
		msgoff = 0;
	} else if (seqno >= mq->imq_seqno &&
	    seqno < mq->imq_seqno + mq->imq_msgcount) {
		msgoff = seqno - mq->imq_seqno;
	} else {
		goto out;
	}

	/* look for the message that would match that seqno */
	kmsgq = &mq->imq_messages;
	kmsg = ipc_kmsg_queue_first(kmsgq);
	while (msgoff-- && kmsg != IKM_NULL) {
		kmsg = ipc_kmsg_queue_next(kmsgq, kmsg);
	}
	if (kmsg == IKM_NULL) {
		goto out;
	}

	/* found one - return the requested info */
	if (seqnop != NULL) {
		*seqnop = seqno;
	}
	if (msg_sizep != NULL) {
		*msg_sizep = kmsg->ikm_header->msgh_size;
	}
	if (msg_idp != NULL) {
		*msg_idp = kmsg->ikm_header->msgh_id;
	}
	if (msg_trailerp != NULL) {
		memcpy(msg_trailerp,
		    (mach_msg_max_trailer_t *)((vm_offset_t)kmsg->ikm_header +
		    mach_round_msg(kmsg->ikm_header->msgh_size)),
		    sizeof(mach_msg_max_trailer_t));
	}
	if (kmsgp != NULL) {
		*kmsgp = kmsg;
	}

	res = 1;

out:
	return res;
}
/*
 *	Routine:	ipc_mqueue_peek
 *	Purpose:
 *		Peek at a (non-set) message queue to see if it has a message
 *		matching the sequence number provided (if zero, then the
 *		first message in the queue) and return vital info about the
 *		message.
 *	Conditions:
 *		The ipc_mqueue_t is unlocked.
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue.
 */
unsigned
ipc_mqueue_peek(ipc_mqueue_t mq,
    mach_port_seqno_t * seqnop,
    mach_msg_size_t * msg_sizep,
    mach_msg_id_t * msg_idp,
    mach_msg_max_trailer_t * msg_trailerp,
    ipc_kmsg_t *kmsgp)
{
	unsigned res;

	imq_lock(mq);

	res = ipc_mqueue_peek_locked(mq, seqnop, msg_sizep, msg_idp,
	    msg_trailerp, kmsgp);

	imq_unlock(mq);
	return res;
}
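
/*
 * Illustrative use (not part of the original source): passing a seqno of
 * zero peeks at the first queued message:
 *
 *	mach_port_seqno_t seqno = 0;
 *	mach_msg_size_t size;
 *	mach_msg_id_t id;
 *	if (ipc_mqueue_peek(mq, &seqno, &size, &id, NULL, NULL)) {
 *		... a message with msgh_id 'id' is pending ...
 *	}
 */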
/*
 *	Routine:	ipc_mqueue_release_peek_ref
 *	Purpose:
 *		Release the reference on an mqueue's associated port which was
 *		granted to a thread in ipc_mqueue_peek_on_thread (on the
 *		MACH_PEEK_MSG thread wakeup path).
 *	Conditions:
 *		The ipc_mqueue_t should be locked on entry.
 *		The ipc_mqueue_t will be _unlocked_ on return
 *		(and potentially invalid!)
 */
void
ipc_mqueue_release_peek_ref(ipc_mqueue_t mq)
{
	assert(!imq_is_set(mq));
	assert(imq_held(mq));

	/*
	 * clear any preposts this mq may have generated
	 * (which would cause subsequent immediate wakeups)
	 */
	waitq_clear_prepost_locked(&mq->imq_wait_queue);

	imq_unlock(mq);

	/*
	 * release the port reference: we need to do this outside the lock
	 * because we might be holding the last port reference!
	 */
	ip_release_mq(mq);
}
/*
 * peek at the contained port message queues, break prepost iteration as soon
 * as we spot a message on one of the message queues referenced by the set's
 * prepost list.  No need to lock each message queue, as only the head of each
 * queue is checked. If a message wasn't there before we entered here, no need
 * to find it (if we do, great).
 */
static int
mqueue_peek_iterator(void *ctx, struct waitq *waitq,
    struct waitq_set *wqset)
{
	ipc_mqueue_t port_mq = (ipc_mqueue_t)waitq;
	ipc_kmsg_queue_t kmsgs = &port_mq->imq_messages;

	(void)ctx;
	(void)wqset;

	if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
		return WQ_ITERATE_BREAK; /* break out of the prepost iteration */
	}

	return WQ_ITERATE_CONTINUE;
}
/*
 *	Routine:	ipc_mqueue_set_peek
 *	Purpose:
 *		Peek at a message queue set to see if it has any ports
 *		with messages.
 *	Conditions:
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue.
 */
unsigned
ipc_mqueue_set_peek(ipc_mqueue_t mq)
{
	int ret;

	imq_lock(mq);

	/*
	 * We may have raced with port destruction where the mqueue is marked
	 * as invalid. In that case, even though we don't have messages, we
	 * have an end-of-life event to deliver.
	 */
	if (!imq_is_valid(mq)) {
		imq_unlock(mq);
		return 1;
	}

	ret = waitq_set_iterate_preposts(&mq->imq_set_queue, NULL,
	    mqueue_peek_iterator);

	imq_unlock(mq);

	return ret == WQ_ITERATE_BREAK;
}
/*
 *	Routine:	ipc_mqueue_set_gather_member_names
 *	Purpose:
 *		Discover all ports which are members of a given port set.
 *		Because the waitq linkage mechanism was redesigned to save
 *		significant amounts of memory, it no longer keeps back-pointers
 *		from a port set to a port.  Therefore, we must iterate over all
 *		ports within a given IPC space and individually query them to
 *		see if they are members of the given set. Port names of ports
 *		found to be members of the given set will be gathered into the
 *		provided 'names' array.  Actual returned names are limited to
 *		maxnames entries, but we keep counting the actual number of
 *		members to let the caller decide to retry if necessary.
 *	Conditions:
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue (via port set).
 */
void
ipc_mqueue_set_gather_member_names(
	ipc_space_t         space,
	ipc_mqueue_t        set_mq,
	ipc_entry_num_t     maxnames,
	mach_port_name_t    *names,
	ipc_entry_num_t     *actualp)
{
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	struct waitq_set *wqset;
	ipc_entry_num_t actual = 0;

	assert(set_mq != IMQ_NULL);
	wqset = &set_mq->imq_set_queue;

	assert(space != IS_NULL);
	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		goto out;
	}

	if (!waitq_set_is_valid(wqset)) {
		is_read_unlock(space);
		goto out;
	}

	table = space->is_table;
	tsize = space->is_table_size;
	for (ipc_entry_num_t idx = 0; idx < tsize; idx++) {
		ipc_entry_t entry = &table[idx];

		/* only receive rights can be members of port sets */
		if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) != MACH_PORT_TYPE_NONE) {
			ipc_port_t port = ip_object_to_port(entry->ie_object);
			ipc_mqueue_t mq = &port->ip_messages;

			assert(IP_VALID(port));
			if (ip_active(port) &&
			    waitq_member(&mq->imq_wait_queue, wqset)) {
				if (actual < maxnames) {
					names[actual] = mq->imq_receiver_name;
				}
				actual++;
			}
		}
	}

	is_read_unlock(space);

out:
	*actualp = actual;
}
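
/*
 * Illustrative retry (not part of the original source): because membership
 * is counted even past maxnames, a caller can size its buffer and retry:
 *
 *	ipc_entry_num_t actual;
 *	ipc_mqueue_set_gather_member_names(space, set_mq, maxnames,
 *	    names, &actual);
 *	if (actual > maxnames) {
 *		... reallocate 'names' with room for 'actual' entries
 *		    and call again ...
 *	}
 */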
/*
 *	Routine:	ipc_mqueue_destroy_locked
 *	Purpose:
 *		Destroy a (non-set) message queue.
 *		Set any blocked senders running.
 *		Destroy the kmsgs in the queue.
 *	Conditions:
 *		mqueue locked
 *		Receivers were removed when the receive right was "changed"
 */
boolean_t
ipc_mqueue_destroy_locked(ipc_mqueue_t mqueue)
{
	ipc_kmsg_queue_t kmqueue;
	ipc_kmsg_t kmsg;
	boolean_t reap = FALSE;
	struct turnstile *send_turnstile = port_send_turnstile(ip_from_mq(mqueue));

	assert(!imq_is_set(mqueue));

	/*
	 *	rouse all blocked senders
	 *	(don't boost anyone - we're tearing this queue down)
	 */
	mqueue->imq_fullwaiters = FALSE;

	if (send_turnstile != TURNSTILE_NULL) {
		waitq_wakeup64_all(&send_turnstile->ts_waitq,
		    IPC_MQUEUE_FULL,
		    THREAD_RESTART,
		    WAITQ_ALL_PRIORITIES);
	}

	/*
	 * Move messages from the specified queue to the per-thread
	 * clean/drain queue while we have the mqueue lock.
	 */
	kmqueue = &mqueue->imq_messages;
	while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
#if MACH_FLIPC
		if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(mqueue->imq_fport)) {
			flipc_msg_ack(kmsg->ikm_node, mqueue, TRUE);
		}
#endif
		boolean_t first;
		first = ipc_kmsg_delayed_destroy(kmsg);
		if (first) {
			reap = first;
		}
	}

	/*
	 * Wipe out message count, both for messages about to be
	 * reaped and for reserved space for (previously) woken senders.
	 * This is the indication to them that their reserved space is gone
	 * (the mqueue was destroyed).
	 */
	mqueue->imq_msgcount = 0;

	/* invalidate the waitq for subsequent mqueue operations */
	waitq_invalidate_locked(&mqueue->imq_wait_queue);

	/* clear out any preposting we may have done */
	waitq_clear_prepost_locked(&mqueue->imq_wait_queue);

	/*
	 * assert that we are destroying / invalidating a queue that's
	 * not a member of any other queue.
	 */
	assert(mqueue->imq_preposts == 0);
	assert(mqueue->imq_in_pset == 0);

	return reap;
}
/*
 *	Routine:	ipc_mqueue_set_qlimit
 *	Purpose:
 *		Changes a message queue limit; the maximum number
 *		of messages which may be queued.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_mqueue_set_qlimit(
	ipc_mqueue_t            mqueue,
	mach_port_msgcount_t    qlimit)
{
	assert(qlimit <= MACH_PORT_QLIMIT_MAX);

	/* wake up senders allowed by the new qlimit */
	imq_lock(mqueue);
	if (qlimit > mqueue->imq_qlimit) {
		mach_port_msgcount_t i, wakeup;
		struct turnstile *send_turnstile = port_send_turnstile(ip_from_mq(mqueue));

		/* caution: wakeup, qlimit are unsigned */
		wakeup = qlimit - mqueue->imq_qlimit;

		for (i = 0; i < wakeup; i++) {
			/*
			 * boost the priority of the awoken thread
			 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
			 * the message queue slot we've just reserved.
			 *
			 * NOTE: this will never prepost
			 */
			if (send_turnstile == TURNSTILE_NULL ||
			    waitq_wakeup64_one(&send_turnstile->ts_waitq,
			    IPC_MQUEUE_FULL,
			    THREAD_AWAKENED,
			    WAITQ_PROMOTE_PRIORITY) == KERN_NOT_WAITING) {
				mqueue->imq_fullwaiters = FALSE;
				break;
			}
			mqueue->imq_msgcount++; /* give it to the awakened thread */
		}
	}
	mqueue->imq_qlimit = (uint16_t)qlimit;
	imq_unlock(mqueue);
}
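
/*
 * Illustrative effect (not part of the original source): raising the limit
 * on a full queue grants the freed slots to blocked senders. Moving qlimit
 * from 5 to 8 attempts up to 3 wakeups, bumping imq_msgcount once per
 * sender actually woken so the granted slot cannot be stolen in between.
 */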
/*
 *	Routine:	ipc_mqueue_set_seqno
 *	Purpose:
 *		Changes an mqueue's sequence number.
 *	Conditions:
 *		Caller holds a reference to the queue's containing object.
 */
void
ipc_mqueue_set_seqno(
	ipc_mqueue_t            mqueue,
	mach_port_seqno_t       seqno)
{
	imq_lock(mqueue);
	mqueue->imq_seqno = seqno;
	imq_unlock(mqueue);
}
/*
 *	Routine:	ipc_mqueue_copyin
 *	Purpose:
 *		Convert a name in a space to a message queue.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets a ref
 *		for the object.  This ref ensures the continued existence
 *		of the queue.
 *	Returns:
 *		MACH_MSG_SUCCESS	Found a message queue.
 *		MACH_RCV_INVALID_NAME	The space is dead.
 *		MACH_RCV_INVALID_NAME	The name doesn't denote a right.
 *		MACH_RCV_INVALID_NAME
 *			The denoted right is not receive or port set.
 *		MACH_RCV_IN_SET		Receive right is a member of a set.
 */
mach_msg_return_t
ipc_mqueue_copyin(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_mqueue_t            *mqueuep,
	ipc_object_t            *objectp)
{
	ipc_entry_t entry;
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_mqueue_t mqueue;

	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	entry = ipc_entry_lookup(space, name);
	if (entry == IE_NULL) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	bits = entry->ie_bits;
	object = entry->ie_object;

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		ipc_port_t port = ip_object_to_port(object);

		assert(port != IP_NULL);

		ip_lock(port);
		require_ip_active(port);
		assert(port->ip_receiver_name == name);
		assert(port->ip_receiver == space);
		is_read_unlock(space);
		mqueue = &port->ip_messages;
	} else if (bits & MACH_PORT_TYPE_PORT_SET) {
		ipc_pset_t pset = ips_object_to_pset(object);

		assert(pset != IPS_NULL);

		ips_lock(pset);
		assert(ips_active(pset));
		is_read_unlock(space);

		mqueue = &pset->ips_messages;
	} else {
		is_read_unlock(space);
		/* guard exception if we never held the receive right in this entry */
		if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_RCV_INVALID_NAME);
		}
		return MACH_RCV_INVALID_NAME;
	}

	/*
	 *	At this point, the object is locked and active,
	 *	the space is unlocked, and mqueue is initialized.
	 */
	io_reference(object);
	io_unlock(object);

	*objectp = object;
	*mqueuep = mqueue;
	return MACH_MSG_SUCCESS;
}
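
/*
 * Illustrative use (not part of the original source): the receive path
 * resolves a name once, holds the object ref across the receive, then
 * drops it (io_release being the usual counterpart of io_reference):
 *
 *	mr = ipc_mqueue_copyin(space, name, &mqueue, &object);
 *	if (mr == MACH_MSG_SUCCESS) {
 *		ipc_mqueue_receive(mqueue, option, max_size,
 *		    rcv_timeout, THREAD_ABORTSAFE);
 *		io_release(object);
 *	}
 */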
void
imq_lock(ipc_mqueue_t mq)
{
	ipc_object_t object = imq_to_object(mq);
	ipc_object_validate(object);
	waitq_lock(&(mq)->imq_wait_queue);
}

unsigned int
imq_lock_try(ipc_mqueue_t mq)
{
	ipc_object_t object = imq_to_object(mq);
	ipc_object_validate(object);
	return waitq_lock_try(&(mq)->imq_wait_queue);
}