+ipc_mqueue_release_peek_ref(ipc_mqueue_t mq)
+{
+ assert(!imq_is_set(mq));
+ assert(imq_held(mq));
+
+ /*
+ * clear any preposts this mq may have generated
+ * (which would cause subsequent immediate wakeups)
+ */
+ waitq_clear_prepost_locked(&mq->imq_wait_queue);
+
+ imq_unlock(mq);
+
+ /*
+ * release the port reference: we need to do this outside the lock
+ * because we might be holding the last port reference! The mqueue is
+ * embedded in its port, so dropping the final reference can free the
+ * very memory that contains the lock itself.
+ */
+ ip_release_mq(mq);
+}
+
+/*
+ * Peek at the contained port message queues and break out of the prepost
+ * iteration as soon as we spot a message on one of the message queues
+ * referenced by the set's prepost list. There is no need to lock each
+ * message queue, as only the head of each queue is checked. If a message
+ * wasn't there before we entered, we are not obliged to find it (though
+ * it's great if we do).
+ */
+static int
+mqueue_peek_iterator(void *ctx, struct waitq *waitq,
+ struct waitq_set *wqset)
+{
+ ipc_mqueue_t port_mq = (ipc_mqueue_t)waitq;
+ ipc_kmsg_queue_t kmsgs = &port_mq->imq_messages;
+
+ (void)ctx;
+ (void)wqset;
+
+ if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
+ return WQ_ITERATE_BREAK; /* break out of the prepost iteration */
+ }
+ return WQ_ITERATE_CONTINUE;
+}
+
+/*
+ * Routine: ipc_mqueue_set_peek
+ * Purpose:
+ * Peek at a message queue set to see if it has any ports
+ * with messages (a caller-side sketch follows this routine).
+ *
+ * Conditions:
+ * Locks may be held by callers, so this routine cannot block.
+ * Caller holds reference on the message queue.
+ */
+unsigned
+ipc_mqueue_set_peek(ipc_mqueue_t mq)
+{
+ int ret;
+
+ imq_lock(mq);
+
+ /*
+ * We may have raced with port destruction where the mqueue is marked
+ * as invalid. In that case, even though we don't have messages, we
+ * have an end-of-life event to deliver.
+ */
+ if (!imq_is_valid(mq)) {
+ imq_unlock(mq);
+ return 1;
+ }
+
+ ret = waitq_set_iterate_preposts(&mq->imq_set_queue, NULL,
+ mqueue_peek_iterator);
+
+ imq_unlock(mq);
+
+ return ret == WQ_ITERATE_BREAK;
+}
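+
+/*
+ * Illustrative sketch (not part of this change): a hypothetical caller,
+ * e.g. an event filter deciding whether a port set should fire, only
+ * needs the boolean answer. 'set_mq' stands for the set's message queue,
+ * on which the caller is assumed to already hold a reference.
+ *
+ *	unsigned has_event = ipc_mqueue_set_peek(set_mq);
+ *
+ * A non-zero result means either that at least one member port has a
+ * queued message, or that the set raced with destruction and has an
+ * end-of-life event to deliver; either way the waiter should be woken.
+ */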
+
+/*
+ * Routine: ipc_mqueue_set_gather_member_names
+ * Purpose:
+ * Discover all ports which are members of a given port set.
+ * Because the waitq linkage mechanism was redesigned to save
+ * significant amounts of memory, it no longer keeps back-pointers
+ * from a port set to a port. Therefore, we must iterate over all
+ * ports within a given IPC space and individually query them to
+ * see if they are members of the given set. Port names of ports
+ * found to be members of the given set will be gathered into the
+ * provided 'names' array. At most maxnames entries are returned, but
+ * we keep counting the actual number of members so the caller can
+ * decide whether to retry with a larger buffer (see the caller-side
+ * sketch following this routine).
+ *
+ * Conditions:
+ * Locks may be held by callers, so this routine cannot block.
+ * Caller holds reference on the message queue (via port set).
+ */
+void
+ipc_mqueue_set_gather_member_names(
+ ipc_space_t space,
+ ipc_mqueue_t set_mq,
+ ipc_entry_num_t maxnames,
+ mach_port_name_t *names,
+ ipc_entry_num_t *actualp)
+{
+ ipc_entry_t table;
+ ipc_entry_num_t tsize;
+ struct waitq_set *wqset;
+ ipc_entry_num_t actual = 0;
+
+ assert(set_mq != IMQ_NULL);
+ wqset = &set_mq->imq_set_queue;
+
+ assert(space != IS_NULL);
+ is_read_lock(space);
+ if (!is_active(space)) {
+ is_read_unlock(space);
+ goto out;
+ }
+
+ if (!waitq_set_is_valid(wqset)) {
+ is_read_unlock(space);
+ goto out;
+ }
+
+ table = space->is_table;
+ tsize = space->is_table_size;
+ for (ipc_entry_num_t idx = 0; idx < tsize; idx++) {
+ ipc_entry_t entry = &table[idx];
+
+ /* only receive rights can be members of port sets */
+ if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) != MACH_PORT_TYPE_NONE) {
+ ipc_port_t port = ip_object_to_port(entry->ie_object);
+ ipc_mqueue_t mq = &port->ip_messages;
+
+ assert(IP_VALID(port));
+ if (ip_active(port) &&
+ waitq_member(&mq->imq_wait_queue, wqset)) {
+ if (actual < maxnames) {
+ names[actual] = mq->imq_receiver_name;
+ }
+ actual++;
+ }
+ }
+ }
+
+ is_read_unlock(space);
+
+out:
+ *actualp = actual;
+}
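+
+/*
+ * Illustrative sketch (not part of this change): a hypothetical caller
+ * can use the "keep counting" contract above to retry with a larger
+ * buffer when the first pass overflows. 'names_alloc' and 'names_free'
+ * are made-up stand-ins for whatever allocator the caller uses; 'space'
+ * and 'set_mq' are assumed to be a valid space and set mqueue on which
+ * the caller holds the required reference.
+ *
+ *	ipc_entry_num_t avail = 16;
+ *	ipc_entry_num_t actual;
+ *	mach_port_name_t *names = names_alloc(avail);
+ *
+ *	for (;;) {
+ *		ipc_mqueue_set_gather_member_names(space, set_mq, avail,
+ *		    names, &actual);
+ *		if (actual <= avail)
+ *			break;
+ *		names_free(names, avail);
+ *		avail = actual;
+ *		names = names_alloc(avail);
+ *	}
+ *
+ * The loop exits only when actual <= avail, at which point names[0]
+ * through names[actual - 1] hold the gathered member port names.
+ */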
+
+/*
+ * Routine: ipc_mqueue_destroy_locked
+ * Purpose:
+ * Destroy a (non-set) message queue.
+ * Set any blocked senders running.
+ * Destroy the kmsgs in the queue.
+ * Conditions:
+ * mqueue locked
+ * Receivers were removed when the receive right was "changed"
+ */
+boolean_t
+ipc_mqueue_destroy_locked(ipc_mqueue_t mqueue)