+ ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
+ ipc_kmsg_t first;
+ int result = 0;
+
+ /* mqueue locked by caller */
+ assert(imq_held(mqueue));
+
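+	/*
+	 * NOTE_REVOKE: the port is being deallocated or its receive
+	 * right moved out of the task; deliver one final EV_EOF,
+	 * one-shot event.
+	 */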
+ if (hint == NOTE_REVOKE) {
+ kn->kn_flags |= EV_EOF | EV_ONESHOT;
+ result = 1;
+ } else if (imq_is_valid(mqueue)) {
+ assert(!imq_is_set(mqueue));
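+		/*
+		 * Fire only while a message is actually queued; when the
+		 * knote will direct-receive the message, bias the knote's
+		 * QoS to that of the first waiting message.
+		 */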
+ if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
+ if (kn->kn_sfflags & MACH_RCV_MSG)
+ knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override);
+ result = 1;
+ }
+ }
+
+ return result;
+}
+
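+/*
+ * filt_machporttouch - the knote was re-registered with new
+ * parameters: adopt the new fflags/receive buffer/udata, then
+ * re-evaluate the knote against the current queue contents.
+ */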
+static int
+filt_machporttouch(
+ struct knote *kn,
+ struct kevent_internal_s *kev)
+{
+ ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
+ ipc_kmsg_t first;
+ int result = 0;
+
+ imq_lock(mqueue);
+
+ /* copy in new settings and save off new input fflags */
+ kn->kn_sfflags = kev->fflags;
+ kn->kn_ext[0] = kev->ext[0];
+ kn->kn_ext[1] = kev->ext[1];
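+	/* an EV_UDATA_SPECIFIC knote retains its registration-time udata */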
+ if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
+ kn->kn_udata = kev->udata;
+
+	/*
+	 * If the mqueue is a valid port and there is a message
+	 * that will be direct-received via the knote, update
+	 * the knote qos based on the first message and trigger
+	 * the event. If no message is waiting, reset the QoS
+	 * to the value provided by the kevent.
+	 */
+ if (imq_is_valid(mqueue) && !imq_is_set(mqueue) &&
+ (first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
+ if (kn->kn_sfflags & MACH_RCV_MSG)
+ knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override);
+ result = 1;
+ } else if (kn->kn_sfflags & MACH_RCV_MSG) {
+ knote_adjust_qos(kn,
+ MACH_MSG_PRIORITY_UNSPECIFIED,
+ MACH_MSG_PRIORITY_UNSPECIFIED);
+ }
+ imq_unlock(mqueue);
+
+ return result;
+}
+
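+/*
+ * filt_machportprocess - deliver the event, optionally receiving
+ * the first queued message directly (MACH_RCV_MSG) into either
+ * the kevent-supplied buffer or one carved from the filter
+ * processing data; otherwise just probe the waiting message's
+ * size and, for a port set, the name of the port it arrived on.
+ */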
+static int
+filt_machportprocess(
+ struct knote *kn,
+ struct filt_process_s *process_data,
+ struct kevent_internal_s *kev)
+{
+ ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
+ ipc_object_t object = mqueue_to_object(mqueue);
+ thread_t self = current_thread();
+ boolean_t used_filtprocess_data = FALSE;
+
+ wait_result_t wresult;
+ mach_msg_option_t option;
+ mach_vm_address_t addr;
+ mach_msg_size_t size;
+
+ imq_lock(mqueue);
+
+ /* Capture current state */
+ *kev = kn->kn_kevent;
+
+	/* If already deallocated/moved, return one last EOF event */
+ if (kev->flags & EV_EOF) {
+ imq_unlock(mqueue);
+ return 1;
+ }
+
+	/*
+	 * Only honor supported receive options. If no options are
+	 * provided, just force a MACH_RCV_TOO_LARGE to detect the
+	 * name of the port and the size of the waiting message.
+	 */
+ option = kn->kn_sfflags & (MACH_RCV_MSG|MACH_RCV_LARGE|MACH_RCV_LARGE_IDENTITY|
+ MACH_RCV_TRAILER_MASK|MACH_RCV_VOUCHER);
+
+ if (option & MACH_RCV_MSG) {
+ addr = (mach_vm_address_t) kn->kn_ext[0];
+ size = (mach_msg_size_t) kn->kn_ext[1];
+
+ /*
+ * If the kevent didn't specify a buffer and length, carve a buffer
+ * from the filter processing data according to the flags.
+ */
+ if (size == 0 && process_data != NULL) {
+ used_filtprocess_data = TRUE;
+
+ addr = (mach_vm_address_t)process_data->fp_data_out;
+ size = (mach_msg_size_t)process_data->fp_data_resid;
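+			/*
+			 * MACH_RCV_LARGE(_IDENTITY) keeps a message that
+			 * overflows the carved buffer queued, reporting its
+			 * size (and port name) instead of destroying it.
+			 */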
+ option |= (MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY);
+ if (process_data->fp_flags & KEVENT_FLAG_STACK_DATA)
+ option |= MACH_RCV_STACK;
+ }
+ } else {
+ /* just detect the port name (if a set) and size of the first message */
+ option = MACH_RCV_LARGE;
+ addr = 0;
+ size = 0;
+ }
+
+ /* just use the reference from here on out */
+ io_reference(object);
+
+	/*
+	 * Set up to receive a message or the notification of a
+	 * too-large message. But never allow this call to wait.
+	 * If the user provided additional options, like trailer
+	 * options, pass those through here. But we don't support
+	 * scatter lists through this interface.
+	 */
+ self->ith_object = object;
+ self->ith_msg_addr = addr;
+ self->ith_rsize = size;
+ self->ith_msize = 0;
+ self->ith_option = option;
+ self->ith_receiver_name = MACH_PORT_NULL;
+ self->ith_continuation = NULL;
+ option |= MACH_RCV_TIMEOUT; // never wait
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+
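+	/*
+	 * The ith_* fields staged above are this thread's receive
+	 * state; MACH_RCV_TIMEOUT with the zero timeout below makes
+	 * this a poll that can never block.
+	 */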
+ wresult = ipc_mqueue_receive_on_thread(
+ mqueue,
+ option,
+ size, /* max_size */
+ 0, /* immediate timeout */
+ THREAD_INTERRUPTIBLE,
+ self);
+ /* mqueue unlocked */
+
+ /*
+ * If we timed out, or the process is exiting, just release the
+ * reference on the ipc_object and return zero.
+ */
+ if (wresult == THREAD_RESTART || self->ith_state == MACH_RCV_TIMED_OUT) {
+ io_release(object);
+ return 0;
+ }