+ * Update the send turnstile inheritor for a port.
+ *
+ * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
+ *
+ * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
+ * to push on thread doing the sync ipc.
+ *
+ * 2. a receive right is in transit, and pushes on the send turnstile of its
+ * destination mqueue.
+ *
+ * 3. port was passed as an exec watchport and port is pushing on main thread
+ * of the task.
+ *
+ * 4. a receive right has been stashed on a knote it was copied out "through",
+ * as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
+ * for the special reply port)
+ *
+ * 5. a receive right has been stashed on a knote it was copied out "through",
+ * as the second or more copied out port (same as
+ * PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
+ *
+ * 6. a receive right has been copied out as a part of sync bootstrap checkin
+ * and needs to push on thread doing the sync bootstrap checkin.
+ *
+ * 7. the receive right is monitored by a knote, and pushes on any that is
+ * registered on a workloop. filt_machport makes sure that if such a knote
+ * exists, it is kept as the first item in the knote list, so we never need
+ * to walk.
+ */
+void
+ipc_port_send_update_inheritor(
+ ipc_port_t port,
+ struct turnstile *send_turnstile,
+ turnstile_update_flags_t flags)
+{
+ ipc_mqueue_t mqueue = &port->ip_messages;
+ turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
+ struct knote *kn;
+ /* default inheritor kind is a turnstile; cases 1, 3 and 6 below switch it to a thread */
+ turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;
+
+ assert(imq_held(mqueue));
+
+ if (!ip_active(port)) {
+ /* this port is no longer active, it should not push anywhere */
+ } else if (port->ip_specialreply) {
+ /* Case 1. */
+ if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
+ inheritor = port->ip_messages.imq_srp_owner_thread;
+ inheritor_flags = TURNSTILE_INHERITOR_THREAD;
+ }
+ } else if (port->ip_receiver_name == MACH_PORT_NULL &&
+ port->ip_destination != NULL) {
+ /* Case 2. */
+ inheritor = port_send_turnstile(port->ip_destination);
+ } else if (ipc_port_watchport_elem(port) != NULL) {
+ /* Case 3. */
+ if (prioritize_launch) {
+ assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
+ inheritor = ipc_port_get_watchport_inheritor(port);
+ inheritor_flags = TURNSTILE_INHERITOR_THREAD;
+ }
+ } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
+ /* Case 4. */
+ inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
+ } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
+ /* Case 5. */
+ inheritor = mqueue->imq_inheritor_turnstile;
+ } else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
+ /* Case 6. */
+ if (prioritize_launch) {
+ inheritor = port->ip_messages.imq_inheritor_thread_ref;
+ inheritor_flags = TURNSTILE_INHERITOR_THREAD;
+ }
+ } else if ((kn = SLIST_FIRST(&mqueue->imq_klist))) {
+ /* Case 7. Push on a workloop that is interested */
+ if (filt_machport_kqueue_has_turnstile(kn)) {
+ assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
+ inheritor = filt_ipc_kqueue_turnstile(kn);
+ }
+ }
+
+ /* inheritor may still be TURNSTILE_INHERITOR_NULL here, which clears any existing push */
+ turnstile_update_inheritor(send_turnstile, inheritor,
+ flags | inheritor_flags);
+}
+
+/*
+ * Routine: ipc_port_send_turnstile_prepare
+ * Purpose:
+ * Get a reference on port's send turnstile, if
+ * port does not have a send turnstile then allocate one.
+ *
+ * Conditions:
+ * Nothing is locked.
+ */
+void
+ipc_port_send_turnstile_prepare(ipc_port_t port)
+{
+ struct turnstile *turnstile = TURNSTILE_NULL;
+ struct turnstile *send_turnstile = TURNSTILE_NULL;
+
+retry_alloc:
+ imq_lock(&port->ip_messages);
+
+ /* install a turnstile if there is none, or the existing one has no port refs left */
+ if (port_send_turnstile(port) == NULL ||
+ port_send_turnstile(port)->ts_port_ref == 0) {
+ if (turnstile == TURNSTILE_NULL) {
+ /* cannot allocate with the mqueue lock held: drop it, allocate, retry */
+ imq_unlock(&port->ip_messages);
+ turnstile = turnstile_alloc();
+ goto retry_alloc;
+ }
+
+ send_turnstile = turnstile_prepare((uintptr_t)port,
+ port_send_turnstile_address(port),
+ turnstile, TURNSTILE_SYNC_IPC);
+ /* our allocation was consumed by turnstile_prepare — TODO confirm ownership model */
+ turnstile = TURNSTILE_NULL;
+
+ ipc_port_send_update_inheritor(port, send_turnstile,
+ TURNSTILE_IMMEDIATE_UPDATE);
+
+ /* turnstile complete will be called in ipc_port_send_turnstile_complete */
+ }
+
+ /* Increment turnstile counter */
+ port_send_turnstile(port)->ts_port_ref++;
+ imq_unlock(&port->ip_messages);
+
+ if (send_turnstile) {
+ turnstile_update_inheritor_complete(send_turnstile,
+ TURNSTILE_INTERLOCK_NOT_HELD);
+ }
+ /* raced: another thread installed a turnstile while we allocated; free ours */
+ if (turnstile != TURNSTILE_NULL) {
+ turnstile_deallocate(turnstile);
+ }
+}
+
+
+/*
+ * Routine: ipc_port_send_turnstile_complete
+ * Purpose:
+ * Drop a ref on the port's send turnstile, if the
+ * ref becomes zero, deallocate the turnstile.
+ *
+ * Conditions:
+ * The space might be locked, use safe deallocate.
+ */
+void
+ipc_port_send_turnstile_complete(ipc_port_t port)
+{
+ struct turnstile *turnstile = TURNSTILE_NULL;
+
+ /* Drop turnstile count on dest port */
+ imq_lock(&port->ip_messages);
+
+ port_send_turnstile(port)->ts_port_ref--;
+ if (port_send_turnstile(port)->ts_port_ref == 0) {
+ /* last port ref: detach the turnstile; it is handed back in `turnstile` */
+ turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
+ &turnstile, TURNSTILE_SYNC_IPC);
+ assert(turnstile != TURNSTILE_NULL);
+ }
+ imq_unlock(&port->ip_messages);
+ turnstile_cleanup();
+
+ if (turnstile != TURNSTILE_NULL) {
+ /* _safe variant because the space might be locked (see routine header) */
+ turnstile_deallocate_safe(turnstile);
+ turnstile = TURNSTILE_NULL;
+ }
+}
+
+/*
+ * Routine: ipc_port_rcv_turnstile
+ * Purpose:
+ * Get the port's receive turnstile
+ *
+ * Conditions:
+ * mqueue locked or thread waiting on turnstile is locked.
+ */
+static struct turnstile *
+ipc_port_rcv_turnstile(ipc_port_t port)
+{
+ /* no reference is taken; the result is only stable while the caller's lock is held */
+ return *port_rcv_turnstile_address(port);
+}
+
+
+/*
+ * Routine: ipc_port_link_special_reply_port
+ * Purpose:
+ * Link the special reply port with the destination port.
+ * Allocates turnstile to dest port.
+ *
+ * Conditions:
+ * Nothing is locked.
+ */
+void
+ipc_port_link_special_reply_port(
+ ipc_port_t special_reply_port,
+ ipc_port_t dest_port,
+ boolean_t sync_bootstrap_checkin)
+{
+ boolean_t drop_turnstile_ref = FALSE;
+
+ /* Check if dest_port needs a turnstile */
+ ipc_port_send_turnstile_prepare(dest_port);
+
+ /* Lock the special reply port and establish the linkage */
+ ip_lock(special_reply_port);
+ imq_lock(&special_reply_port->ip_messages);
+
+ if (sync_bootstrap_checkin && special_reply_port->ip_specialreply) {
+ special_reply_port->ip_sync_bootstrap_checkin = 1;
+ }
+
+ /* Check if we need to drop the acquired turnstile ref on dest port */
+ /* link only if this is a genuine special reply port with no existing linkage */
+ if (!special_reply_port->ip_specialreply ||
+ special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
+ special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
+ drop_turnstile_ref = TRUE;
+ } else {
+ /* take a reference on dest_port */
+ ip_reference(dest_port);
+ special_reply_port->ip_sync_inheritor_port = dest_port;
+ special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
+ }
+
+ imq_unlock(&special_reply_port->ip_messages);
+ ip_unlock(special_reply_port);
+
+ if (drop_turnstile_ref) {
+ /* undo the turnstile ref taken by ipc_port_send_turnstile_prepare() above */
+ ipc_port_send_turnstile_complete(dest_port);
+ }
+
+ return;
+}
+
+/*
+ * SRP diagnostics: track whether a special reply port lost its sync IPC
+ * link before a reply message was sent. The bookkeeping bits only exist
+ * on DEVELOPMENT/DEBUG kernels; release kernels compile empty stubs.
+ */
+#if DEVELOPMENT || DEBUG
+inline void
+ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
+{
+ /* clear both diagnostic bits */
+ special_reply_port->ip_srp_lost_link = 0;
+ special_reply_port->ip_srp_msg_sent = 0;
+}
+
+static inline void
+ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
+{
+ /* forget that a message was sent (called when the message was received) */
+ if (special_reply_port->ip_specialreply == 1) {
+ special_reply_port->ip_srp_msg_sent = 0;
+ }
+}
+
+inline void
+ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
+{
+ /* remember that a message was sent to this special reply port */
+ if (special_reply_port->ip_specialreply == 1) {
+ special_reply_port->ip_srp_msg_sent = 1;
+ }
+}
+
+static inline void
+ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
+{
+ /* record a lost link only if no reply message was ever sent */
+ if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
+ special_reply_port->ip_srp_lost_link = 1;
+ }
+}
+
+#else /* DEVELOPMENT || DEBUG */
+inline void
+ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
+{
+ return;
+}
+
+static inline void
+ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
+{
+ return;
+}
+
+inline void
+ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
+{
+ return;
+}
+
+static inline void
+ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
+{
+ return;
+}
+#endif /* DEVELOPMENT || DEBUG */
+
+/*
+ * Routine: ipc_port_adjust_special_reply_port_locked
+ * Purpose:
+ * If the special port has a turnstile, update its inheritor.
+ * Condition:
+ * Special reply port locked on entry.
+ * Special reply port unlocked on return.
+ * The passed in port is a special reply port.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_adjust_special_reply_port_locked(
+ ipc_port_t special_reply_port,
+ struct knote *kn,
+ uint8_t flags,
+ boolean_t get_turnstile)
+{
+ ipc_port_t dest_port = IPC_PORT_NULL;
+ int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
+ turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
+ struct turnstile *ts = TURNSTILE_NULL;
+
+ ip_lock_held(special_reply_port); // ip_sync_link_state is touched
+ imq_lock(&special_reply_port->ip_messages);
+
+ if (!special_reply_port->ip_specialreply) {
+ // only mach_msg_receive_results_complete() calls this with any port
+ assert(get_turnstile);
+ goto not_special;
+ }
+
+ if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
+ ipc_special_reply_port_msg_sent_reset(special_reply_port);
+ }
+
+ if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
+ special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
+ }
+
+ if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
+ special_reply_port->ip_sync_bootstrap_checkin = 0;
+ }
+
+ /* Check if the special reply port is marked non-special */
+ if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
+not_special:
+ if (get_turnstile) {
+ /* hand back the thread-donated rcv turnstile; freed via turnstile_cleanup() below */
+ turnstile_complete((uintptr_t)special_reply_port,
+ port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
+ }
+ imq_unlock(&special_reply_port->ip_messages);
+ ip_unlock(special_reply_port);
+ if (get_turnstile) {
+ turnstile_cleanup();
+ }
+ return;
+ }
+
+ if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
+ if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
+ /* stash may select KNOTE or STASH state via sync_link_state out-param */
+ inheritor = filt_machport_stash_port(kn, special_reply_port,
+ &sync_link_state);
+ }
+ } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
+ sync_link_state = PORT_SYNC_LINK_ANY;
+ }
+
+ /* Check if need to break linkage */
+ if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
+ special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
+ /* nothing to do: no turnstile to return, linkage already broken */
+ imq_unlock(&special_reply_port->ip_messages);
+ ip_unlock(special_reply_port);
+ return;
+ }
+
+ /* tear down the previous linkage state */
+ switch (special_reply_port->ip_sync_link_state) {
+ case PORT_SYNC_LINK_PORT:
+ dest_port = special_reply_port->ip_sync_inheritor_port;
+ special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
+ break;
+ case PORT_SYNC_LINK_WORKLOOP_KNOTE:
+ special_reply_port->ip_sync_inheritor_knote = NULL;
+ break;
+ case PORT_SYNC_LINK_WORKLOOP_STASH:
+ special_reply_port->ip_sync_inheritor_ts = NULL;
+ break;
+ }
+
+ special_reply_port->ip_sync_link_state = sync_link_state;
+
+ /* install the new linkage state */
+ switch (sync_link_state) {
+ case PORT_SYNC_LINK_WORKLOOP_KNOTE:
+ special_reply_port->ip_sync_inheritor_knote = kn;
+ break;
+ case PORT_SYNC_LINK_WORKLOOP_STASH:
+ special_reply_port->ip_sync_inheritor_ts = inheritor;
+ break;
+ case PORT_SYNC_LINK_NO_LINKAGE:
+ if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
+ /* record the lost link for SRP diagnostics (DEVELOPMENT/DEBUG only) */
+ ipc_special_reply_port_lost_link(special_reply_port);
+ }
+ break;
+ }
+
+ /* Get thread's turnstile donated to special reply port */
+ if (get_turnstile) {
+ turnstile_complete((uintptr_t)special_reply_port,
+ port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
+ } else {
+ ts = ipc_port_rcv_turnstile(special_reply_port);
+ if (ts) {
+ /* keep ts alive across the unlock below */
+ turnstile_reference(ts);
+ ipc_port_recv_update_inheritor(special_reply_port, ts,
+ TURNSTILE_IMMEDIATE_UPDATE);
+ }
+ }
+
+ imq_unlock(&special_reply_port->ip_messages);
+ ip_unlock(special_reply_port);
+
+ if (get_turnstile) {
+ turnstile_cleanup();
+ } else if (ts) {
+ /* Call turnstile cleanup after dropping the interlock */
+ turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
+ turnstile_deallocate_safe(ts);
+ }
+
+ /* Release the ref on the dest port and its turnstile */
+ if (dest_port) {
+ ipc_port_send_turnstile_complete(dest_port);
+ /* release the reference on the dest port */
+ ip_release(dest_port);
+ }
+}
+
+/*
+ * Routine: ipc_port_adjust_special_reply_port
+ * Purpose:
+ * If the special port has a turnstile, update its inheritor.
+ * Condition:
+ * Nothing locked.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_adjust_special_reply_port(
+ ipc_port_t port,
+ uint8_t flags)
+{
+ /* Ports that are not special reply ports need no adjustment. */
+ if (!port->ip_specialreply) {
+ return;
+ }
+
+ ip_lock(port);
+ /* The locked variant unlocks the port before returning. */
+ ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
+}
+
+/*
+ * Routine: ipc_port_adjust_sync_link_state_locked
+ * Purpose:
+ * Update the sync link state of the port and the
+ * turnstile inheritor.
+ * Condition:
+ * Port and mqueue locked on entry.
+ * Port and mqueue locked on return.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_adjust_sync_link_state_locked(
+ ipc_port_t port,
+ int sync_link_state,
+ turnstile_inheritor_t inheritor)
+{
+ /* tear down the bookkeeping of the old link state */
+ switch (port->ip_sync_link_state) {
+ case PORT_SYNC_LINK_RCV_THREAD:
+ /* deallocate the thread reference for the inheritor */
+ thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
+ /* Fall through */
+
+ default:
+ /* NOTE(review): imq_klist appears to share storage with the inheritor fields — confirm union layout */
+ klist_init(&port->ip_messages.imq_klist);
+ }
+
+ /* install the bookkeeping for the new link state */
+ switch (sync_link_state) {
+ case PORT_SYNC_LINK_WORKLOOP_KNOTE:
+ port->ip_messages.imq_inheritor_knote = inheritor;
+ break;
+ case PORT_SYNC_LINK_WORKLOOP_STASH:
+ port->ip_messages.imq_inheritor_turnstile = inheritor;
+ break;
+ case PORT_SYNC_LINK_RCV_THREAD:
+ /* The thread could exit without clearing port state, take a thread ref */
+ thread_reference((thread_t)inheritor);
+ port->ip_messages.imq_inheritor_thread_ref = inheritor;
+ break;
+ default:
+ /* any other requested state degrades to PORT_SYNC_LINK_ANY */
+ klist_init(&port->ip_messages.imq_klist);
+ sync_link_state = PORT_SYNC_LINK_ANY;
+ }
+
+ port->ip_sync_link_state = sync_link_state;
+}
+
+
+/*
+ * Routine: ipc_port_adjust_port_locked
+ * Purpose:
+ * If the port has a turnstile, update its inheritor.
+ * Condition:
+ * Port locked on entry.
+ * Port unlocked on return.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_adjust_port_locked(
+ ipc_port_t port,
+ struct knote *kn,
+ boolean_t sync_bootstrap_checkin)
+{
+ int sync_link_state = PORT_SYNC_LINK_ANY;
+ turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
+
+ ip_lock_held(port); // ip_sync_link_state is touched
+ imq_held(&port->ip_messages);
+
+ assert(!port->ip_specialreply);
+
+ if (kn) {
+ inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
+ if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
+ /* in the KNOTE state the knote itself is the inheritor, not the stash result */
+ inheritor = kn;
+ }
+ } else if (sync_bootstrap_checkin) {
+ /* push on the thread performing the sync bootstrap checkin */
+ inheritor = current_thread();
+ sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
+ }
+
+ ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
+ port->ip_sync_bootstrap_checkin = 0;
+
+ ipc_port_send_turnstile_recompute_push_locked(port);
+ /* port and mqueue unlocked */
+}
+
+/*
+ * Routine: ipc_port_clear_sync_rcv_thread_boost_locked
+ * Purpose:
+ * If the port is pushing on rcv thread, clear it.
+ * Condition:
+ * Port locked on entry
+ * mqueue is not locked.
+ * Port unlocked on return.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_clear_sync_rcv_thread_boost_locked(
+ ipc_port_t port)
+{
+ ip_lock_held(port); // ip_sync_link_state is touched
+
+ if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
+ /* not pushing on a receive thread: nothing to clear */
+ ip_unlock(port);
+ return;
+ }
+
+ imq_lock(&port->ip_messages);
+ /* this drops the thread ref held by the RCV_THREAD link state */
+ ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
+
+ ipc_port_send_turnstile_recompute_push_locked(port);
+ /* port and mqueue unlocked */
+}
+
+/*
+ * Routine: ipc_port_add_watchport_elem_locked
+ * Purpose:
+ * Transfer the turnstile boost of watchport to task calling exec.
+ * Condition:
+ * Port locked on entry.
+ * Port unlocked on return.
+ * Returns:
+ * KERN_SUCCESS on success.
+ * KERN_FAILURE otherwise.
+ */
+kern_return_t
+ipc_port_add_watchport_elem_locked(
+ ipc_port_t port,
+ struct task_watchport_elem *watchport_elem,
+ struct task_watchport_elem **old_elem)
+{
+ ip_lock_held(port);
+ imq_held(&port->ip_messages);
+
+ /* Watchport boost only works for non-special active ports mapped in an ipc space */
+ if (!ip_active(port) || port->ip_specialreply ||
+ port->ip_receiver_name == MACH_PORT_NULL) {
+ imq_unlock(&port->ip_messages);
+ ip_unlock(port);
+ return KERN_FAILURE;
+ }
+
+ if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
+ /* Sever the linkage if the port was pushing on knote */
+ ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
+ }
+
+ /* hand the displaced element (if any) back to the caller */
+ *old_elem = ipc_port_update_watchport_elem(port, watchport_elem);
+
+ ipc_port_send_turnstile_recompute_push_locked(port);
+ /* port and mqueue unlocked */
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_clear_watchport_elem_internal_conditional_locked
+ * Purpose:
+ * Remove the turnstile boost of watchport and recompute the push.
+ * Condition:
+ * Port locked on entry.
+ * Port unlocked on return.
+ * Returns:
+ * KERN_SUCCESS on success.
+ * KERN_FAILURE otherwise.
+ */
+kern_return_t
+ipc_port_clear_watchport_elem_internal_conditional_locked(
+ ipc_port_t port,
+ struct task_watchport_elem *watchport_elem)
+{
+ ip_lock_held(port);
+ imq_held(&port->ip_messages);
+
+ /* only clear if the port still carries the element we were asked about */
+ if (ipc_port_watchport_elem(port) != watchport_elem) {
+ imq_unlock(&port->ip_messages);
+ ip_unlock(port);
+ return KERN_FAILURE;
+ }
+
+ ipc_port_clear_watchport_elem_internal(port);
+ ipc_port_send_turnstile_recompute_push_locked(port);
+ /* port and mqueue unlocked */
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_replace_watchport_elem_conditional_locked
+ * Purpose:
+ * Replace the turnstile boost of watchport and recompute the push.
+ * Condition:
+ * Port locked on entry.
+ * Port unlocked on return.
+ * Returns:
+ * KERN_SUCCESS on success.
+ * KERN_FAILURE otherwise.
+ */
+kern_return_t
+ipc_port_replace_watchport_elem_conditional_locked(
+ ipc_port_t port,
+ struct task_watchport_elem *old_watchport_elem,
+ struct task_watchport_elem *new_watchport_elem)
+{
+ ip_lock_held(port);
+ imq_held(&port->ip_messages);
+
+ /* replace only if the port still carries the expected old element */
+ if (ipc_port_watchport_elem(port) != old_watchport_elem) {
+ imq_unlock(&port->ip_messages);
+ ip_unlock(port);
+ return KERN_FAILURE;
+ }
+
+ ipc_port_update_watchport_elem(port, new_watchport_elem);
+ ipc_port_send_turnstile_recompute_push_locked(port);
+ /* port and mqueue unlocked */
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_clear_watchport_elem_internal
+ * Purpose:
+ * Remove the turnstile boost of watchport.
+ * Condition:
+ * Port locked on entry.
+ * Port locked on return.
+ * Returns:
+ * Old task_watchport_elem returned.
+ */
+struct task_watchport_elem *
+ipc_port_clear_watchport_elem_internal(
+ ipc_port_t port)
+{
+ ip_lock_held(port);
+ imq_held(&port->ip_messages);
+
+ /* swap in NULL; the displaced element is returned to the caller */
+ return ipc_port_update_watchport_elem(port, NULL);
+}
+
+/*
+ * Routine: ipc_port_send_turnstile_recompute_push_locked
+ * Purpose:
+ * Update send turnstile inheritor of port and recompute the push.
+ * Condition:
+ * Port locked on entry.
+ * Port unlocked on return.
+ * Returns:
+ * None.
+ */
+static void
+ipc_port_send_turnstile_recompute_push_locked(
+ ipc_port_t port)
+{
+ struct turnstile *send_turnstile = port_send_turnstile(port);
+ if (send_turnstile) {
+ /* keep the turnstile alive across the unlocks below */
+ turnstile_reference(send_turnstile);
+ ipc_port_send_update_inheritor(port, send_turnstile,
+ TURNSTILE_IMMEDIATE_UPDATE);
+ }
+ imq_unlock(&port->ip_messages);
+ ip_unlock(port);
+
+ if (send_turnstile) {
+ /* finish priority propagation without the interlock held */
+ turnstile_update_inheritor_complete(send_turnstile,
+ TURNSTILE_INTERLOCK_NOT_HELD);
+ turnstile_deallocate_safe(send_turnstile);
+ }
+}
+
+/*
+ * Routine: ipc_port_get_watchport_inheritor
+ * Purpose:
+ * Returns inheritor for watchport.
+ *
+ * Conditions:
+ * mqueue locked.
+ * Returns:
+ * watchport inheritor.
+ */
+static thread_t
+ipc_port_get_watchport_inheritor(
+ ipc_port_t port)
+{
+ imq_held(&port->ip_messages);
+ /* caller must guarantee a watchport elem exists; no NULL checks on this chain */
+ return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
+}
+
+/*
+ * Routine: ipc_port_impcount_delta
+ * Purpose:
+ * Adjust only the importance count associated with a port.
+ * If there are any adjustments to be made to receiver task,
+ * those are handled elsewhere.
+ *
+ * For now, be defensive during deductions to make sure the
+ * impcount for the port doesn't underflow zero. This will
+ * go away when the port boost addition is made atomic (see
+ * note in ipc_port_importance_delta()).
+ * Conditions:
+ * The port is referenced and locked.
+ * Nothing else is locked.
+ */
+mach_port_delta_t
+ipc_port_impcount_delta(
+ ipc_port_t port,
+ mach_port_delta_t delta,
+ ipc_port_t __unused base)
+{
+ mach_port_delta_t absdelta;
+
+ if (!ip_active(port)) {
+ /* inactive ports carry no importance */
+ return 0;
+ }
+
+ /* adding/doing nothing is easy */
+ if (delta >= 0) {
+ port->ip_impcount += delta;
+ return delta;
+ }
+
+ /* delta < 0 from here on; absdelta is its magnitude */
+ absdelta = 0 - delta;
+ if (port->ip_impcount >= absdelta) {
+ port->ip_impcount -= absdelta;
+ return delta;
+ }
+
+ /* over-release: report on DEVELOPMENT/DEBUG kernels, then clamp below */
+#if (DEVELOPMENT || DEBUG)
+ if (port->ip_receiver_name != MACH_PORT_NULL) {
+ task_t target_task = port->ip_receiver->is_task;
+ ipc_importance_task_t target_imp = target_task->task_imp_base;
+ const char *target_procname;
+ int target_pid;
+
+ if (target_imp != IIT_NULL) {
+ target_procname = target_imp->iit_procname;
+ target_pid = target_imp->iit_bsd_pid;
+ } else {
+ target_procname = "unknown";
+ target_pid = -1;
+ }
+ printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
+ "dropping %d assertion(s) but port only has %d remaining.\n",
+ port->ip_receiver_name,
+ target_pid, target_procname,
+ absdelta, port->ip_impcount);
+ } else if (base != IP_NULL) {
+ task_t target_task = base->ip_receiver->is_task;
+ ipc_importance_task_t target_imp = target_task->task_imp_base;
+ const char *target_procname;
+ int target_pid;
+
+ if (target_imp != IIT_NULL) {
+ target_procname = target_imp->iit_procname;
+ target_pid = target_imp->iit_bsd_pid;
+ } else {
+ target_procname = "unknown";
+ target_pid = -1;
+ }
+ printf("Over-release of importance assertions for port 0x%lx "
+ "enqueued on port 0x%x with receiver pid %d (%s), "
+ "dropping %d assertion(s) but port only has %d remaining.\n",
+ (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
+ base->ip_receiver_name,
+ target_pid, target_procname,
+ absdelta, port->ip_impcount);
+ }
+#endif
+
+ /* clamp to zero and report how many assertions were actually dropped */
+ delta = 0 - port->ip_impcount;
+ port->ip_impcount = 0;
+ return delta;
+}
+
+/*
+ * Routine: ipc_port_importance_delta_internal
+ * Purpose:
+ * Adjust the importance count through the given port.
+ * If the port is in transit, apply the delta throughout
+ * the chain. Determine if the there is a task at the
+ * base of the chain that wants/needs to be adjusted,
+ * and if so, apply the delta.
+ * Conditions:
+ * The port is referenced and locked on entry.
+ * Importance may be locked.
+ * Nothing else is locked.
+ * The lock may be dropped on exit.
+ * Returns TRUE if lock was dropped.
+ */
+#if IMPORTANCE_INHERITANCE
+
+boolean_t
+ipc_port_importance_delta_internal(
+ ipc_port_t port,
+ natural_t options,
+ mach_port_delta_t *deltap,
+ ipc_importance_task_t *imp_task)
+{
+ ipc_port_t next, base;
+ boolean_t dropped = FALSE;
+
+ *imp_task = IIT_NULL;
+
+ if (*deltap == 0) {
+ return FALSE;
+ }
+
+ assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);
+
+ /* base will become the end of the transit chain (or port itself) */
+ base = port;
+
+ /* if port is in transit, have to search for end of chain */
+ if (ip_active(port) &&
+ port->ip_destination != IP_NULL &&
+ port->ip_receiver_name == MACH_PORT_NULL) {
+ dropped = TRUE;
+
+ ip_unlock(port);
+ ipc_port_multiple_lock(); /* massive serialization */
+ ip_lock(base);
+
+ /* every port along the transit chain is locked here and stays locked;
+ * the adjustment loop below unlocks them one by one */
+ while (ip_active(base) &&
+ base->ip_destination != IP_NULL &&
+ base->ip_receiver_name == MACH_PORT_NULL) {
+ base = base->ip_destination;
+ ip_lock(base);
+ }
+ ipc_port_multiple_unlock();
+ }
+
+ /*
+ * If the port lock is dropped b/c the port is in transit, there is a
+ * race window where another thread can drain messages and/or fire a
+ * send possible notification before we get here.
+ *
+ * We solve this race by checking to see if our caller armed the send
+ * possible notification, whether or not it's been fired yet, and
+ * whether or not we've already set the port's ip_spimportant bit. If
+ * we don't need a send-possible boost, then we'll just apply a
+ * harmless 0-boost to the port.
+ */
+ if (options & IPID_OPTION_SENDPOSSIBLE) {
+ assert(*deltap == 1);
+ if (port->ip_sprequests && port->ip_spimportant == 0) {
+ port->ip_spimportant = 1;
+ } else {
+ /* boost already applied or not armed: degrade to a harmless 0-boost */
+ *deltap = 0;
+ }
+ }
+
+ /* unlock down to the base, adjusting boost(s) at each level */
+ for (;;) {
+ *deltap = ipc_port_impcount_delta(port, *deltap, base);
+
+ if (port == base) {
+ break;
+ }
+
+ /* port is in transit */
+ assert(port->ip_tempowner == 0);
+ next = port->ip_destination;
+ ip_unlock(port);
+ port = next;
+ }
+
+ /* find the task (if any) to boost according to the base */
+ if (ip_active(base)) {
+ if (base->ip_tempowner != 0) {
+ if (IIT_NULL != base->ip_imp_task) {
+ *imp_task = base->ip_imp_task;
+ }
+ /* otherwise don't boost */
+ } else if (base->ip_receiver_name != MACH_PORT_NULL) {
+ ipc_space_t space = base->ip_receiver;
+
+ /* only spaces with boost-accepting tasks */
+ if (space->is_task != TASK_NULL &&
+ ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
+ *imp_task = space->is_task->task_imp_base;
+ }
+ }
+ }
+
+ /*
+ * Only the base is locked. If we have to hold or drop task
+ * importance assertions, we'll have to drop that lock as well.
+ */
+ if (*imp_task != IIT_NULL) {
+ /* take a reference before unlocking base */
+ ipc_importance_task_reference(*imp_task);
+ }
+
+ if (dropped == TRUE) {
+ ip_unlock(base);
+ }
+
+ return dropped;
+}
+#endif /* IMPORTANCE_INHERITANCE */
+
+/*
+ * Routine: ipc_port_importance_delta
+ * Purpose:
+ * Adjust the importance count through the given port.
+ * If the port is in transit, apply the delta throughout
+ * the chain.
+ *
+ * If there is a task at the base of the chain that wants/needs
+ * to be adjusted, apply the delta.
+ * Conditions:
+ * The port is referenced and locked on entry.
+ * Nothing else is locked.
+ * The lock may be dropped on exit.
+ * Returns TRUE if lock was dropped.
+ */
+#if IMPORTANCE_INHERITANCE
+
+boolean_t
+ipc_port_importance_delta(
+ ipc_port_t port,
+ natural_t options,
+ mach_port_delta_t delta)
+{
+ ipc_importance_task_t imp_task = IIT_NULL;
+ boolean_t dropped;
+
+ dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);
+
+ if (IIT_NULL == imp_task || delta == 0) {
+ return dropped;
+ }
+
+ /* must not hold the port lock while holding/dropping task assertions */
+ if (!dropped) {
+ ip_unlock(port);
+ }
+
+ assert(ipc_importance_task_is_any_receiver_type(imp_task));
+
+ if (delta > 0) {
+ ipc_importance_task_hold_internal_assertion(imp_task, delta);
+ } else {
+ ipc_importance_task_drop_internal_assertion(imp_task, -delta);
+ }
+
+ /* drop the ref taken by ipc_port_importance_delta_internal() */
+ ipc_importance_task_release(imp_task);
+ return TRUE;
+}
+#endif /* IMPORTANCE_INHERITANCE */
+
+/*
+ * Routine: ipc_port_make_send_locked
+ * Purpose:
+ * Make a naked send right from a receive right.
+ *
+ * Conditions:
+ * port locked and active.
+ */
+ipc_port_t
+ipc_port_make_send_locked(
+ ipc_port_t port)
+{
+ require_ip_active(port);
+ port->ip_mscount++; /* make-send count */
+ port->ip_srights++; /* one more outstanding send right */
+ ip_reference(port); /* ref backing the returned right */
+ return port;
+}
+
+/*
+ * Routine: ipc_port_make_send
+ * Purpose:
+ * Make a naked send right from a receive right.
+ */
+
+ipc_port_t
+ipc_port_make_send(
+ ipc_port_t port)
+{
+ ipc_port_t result = IP_DEAD;
+
+ /* IP_NULL / IP_DEAD pass straight through */
+ if (!IP_VALID(port)) {
+ return port;
+ }
+
+ ip_lock(port);
+ if (ip_active(port)) {
+ ipc_port_make_send_locked(port);
+ result = port;
+ }
+ ip_unlock(port);
+
+ return result;
+}
+
+/*
+ * Routine: ipc_port_copy_send_locked
+ * Purpose:
+ * Make a naked send right from another naked send right.
+ * Conditions:
+ * port locked and active.
+ */
+void
+ipc_port_copy_send_locked(
+ ipc_port_t port)
+{
+ /* a send right must already exist to be copied */
+ assert(port->ip_srights > 0);
+ port->ip_srights++;
+ ip_reference(port);
+}
+
+/*
+ * Routine: ipc_port_copy_send
+ * Purpose:
+ * Make a naked send right from another naked send right.
+ * IP_NULL -> IP_NULL
+ * IP_DEAD -> IP_DEAD
+ * dead port -> IP_DEAD
+ * live port -> port + ref
+ * Conditions:
+ * Nothing locked except possibly a space.
+ */