+ case PORT_SYNC_LINK_WORKLOOP_STASH:
+ inheritor = port->ip_sync_inheritor_ts;
+ break;
+ }
+ }
+
+ turnstile_update_inheritor(rcv_turnstile, inheritor,
+ flags | TURNSTILE_INHERITOR_TURNSTILE);
+}
+
+/*
+ * Update the send turnstile inheritor for a port.
+ *
+ * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
+ *
+ * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
+ * to push on the thread doing the sync ipc.
+ *
+ * 2. a receive right is in transit, and pushes on the send turnstile of its
+ * destination port.
+ *
+ * 3. the port was passed as an exec watchport and is pushing on the main
+ * thread of the task.
+ *
+ * 4. a receive right has been stashed on a knote it was copied out "through",
+ * as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
+ * for the special reply port)
+ *
+ * 5. a receive right has been stashed on a knote it was copied out "through",
+ * as the second or more copied out port (same as
+ * PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
+ *
+ * 6. a receive right has been copied out as part of a sync bootstrap checkin
+ * and needs to push on the thread doing the sync bootstrap checkin.
+ *
+ * 7. the receive right is monitored by a knote, and pushes on any such knote
+ * that is registered on a workloop. filt_machport makes sure that if such a
+ * knote exists, it is kept as the first item in the knote list, so we never
+ * need to walk the list.
+ */
+void
+ipc_port_send_update_inheritor(
+ ipc_port_t port,
+ struct turnstile *send_turnstile,
+ turnstile_update_flags_t flags)
+{
+ ipc_mqueue_t mqueue = &port->ip_messages;
+ turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
+ struct knote *kn;
+ turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;
+
+ assert(imq_held(mqueue));
+
+ if (!ip_active(port)) {
+ /* this port is no longer active; it should not push anywhere */
+ } else if (port->ip_specialreply) {
+ /* Case 1. */
+ if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
+ inheritor = port->ip_messages.imq_srp_owner_thread;
+ inheritor_flags = TURNSTILE_INHERITOR_THREAD;
+ }
+ } else if (port->ip_receiver_name == MACH_PORT_NULL &&
+ port->ip_destination != NULL) {
+ /* Case 2. */
+ inheritor = port_send_turnstile(port->ip_destination);
+ } else if (ipc_port_watchport_elem(port) != NULL) {
+ /* Case 3. */
+ if (prioritize_launch) {
+ assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
+ inheritor = ipc_port_get_watchport_inheritor(port);
+ inheritor_flags = TURNSTILE_INHERITOR_THREAD;
+ }
+ } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
+ /* Case 4. */
+ inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
+ } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
+ /* Case 5. */
+ inheritor = mqueue->imq_inheritor_turnstile;
+ } else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
+ /* Case 6. */
+ if (prioritize_launch) {
+ inheritor = port->ip_messages.imq_inheritor_thread_ref;
+ inheritor_flags = TURNSTILE_INHERITOR_THREAD;
+ }
+ } else if ((kn = SLIST_FIRST(&mqueue->imq_klist))) {
+ /* Case 7. Push on a workloop that is interested */
+ if (filt_machport_kqueue_has_turnstile(kn)) {
+ assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
+ inheritor = filt_ipc_kqueue_turnstile(kn);
+ }
+ }
+
+ turnstile_update_inheritor(send_turnstile, inheritor,
+ flags | inheritor_flags);
+}
+
+/*
+ * Routine: ipc_port_send_turnstile_prepare
+ * Purpose:
+ * Get a reference on the port's send turnstile; if the
+ * port does not have one, allocate it.
+ *
+ * Conditions:
+ * Nothing is locked.
+ */
+void
+ipc_port_send_turnstile_prepare(ipc_port_t port)
+{
+ struct turnstile *turnstile = TURNSTILE_NULL;
+ struct turnstile *send_turnstile = TURNSTILE_NULL;
+
+retry_alloc:
+ imq_lock(&port->ip_messages);
+
+ if (port_send_turnstile(port) == NULL ||
+ port_send_turnstile(port)->ts_port_ref == 0) {
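+ /* no live send turnstile: allocate one outside the mqueue lock if we have not already */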
+ if (turnstile == TURNSTILE_NULL) {
+ imq_unlock(&port->ip_messages);
+ turnstile = turnstile_alloc();
+ goto retry_alloc;
+ }
+
+ send_turnstile = turnstile_prepare((uintptr_t)port,
+ port_send_turnstile_address(port),
+ turnstile, TURNSTILE_SYNC_IPC);
+ turnstile = TURNSTILE_NULL;
+
+ ipc_port_send_update_inheritor(port, send_turnstile,
+ TURNSTILE_IMMEDIATE_UPDATE);
+
+ /* turnstile complete will be called in ipc_port_send_turnstile_complete */
+ }
+
+ /* Increment turnstile counter */
+ port_send_turnstile(port)->ts_port_ref++;
+ imq_unlock(&port->ip_messages);
+
+ if (send_turnstile) {
+ turnstile_update_inheritor_complete(send_turnstile,
+ TURNSTILE_INTERLOCK_NOT_HELD);
+ }
+ if (turnstile != TURNSTILE_NULL) {
+ turnstile_deallocate(turnstile);
+ }
+}
+
+
+/*
+ * Routine: ipc_port_send_turnstile_complete
+ * Purpose:
+ * Drop a ref on the port's send turnstile; if the
+ * ref drops to zero, deallocate the turnstile.
+ *
+ * Conditions:
+ * The space might be locked, use safe deallocate.
+ */
+void
+ipc_port_send_turnstile_complete(ipc_port_t port)
+{
+ struct turnstile *turnstile = TURNSTILE_NULL;
+
+ /* Drop turnstile count on dest port */
+ imq_lock(&port->ip_messages);
+
+ port_send_turnstile(port)->ts_port_ref--;
+ if (port_send_turnstile(port)->ts_port_ref == 0) {
+ turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
+ &turnstile, TURNSTILE_SYNC_IPC);
+ assert(turnstile != TURNSTILE_NULL);
+ }
+ imq_unlock(&port->ip_messages);
+ turnstile_cleanup();
+
+ if (turnstile != TURNSTILE_NULL) {
+ turnstile_deallocate_safe(turnstile);
+ turnstile = TURNSTILE_NULL;
+ }
+}
+
+/*
+ * Routine: ipc_port_rcv_turnstile
+ * Purpose:
+ * Get the port's receive turnstile
+ *
+ * Conditions:
+ * mqueue locked, or the thread waiting on the turnstile is locked.
+ */
+static struct turnstile *
+ipc_port_rcv_turnstile(ipc_port_t port)
+{
+ return *port_rcv_turnstile_address(port);
+}
+
+
+/*
+ * Routine: ipc_port_link_special_reply_port
+ * Purpose:
+ * Link the special reply port with the destination port.
+ * Allocates a send turnstile for the dest port.
+ *
+ * Conditions:
+ * Nothing is locked.
+ */
+void
+ipc_port_link_special_reply_port(
+ ipc_port_t special_reply_port,
+ ipc_port_t dest_port,
+ boolean_t sync_bootstrap_checkin)
+{
+ boolean_t drop_turnstile_ref = FALSE;
+
+ /* Check if dest_port needs a turnstile */
+ ipc_port_send_turnstile_prepare(dest_port);
+
+ /* Lock the special reply port and establish the linkage */
+ ip_lock(special_reply_port);
+ imq_lock(&special_reply_port->ip_messages);
+
+ if (sync_bootstrap_checkin && special_reply_port->ip_specialreply) {
+ special_reply_port->ip_sync_bootstrap_checkin = 1;
+ }
+
+ /* Check if we need to drop the acquired turnstile ref on dest port */
+ if (!special_reply_port->ip_specialreply ||
+ special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
+ special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
+ drop_turnstile_ref = TRUE;
+ } else {
+ /* take a reference on dest_port */
+ ip_reference(dest_port);
+ special_reply_port->ip_sync_inheritor_port = dest_port;
+ special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
+ }
+
+ imq_unlock(&special_reply_port->ip_messages);
+ ip_unlock(special_reply_port);
+
+ if (drop_turnstile_ref) {
+ ipc_port_send_turnstile_complete(dest_port);
+ }
+
+ return;
+}
+
+#if DEVELOPMENT || DEBUG
+inline void
+ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
+{
+ special_reply_port->ip_srp_lost_link = 0;
+ special_reply_port->ip_srp_msg_sent = 0;
+}
+
+static inline void
+ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
+{
+ if (special_reply_port->ip_specialreply == 1) {
+ special_reply_port->ip_srp_msg_sent = 0;
+ }
+}
+
+inline void
+ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
+{
+ if (special_reply_port->ip_specialreply == 1) {
+ special_reply_port->ip_srp_msg_sent = 1;
+ }
+}
+
+static inline void
+ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
+{
+ if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
+ special_reply_port->ip_srp_lost_link = 1;
+ }
+}
+
+#else /* DEVELOPMENT || DEBUG */
+inline void
+ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
+{
+ return;
+}
+
+static inline void
+ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
+{
+ return;
+}
+
+inline void
+ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
+{
+ return;
+}
+
+static inline void
+ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
+{
+ return;
+}
+#endif /* DEVELOPMENT || DEBUG */
+
+/*
+ * Routine: ipc_port_adjust_special_reply_port_locked
+ * Purpose:
+ * If the special reply port has a turnstile, update its inheritor.
+ * Condition:
+ * Special reply port locked on entry.
+ * Special reply port unlocked on return.
+ * The passed in port is a special reply port.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_adjust_special_reply_port_locked(
+ ipc_port_t special_reply_port,
+ struct knote *kn,
+ uint8_t flags,
+ boolean_t get_turnstile)
+{
+ ipc_port_t dest_port = IPC_PORT_NULL;
+ int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
+ turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
+ struct turnstile *ts = TURNSTILE_NULL;
+
+ ip_lock_held(special_reply_port); // ip_sync_link_state is touched
+ imq_lock(&special_reply_port->ip_messages);
+
+ if (!special_reply_port->ip_specialreply) {
+ // only mach_msg_receive_results_complete() calls this with any port
+ assert(get_turnstile);
+ goto not_special;
+ }
+
+ if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
+ ipc_special_reply_port_msg_sent_reset(special_reply_port);
+ }
+
+ if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
+ special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
+ }
+
+ if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
+ special_reply_port->ip_sync_bootstrap_checkin = 0;
+ }
+
+ /* Check if the special reply port is marked non-special */
+ if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
+not_special:
+ if (get_turnstile) {
+ turnstile_complete((uintptr_t)special_reply_port,
+ port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
+ }
+ imq_unlock(&special_reply_port->ip_messages);
+ ip_unlock(special_reply_port);
+ if (get_turnstile) {
+ turnstile_cleanup();
+ }
+ return;
+ }
+
+ if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
+ if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
+ inheritor = filt_machport_stash_port(kn, special_reply_port,
+ &sync_link_state);
+ }
+ } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
+ sync_link_state = PORT_SYNC_LINK_ANY;
+ }
+
+ /* Check if need to break linkage */
+ if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
+ special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
+ imq_unlock(&special_reply_port->ip_messages);
+ ip_unlock(special_reply_port);
+ return;
+ }
+
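+ /* Clear the inheritor recorded for the current sync link state before switching */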
+ switch (special_reply_port->ip_sync_link_state) {
+ case PORT_SYNC_LINK_PORT:
+ dest_port = special_reply_port->ip_sync_inheritor_port;
+ special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
+ break;
+ case PORT_SYNC_LINK_WORKLOOP_KNOTE:
+ special_reply_port->ip_sync_inheritor_knote = NULL;
+ break;
+ case PORT_SYNC_LINK_WORKLOOP_STASH:
+ special_reply_port->ip_sync_inheritor_ts = NULL;
+ break;
+ }
+
+ special_reply_port->ip_sync_link_state = sync_link_state;
+
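+ /* Stash the new inheritor (or note the lost link) for the new sync link state */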
+ switch (sync_link_state) {
+ case PORT_SYNC_LINK_WORKLOOP_KNOTE:
+ special_reply_port->ip_sync_inheritor_knote = kn;
+ break;
+ case PORT_SYNC_LINK_WORKLOOP_STASH:
+ special_reply_port->ip_sync_inheritor_ts = inheritor;
+ break;
+ case PORT_SYNC_LINK_NO_LINKAGE:
+ if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
+ ipc_special_reply_port_lost_link(special_reply_port);
+ }
+ break;
+ }
+
+ /* Get thread's turnstile donated to special reply port */
+ if (get_turnstile) {
+ turnstile_complete((uintptr_t)special_reply_port,
+ port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
+ } else {
+ ts = ipc_port_rcv_turnstile(special_reply_port);
+ if (ts) {
+ turnstile_reference(ts);
+ ipc_port_recv_update_inheritor(special_reply_port, ts,
+ TURNSTILE_IMMEDIATE_UPDATE);
+ }
+ }
+
+ imq_unlock(&special_reply_port->ip_messages);
+ ip_unlock(special_reply_port);
+
+ if (get_turnstile) {
+ turnstile_cleanup();
+ } else if (ts) {
+ /* Call turnstile cleanup after dropping the interlock */
+ turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
+ turnstile_deallocate_safe(ts);
+ }
+
+ /* Release the ref on the dest port and its turnstile */
+ if (dest_port) {
+ ipc_port_send_turnstile_complete(dest_port);
+ /* release the reference on the dest port */
+ ip_release(dest_port);
+ }
+}
+
+/*
+ * Routine: ipc_port_adjust_special_reply_port
+ * Purpose:
+ * If the special reply port has a turnstile, update its inheritor.
+ * Condition:
+ * Nothing locked.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_adjust_special_reply_port(
+ ipc_port_t port,
+ uint8_t flags)
+{
+ if (port->ip_specialreply) {
+ ip_lock(port);
+ ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
+ }
+}
+
+/*
+ * Routine: ipc_port_adjust_sync_link_state_locked
+ * Purpose:
+ * Update the sync link state of the port and the
+ * turnstile inheritor.
+ * Condition:
+ * Port and mqueue locked on entry.
+ * Port and mqueue locked on return.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_adjust_sync_link_state_locked(
+ ipc_port_t port,
+ int sync_link_state,
+ turnstile_inheritor_t inheritor)
+{
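+ /* Drop whatever inheritor state the previous sync link state left behind */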
+ switch (port->ip_sync_link_state) {
+ case PORT_SYNC_LINK_RCV_THREAD:
+ /* deallocate the thread reference for the inheritor */
+ thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
+ /* Fall through */
+
+ default:
+ klist_init(&port->ip_messages.imq_klist);
+ }
+
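+ /* Record the new inheritor for the requested sync link state; anything else resets to PORT_SYNC_LINK_ANY */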
+ switch (sync_link_state) {
+ case PORT_SYNC_LINK_WORKLOOP_KNOTE:
+ port->ip_messages.imq_inheritor_knote = inheritor;
+ break;
+ case PORT_SYNC_LINK_WORKLOOP_STASH:
+ port->ip_messages.imq_inheritor_turnstile = inheritor;
+ break;
+ case PORT_SYNC_LINK_RCV_THREAD:
+ /* The thread could exit without clearing port state, take a thread ref */
+ thread_reference((thread_t)inheritor);
+ port->ip_messages.imq_inheritor_thread_ref = inheritor;
+ break;
+ default:
+ klist_init(&port->ip_messages.imq_klist);
+ sync_link_state = PORT_SYNC_LINK_ANY;
+ }
+
+ port->ip_sync_link_state = sync_link_state;
+}
+
+
+/*
+ * Routine: ipc_port_adjust_port_locked
+ * Purpose:
+ * If the port has a turnstile, update its inheritor.
+ * Condition:
+ * Port locked on entry.
+ * Port unlocked on return.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_adjust_port_locked(
+ ipc_port_t port,
+ struct knote *kn,
+ boolean_t sync_bootstrap_checkin)
+{
+ int sync_link_state = PORT_SYNC_LINK_ANY;
+ turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
+
+ ip_lock_held(port); // ip_sync_link_state is touched
+ imq_held(&port->ip_messages);
+
+ assert(!port->ip_specialreply);
+
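+ /* Pick the new inheritor: the knote's stash/workloop when copied out through a knote, or the calling thread for sync bootstrap checkin */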
+ if (kn) {
+ inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
+ if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
+ inheritor = kn;
+ }
+ } else if (sync_bootstrap_checkin) {
+ inheritor = current_thread();
+ sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
+ }
+
+ ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
+ port->ip_sync_bootstrap_checkin = 0;
+
+ ipc_port_send_turnstile_recompute_push_locked(port);
+ /* port and mqueue unlocked */
+}
+
+/*
+ * Routine: ipc_port_clear_sync_rcv_thread_boost_locked
+ * Purpose:
+ * If the port is pushing on rcv thread, clear it.
+ * Condition:
+ * Port locked on entry
+ * mqueue is not locked.
+ * Port unlocked on return.
+ * Returns:
+ * None.
+ */
+void
+ipc_port_clear_sync_rcv_thread_boost_locked(
+ ipc_port_t port)
+{
+ ip_lock_held(port); // ip_sync_link_state is touched
+
+ if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
+ ip_unlock(port);
+ return;
+ }
+
+ imq_lock(&port->ip_messages);
+ ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
+
+ ipc_port_send_turnstile_recompute_push_locked(port);
+ /* port and mqueue unlocked */
+}
+
+/*
+ * Routine: ipc_port_add_watchport_elem_locked
+ * Purpose:
+ * Transfer the turnstile boost of the watchport to the task calling exec.
+ * Condition:
+ * Port locked on entry.
+ * Port unlocked on return.
+ * Returns:
+ * KERN_SUCCESS on success.
+ * KERN_FAILURE otherwise.
+ */
+kern_return_t
+ipc_port_add_watchport_elem_locked(
+ ipc_port_t port,
+ struct task_watchport_elem *watchport_elem,
+ struct task_watchport_elem **old_elem)
+{
+ ip_lock_held(port);
+ imq_held(&port->ip_messages);
+
+ /* Watchport boost only works for non-special active ports mapped in an ipc space */
+ if (!ip_active(port) || port->ip_specialreply ||
+ port->ip_receiver_name == MACH_PORT_NULL) {
+ imq_unlock(&port->ip_messages);
+ ip_unlock(port);
+ return KERN_FAILURE;
+ }
+
+ if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
+ /* Sever the linkage if the port was pushing on knote */
+ ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
+ }
+
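+ /* Install the new watchport element; the previous one is handed back to the caller via old_elem */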
+ *old_elem = ipc_port_update_watchport_elem(port, watchport_elem);
+
+ ipc_port_send_turnstile_recompute_push_locked(port);
+ /* port and mqueue unlocked */
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_clear_watchport_elem_internal_conditional_locked
+ * Purpose:
+ * Remove the turnstile boost of watchport and recompute the push.
+ * Condition:
+ * Port locked on entry.
+ * Port unlocked on return.
+ * Returns:
+ * KERN_SUCCESS on success.
+ * KERN_FAILURE otherwise.
+ */
+kern_return_t
+ipc_port_clear_watchport_elem_internal_conditional_locked(
+ ipc_port_t port,
+ struct task_watchport_elem *watchport_elem)
+{
+ ip_lock_held(port);
+ imq_held(&port->ip_messages);
+
+ if (ipc_port_watchport_elem(port) != watchport_elem) {
+ imq_unlock(&port->ip_messages);
+ ip_unlock(port);
+ return KERN_FAILURE;
+ }
+
+ ipc_port_clear_watchport_elem_internal(port);
+ ipc_port_send_turnstile_recompute_push_locked(port);
+ /* port and mqueue unlocked */
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_replace_watchport_elem_conditional_locked
+ * Purpose:
+ * Replace the turnstile boost of watchport and recompute the push.
+ * Condition:
+ * Port locked on entry.
+ * Port unlocked on return.
+ * Returns:
+ * KERN_SUCCESS on success.
+ * KERN_FAILURE otherwise.
+ */
+kern_return_t
+ipc_port_replace_watchport_elem_conditional_locked(
+ ipc_port_t port,
+ struct task_watchport_elem *old_watchport_elem,
+ struct task_watchport_elem *new_watchport_elem)
+{
+ ip_lock_held(port);
+ imq_held(&port->ip_messages);
+
+ if (ipc_port_watchport_elem(port) != old_watchport_elem) {
+ imq_unlock(&port->ip_messages);
+ ip_unlock(port);
+ return KERN_FAILURE;
+ }
+
+ ipc_port_update_watchport_elem(port, new_watchport_elem);
+ ipc_port_send_turnstile_recompute_push_locked(port);
+ /* port and mqueue unlocked */
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_clear_watchport_elem_internal
+ * Purpose:
+ * Remove the turnstile boost of watchport.
+ * Condition:
+ * Port locked on entry.
+ * Port locked on return.
+ * Returns:
+ * Old task_watchport_elem returned.
+ */
+struct task_watchport_elem *
+ipc_port_clear_watchport_elem_internal(
+ ipc_port_t port)
+{
+ ip_lock_held(port);
+ imq_held(&port->ip_messages);
+
+ return ipc_port_update_watchport_elem(port, NULL);
+}
+
+/*
+ * Routine: ipc_port_send_turnstile_recompute_push_locked
+ * Purpose:
+ * Update send turnstile inheritor of port and recompute the push.
+ * Condition:
+ * Port locked on entry.
+ * Port unlocked on return.
+ * Returns:
+ * None.
+ */
+static void
+ipc_port_send_turnstile_recompute_push_locked(
+ ipc_port_t port)
+{
+ struct turnstile *send_turnstile = port_send_turnstile(port);
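+ /* If a send turnstile exists, refresh its inheritor while the mqueue is still locked */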
+ if (send_turnstile) {
+ turnstile_reference(send_turnstile);
+ ipc_port_send_update_inheritor(port, send_turnstile,
+ TURNSTILE_IMMEDIATE_UPDATE);
+ }
+ imq_unlock(&port->ip_messages);