diff --git a/osfmk/ipc/ipc_port.c b/osfmk/ipc/ipc_port.c
index ee9a7571e557e2bfb34c3c386cfe4ea9d16faac0..2b7c9217ca75050241a10d47ece164b46a1f0a6f 100644
--- a/osfmk/ipc/ipc_port.c
+++ b/osfmk/ipc/ipc_port.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -69,7 +69,6 @@
  *     Functions to manipulate IPC ports.
  */
 
-#include <zone_debug.h>
 #include <mach_assert.h>
 
 #include <mach/port.h>
@@ -84,6 +83,7 @@
 #include <ipc/ipc_entry.h>
 #include <ipc/ipc_space.h>
 #include <ipc/ipc_object.h>
+#include <ipc/ipc_right.h>
 #include <ipc/ipc_port.h>
 #include <ipc/ipc_pset.h>
 #include <ipc/ipc_kmsg.h>
 #include <ipc/ipc_notify.h>
 #include <ipc/ipc_table.h>
 #include <ipc/ipc_importance.h>
-#include <machine/machlimits.h>
+#include <machine/limits.h>
 #include <kern/turnstile.h>
+#include <kern/machine.h>
 
 #include <security/mac_mach_internal.h>
 
 #include <string.h>
 
-decl_lck_spin_data(, ipc_port_multiple_lock_data)
-ipc_port_timestamp_t    ipc_port_timestamp_data;
-int ipc_portbt;
+static TUNABLE(bool, prioritize_launch, "prioritize_launch", true);
+TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false);
+
+LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr);
+ipc_port_timestamp_t ipc_port_timestamp_data;
 
 #if     MACH_ASSERT
 void    ipc_port_init_debug(
@@ -114,6 +117,14 @@ void    ipc_port_callstack_init_debug(
 
 #endif  /* MACH_ASSERT */
 
+static void
+ipc_port_send_turnstile_recompute_push_locked(
+       ipc_port_t port);
+
+static thread_t
+ipc_port_get_watchport_inheritor(
+       ipc_port_t port);
+
 void
 ipc_port_release(ipc_port_t port)
 {
@@ -180,7 +191,7 @@ ipc_port_request_alloc(
        *importantp = FALSE;
 #endif /* IMPORTANCE_INHERITANCE */
 
-       assert(ip_active(port));
+       require_ip_active(port);
        assert(name != MACH_PORT_NULL);
        assert(soright != IP_NULL);
 
@@ -248,8 +259,7 @@ ipc_port_request_grow(
 {
        ipc_table_size_t its;
        ipc_port_request_t otable, ntable;
-
-       assert(ip_active(port));
+       require_ip_active(port);
 
        otable = port->ip_requests;
        if (otable == IPR_NULL) {
@@ -360,13 +370,13 @@ ipc_port_request_sparm(
        ipc_port_t                      port,
        __assert_only mach_port_name_t  name,
        ipc_port_request_index_t        index,
-       mach_msg_option_t       option,
-       mach_msg_priority_t override)
+       mach_msg_option_t               option,
+       mach_msg_priority_t             priority)
 {
        if (index != IE_REQ_NONE) {
                ipc_port_request_t ipr, table;
 
-               assert(ip_active(port));
+               require_ip_active(port);
 
                table = port->ip_requests;
                assert(table != IPR_NULL);
@@ -381,7 +391,15 @@ ipc_port_request_sparm(
 
                        if (option & MACH_SEND_OVERRIDE) {
                                /* apply override to message queue */
-                               ipc_mqueue_override_send(&port->ip_messages, override);
+                               mach_msg_qos_t qos_ovr;
+                               if (mach_msg_priority_is_pthread_priority(priority)) {
+                                       qos_ovr = _pthread_priority_thread_qos(priority);
+                               } else {
+                                       qos_ovr = mach_msg_priority_overide_qos(priority);
+                               }
+                               if (qos_ovr) {
+                                       ipc_mqueue_override_send(&port->ip_messages, qos_ovr);
+                               }
                        }
 
 #if IMPORTANCE_INHERITANCE
@@ -456,7 +474,7 @@ ipc_port_request_cancel(
        ipc_port_request_t ipr, table;
        ipc_port_t request = IP_NULL;
 
-       assert(ip_active(port));
+       require_ip_active(port);
        table = port->ip_requests;
        assert(table != IPR_NULL);
 
@@ -492,8 +510,7 @@ ipc_port_pdrequest(
        ipc_port_t      *previousp)
 {
        ipc_port_t previous;
-
-       assert(ip_active(port));
+       require_ip_active(port);
 
        previous = port->ip_pdrequest;
        port->ip_pdrequest = notify;
@@ -523,8 +540,7 @@ ipc_port_nsrequest(
 {
        ipc_port_t previous;
        mach_port_mscount_t mscount;
-
-       assert(ip_active(port));
+       require_ip_active(port);
 
        previous = port->ip_nsrequest;
        mscount = port->ip_mscount;
@@ -579,7 +595,7 @@ ipc_port_clear_receiver(
 
        /*
         * Send anyone waiting on the port's queue directly away.
-        * Also clear the mscount and seqno.
+        * Also clear the mscount, seqno, and guard bits.
         */
        imq_lock(mqueue);
        if (port->ip_receiver_name) {
@@ -590,6 +606,11 @@ ipc_port_clear_receiver(
        port->ip_mscount = 0;
        mqueue->imq_seqno = 0;
        port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
+       /*
+        * clear the immovable bit so the port can move back to anyone listening
+        * for the port destroy notification
+        */
+       port->ip_immovable_receive = 0;
 
        if (should_destroy) {
                /*
@@ -621,12 +642,15 @@ ipc_port_clear_receiver(
  *     Purpose:
  *             Initializes a newly-allocated port.
  *             Doesn't touch the ip_object fields.
+ *
+ *             The memory is expected to be zero initialized (allocated with Z_ZERO).
  */
 
 void
 ipc_port_init(
        ipc_port_t              port,
        ipc_space_t             space,
+       ipc_port_init_flags_t   flags,
        mach_port_name_t        name)
 {
        /* port->ip_kobject doesn't have to be initialized */
@@ -634,35 +658,29 @@ ipc_port_init(
        port->ip_receiver = space;
        port->ip_receiver_name = name;
 
-       port->ip_mscount = 0;
-       port->ip_srights = 0;
-       port->ip_sorights = 0;
-
-       port->ip_nsrequest = IP_NULL;
-       port->ip_pdrequest = IP_NULL;
-       port->ip_requests = IPR_NULL;
+       if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
+               port->ip_srights = 1;
+               port->ip_mscount = 1;
+       }
 
-       port->ip_premsg = IKM_NULL;
-       port->ip_context = 0;
+       if (flags & IPC_PORT_INIT_FILTER_MESSAGE) {
+               port->ip_object.io_bits |= IP_BIT_FILTER_MSG;
+       }
 
-       port->ip_sprequests  = 0;
-       port->ip_spimportant = 0;
-       port->ip_impdonation = 0;
-       port->ip_tempowner   = 0;
+       port->ip_tg_block_tracking = (flags & IPC_PORT_INIT_TG_BLOCK_TRACKING) != 0;
 
-       port->ip_guarded      = 0;
-       port->ip_strict_guard = 0;
-       port->ip_impcount    = 0;
+       if (flags & IPC_PORT_INIT_SPECIAL_REPLY) {
+               port->ip_specialreply = true;
+               port->ip_immovable_receive = true;
+       }
 
-       port->ip_specialreply = 0;
        port->ip_sync_link_state = PORT_SYNC_LINK_ANY;
 
-       reset_ip_srp_bits(port);
-
-       port->ip_send_turnstile = TURNSTILE_NULL;
-
-       ipc_mqueue_init(&port->ip_messages,
-           FALSE /* !set */);
+       ipc_mqueue_kind_t kind = IPC_MQUEUE_KIND_NONE;
+       if (flags & IPC_PORT_INIT_MESSAGE_QUEUE) {
+               kind = IPC_MQUEUE_KIND_PORT;
+       }
+       ipc_mqueue_init(&port->ip_messages, kind);
 }
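
To make the new flag semantics concrete, here is a minimal sketch (not a call site from this change) of a port initialized with both the message-queue and make-send-right flags; `port`, `space`, and `name` are assumed to come from the usual allocation path:

    ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
        IPC_PORT_INIT_MAKE_SEND_RIGHT;

    ipc_port_init(port, space, flags, name);

    /* one send right and a matching make-send count exist up front */
    assert(port->ip_srights == 1);
    assert(port->ip_mscount == 1);
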
 
 /*
@@ -682,27 +700,33 @@ ipc_port_init(
 kern_return_t
 ipc_port_alloc(
        ipc_space_t             space,
+       ipc_port_init_flags_t   flags,
        mach_port_name_t        *namep,
        ipc_port_t              *portp)
 {
        ipc_port_t port;
        mach_port_name_t name;
        kern_return_t kr;
+       mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
+       mach_port_urefs_t urefs = 0;
 
 #if     MACH_ASSERT
        uintptr_t buf[IP_CALLSTACK_MAX];
        ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
 #endif /* MACH_ASSERT */
 
-       kr = ipc_object_alloc(space, IOT_PORT,
-           MACH_PORT_TYPE_RECEIVE, 0,
+       if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
+               type |= MACH_PORT_TYPE_SEND;
+               urefs = 1;
+       }
+       kr = ipc_object_alloc(space, IOT_PORT, type, urefs,
            &name, (ipc_object_t *) &port);
        if (kr != KERN_SUCCESS) {
                return kr;
        }
 
        /* port and space are locked */
-       ipc_port_init(port, space, name);
+       ipc_port_init(port, space, flags, name);
 
 #if     MACH_ASSERT
        ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
@@ -734,19 +758,25 @@ ipc_port_alloc(
 kern_return_t
 ipc_port_alloc_name(
        ipc_space_t             space,
+       ipc_port_init_flags_t   flags,
        mach_port_name_t        name,
        ipc_port_t              *portp)
 {
        ipc_port_t port;
        kern_return_t kr;
+       mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
+       mach_port_urefs_t urefs = 0;
 
 #if     MACH_ASSERT
        uintptr_t buf[IP_CALLSTACK_MAX];
        ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
 #endif /* MACH_ASSERT */
 
-       kr = ipc_object_alloc_name(space, IOT_PORT,
-           MACH_PORT_TYPE_RECEIVE, 0,
+       if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
+               type |= MACH_PORT_TYPE_SEND;
+               urefs = 1;
+       }
+       kr = ipc_object_alloc_name(space, IOT_PORT, type, urefs,
            name, (ipc_object_t *) &port);
        if (kr != KERN_SUCCESS) {
                return kr;
@@ -754,7 +784,7 @@ ipc_port_alloc_name(
 
        /* port is locked */
 
-       ipc_port_init(port, space, name);
+       ipc_port_init(port, space, flags, name);
 
 #if     MACH_ASSERT
        ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
@@ -898,6 +928,7 @@ ipc_port_destroy(ipc_port_t port)
        ipc_mqueue_t mqueue;
        ipc_kmsg_t kmsg;
        boolean_t special_reply = port->ip_specialreply;
+       struct task_watchport_elem *watchport_elem = NULL;
 
 #if IMPORTANCE_INHERITANCE
        ipc_importance_task_t release_imp_task = IIT_NULL;
@@ -906,13 +937,25 @@ ipc_port_destroy(ipc_port_t port)
        natural_t assertcnt = 0;
 #endif /* IMPORTANCE_INHERITANCE */
 
-       assert(ip_active(port));
+       require_ip_active(port);
        /* port->ip_receiver_name is garbage */
        /* port->ip_receiver/port->ip_destination is garbage */
 
+       /* clear any reply-port context */
+       port->ip_reply_context = 0;
+
        /* check for a backup port */
        pdrequest = port->ip_pdrequest;
 
+       /*
+        * Panic if a special reply port has ip_pdrequest or ip_tempowner
+        * set, as this causes a type confusion while accessing the
+        * kdata union.
+        */
+       if (special_reply && (pdrequest || port->ip_tempowner)) {
+               panic("ipc_port_destroy: invalid state");
+       }
+
 #if IMPORTANCE_INHERITANCE
        /* determine how many assertions to drop and from whom */
        if (port->ip_tempowner != 0) {
@@ -944,11 +987,20 @@ ipc_port_destroy(ipc_port_t port)
 
                /* we assume the ref for pdrequest */
                port->ip_pdrequest = IP_NULL;
-               ip_unlock(port);
+
+               imq_lock(&port->ip_messages);
+               watchport_elem = ipc_port_clear_watchport_elem_internal(port);
+               ipc_port_send_turnstile_recompute_push_locked(port);
+               /* mqueue and port unlocked */
 
                if (special_reply) {
                        ipc_port_adjust_special_reply_port(port,
-                           IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE);
+                           IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
+               }
+
+               if (watchport_elem) {
+                       task_watchport_elem_deallocate(watchport_elem);
+                       watchport_elem = NULL;
                }
                /* consumes our refs for port and pdrequest */
                ipc_notify_port_destroyed(pdrequest, port);
@@ -956,8 +1008,6 @@ ipc_port_destroy(ipc_port_t port)
                goto drop_assertions;
        }
 
-       nsrequest = port->ip_nsrequest;
-
        /*
         * The mach_msg_* paths don't hold a port lock, they only hold a
         * reference to the port object. If a thread raced us and is now
@@ -973,6 +1023,11 @@ ipc_port_destroy(ipc_port_t port)
        assert(port->ip_in_pset == 0);
        assert(port->ip_mscount == 0);
 
+       imq_lock(&port->ip_messages);
+       watchport_elem = ipc_port_clear_watchport_elem_internal(port);
+       imq_unlock(&port->ip_messages);
+       nsrequest = port->ip_nsrequest;
+
        /*
         * If the port has a preallocated message buffer and that buffer
         * is not inuse, free it.  If it has an inuse one, then the kmsg
@@ -988,20 +1043,32 @@ ipc_port_destroy(ipc_port_t port)
                assert(kmsg != IKM_NULL);
                inuse_port = ikm_prealloc_inuse_port(kmsg);
                ipc_kmsg_clear_prealloc(kmsg, port);
-               ip_unlock(port);
+
+               imq_lock(&port->ip_messages);
+               ipc_port_send_turnstile_recompute_push_locked(port);
+               /* mqueue and port unlocked */
+
                if (inuse_port != IP_NULL) {
                        assert(inuse_port == port);
                } else {
                        ipc_kmsg_free(kmsg);
                }
        } else {
-               ip_unlock(port);
+               imq_lock(&port->ip_messages);
+               ipc_port_send_turnstile_recompute_push_locked(port);
+               /* mqueue and port unlocked */
+       }
+
+       /* Deallocate the watchport element */
+       if (watchport_elem) {
+               task_watchport_elem_deallocate(watchport_elem);
+               watchport_elem = NULL;
        }
 
        /* unlink the kmsg from special reply port */
        if (special_reply) {
                ipc_port_adjust_special_reply_port(port,
-                   IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE, FALSE);
+                   IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
        }
 
        /* throw away no-senders request */
@@ -1050,6 +1117,57 @@ drop_assertions:
 #endif /* IMPORTANCE_INHERITANCE */
 }
 
+/*
+ *     Routine:        ipc_port_destination_chain_lock
+ *     Purpose:
+ *             Search for the end of the chain (a port not in transit),
+ *             acquiring locks along the way, and return it in `base`.
+ *
+ *             Returns true if a reference was taken on `base`
+ *
+ *     Conditions:
+ *             No ports locked.
+ *             ipc_port_multiple_lock held.
+ */
+boolean_t
+ipc_port_destination_chain_lock(
+       ipc_port_t port,
+       ipc_port_t *base)
+{
+       for (;;) {
+               ip_lock(port);
+
+               if (!ip_active(port)) {
+                       /*
+                        * Active ports that are ip_lock()ed cannot go away.
+                        *
+                        * But inactive ports at the end of walking
+                        * an ip_destination chain are only protected
+                        * from space termination cleanup while the entire
+                        * chain of ports leading to them is held.
+                        *
+                        * Callers of this code tend to unlock the chain
+                        * in the same order as this walk, which doesn't
+                        * protect `base` properly when it's inactive.
+                        *
+                        * In that case, take a reference that the caller
+                        * is responsible for releasing.
+                        */
+                       ip_reference(port);
+                       *base = port;
+                       return true;
+               }
+               if ((port->ip_receiver_name != MACH_PORT_NULL) ||
+                   (port->ip_destination == IP_NULL)) {
+                       *base = port;
+                       return false;
+               }
+
+               port = port->ip_destination;
+       }
+}
+
+
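The reference rule matters to callers: the whole chain comes back locked, and the extra reference on an inactive `base` must be dropped only after `base` is unlocked. A condensed caller sketch, patterned on the uses later in this change and assuming `ipc_port_multiple_lock()` is already held:

    ipc_port_t base;
    boolean_t took_base_ref;

    took_base_ref = ipc_port_destination_chain_lock(port, &base);
    /* all ports in chain from port to base, inclusive, are locked */
    ipc_port_multiple_unlock();

    /* ... walk the chain, unlocking each port as it is processed ... */

    ip_unlock(base);
    if (took_base_ref) {
        ip_release(base);       /* drop the ref taken for an inactive base */
    }
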
 /*
  *     Routine:        ipc_port_check_circularity
  *     Purpose:
@@ -1077,6 +1195,8 @@ ipc_port_check_circularity(
        return ipc_importance_check_circularity(port, dest);
 #else
        ipc_port_t base;
+       struct task_watchport_elem *watchport_elem = NULL;
+       bool took_base_ref = false;
 
        assert(port != IP_NULL);
        assert(dest != IP_NULL);
@@ -1114,18 +1234,7 @@ ipc_port_check_circularity(
         *      acquiring locks along the way.
         */
 
-       for (;;) {
-               ip_lock(base);
-
-               if (!ip_active(base) ||
-                   (base->ip_receiver_name != MACH_PORT_NULL) ||
-                   (base->ip_destination == IP_NULL)) {
-                       break;
-               }
-
-               base = base->ip_destination;
-       }
-
+       took_base_ref = ipc_port_destination_chain_lock(dest, &base);
        /* all ports in chain from dest to base, inclusive, are locked */
 
        if (port == base) {
@@ -1134,18 +1243,17 @@ ipc_port_check_circularity(
                ipc_port_multiple_unlock();
 
                /* port (== base) is in limbo */
-
-               assert(ip_active(port));
+               require_ip_active(port);
                assert(port->ip_receiver_name == MACH_PORT_NULL);
                assert(port->ip_destination == IP_NULL);
+               assert(!took_base_ref);
 
                base = dest;
                while (base != IP_NULL) {
                        ipc_port_t next;
 
                        /* dest is in transit or in limbo */
-
-                       assert(ip_active(base));
+                       require_ip_active(base);
                        assert(base->ip_receiver_name == MACH_PORT_NULL);
 
                        next = base->ip_destination;
@@ -1170,11 +1278,18 @@ not_circular:
        imq_lock(&port->ip_messages);
 
        /* port is in limbo */
-
-       assert(ip_active(port));
+       require_ip_active(port);
        assert(port->ip_receiver_name == MACH_PORT_NULL);
        assert(port->ip_destination == IP_NULL);
 
+       /* Clear the watchport boost */
+       watchport_elem = ipc_port_clear_watchport_elem_internal(port);
+
+       /* Check if the port is being enqueued as a part of sync bootstrap checkin */
+       if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
+               port->ip_sync_bootstrap_checkin = 1;
+       }
+
        ip_reference(dest);
        port->ip_destination = dest;
 
@@ -1185,6 +1300,13 @@ not_circular:
                    port_send_turnstile_address(port),
                    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);
 
+               /*
+                * What ipc_port_adjust_port_locked would do,
+                * but we need to also drop even more locks before
+                * calling turnstile_update_inheritor_complete().
+                */
+               ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
+
                turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
                    (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
 
@@ -1204,8 +1326,7 @@ not_circular:
                }
 
                /* port is in transit */
-
-               assert(ip_active(dest));
+               require_ip_active(dest);
                assert(dest->ip_receiver_name == MACH_PORT_NULL);
                assert(dest->ip_destination != IP_NULL);
 
@@ -1220,6 +1341,9 @@ not_circular:
            (base->ip_destination == IP_NULL));
 
        ip_unlock(base);
+       if (took_base_ref) {
+               ip_release(base);
+       }
 
        /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
        if (send_turnstile) {
@@ -1227,35 +1351,215 @@ not_circular:
 
                /* Take the mq lock to call turnstile complete */
                imq_lock(&port->ip_messages);
-               turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL);
+               turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
                send_turnstile = TURNSTILE_NULL;
                imq_unlock(&port->ip_messages);
                turnstile_cleanup();
        }
 
+       if (watchport_elem) {
+               task_watchport_elem_deallocate(watchport_elem);
+       }
+
        return FALSE;
 #endif /* !IMPORTANCE_INHERITANCE */
 }
 
-struct turnstile *
-ipc_port_get_inheritor(ipc_port_t port)
+/*
+ *     Routine:        ipc_port_watchport_elem
+ *     Purpose:
+ *             Get the port's watchport elem field
+ *
+ *     Conditions:
+ *             mqueue locked
+ */
+static struct task_watchport_elem *
+ipc_port_watchport_elem(ipc_port_t port)
 {
-       ipc_mqueue_t mqueue = &port->ip_messages;
+       return port->ip_messages.imq_wait_queue.waitq_tspriv;
+}
+
+/*
+ *     Routine:        ipc_port_update_watchport_elem
+ *     Purpose:
+ *             Set the port's watchport elem field
+ *
+ *     Conditions:
+ *             mqueue locked
+ */
+static inline struct task_watchport_elem *
+ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
+{
+       assert(!port->ip_specialreply);
+       struct task_watchport_elem *old_we = ipc_port_watchport_elem(port);
+       port->ip_messages.imq_wait_queue.waitq_tspriv = we;
+       return old_we;
+}
+
+/*
+ *     Routine:        ipc_special_reply_stash_pid_locked
+ *     Purpose:
+ *             Set the pid of process that copied out send once right to special reply port.
+ *
+ *     Conditions:
+ *             port locked
+ */
+static inline void
+ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid)
+{
+       assert(port->ip_specialreply);
+       port->ip_messages.imq_wait_queue.waitq_priv_pid = pid;
+       return;
+}
+
+/*
+ *     Routine:        ipc_special_reply_get_pid_locked
+ *     Purpose:
+ *             Get the pid of process that copied out send once right to special reply port.
+ *
+ *     Conditions:
+ *             port locked
+ */
+int
+ipc_special_reply_get_pid_locked(ipc_port_t port)
+{
+       assert(port->ip_specialreply);
+       return port->ip_messages.imq_wait_queue.waitq_priv_pid;
+}
+
+/*
+ * Update the recv turnstile inheritor for a port.
+ *
+ * Sync IPC through the port receive turnstile only happens for the special
+ * reply port case. It has three sub-cases:
+ *
+ * 1. a send-once right is in transit, and pushes on the send turnstile of its
+ *    destination mqueue.
+ *
+ * 2. a send-once right has been stashed on a knote it was copied out "through",
+ *    as the first such copied out port.
+ *
+ * 3. a send-once right has been stashed on a knote it was copied out "through",
+ *    as the second or more copied out port.
+ */
+void
+ipc_port_recv_update_inheritor(
+       ipc_port_t port,
+       struct turnstile *rcv_turnstile,
+       turnstile_update_flags_t flags)
+{
+       struct turnstile *inheritor = TURNSTILE_NULL;
        struct knote *kn;
 
-       assert(imq_held(mqueue));
+       if (ip_active(port) && port->ip_specialreply) {
+               imq_held(&port->ip_messages);
+
+               switch (port->ip_sync_link_state) {
+               case PORT_SYNC_LINK_PORT:
+                       if (port->ip_sync_inheritor_port != NULL) {
+                               inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
+                       }
+                       break;
+
+               case PORT_SYNC_LINK_WORKLOOP_KNOTE:
+                       kn = port->ip_sync_inheritor_knote;
+                       inheritor = filt_ipc_kqueue_turnstile(kn);
+                       break;
 
-       if (!IMQ_KLIST_VALID(mqueue)) {
-               return IMQ_INHERITOR(mqueue);
+               case PORT_SYNC_LINK_WORKLOOP_STASH:
+                       inheritor = port->ip_sync_inheritor_ts;
+                       break;
+               }
        }
 
-       SLIST_FOREACH(kn, &port->ip_messages.imq_klist, kn_selnext) {
-               if ((kn->kn_sfflags & MACH_RCV_MSG) && (kn->kn_status & KN_DISPATCH)) {
-                       return filt_machport_kqueue_turnstile(kn);
+       turnstile_update_inheritor(rcv_turnstile, inheritor,
+           flags | TURNSTILE_INHERITOR_TURNSTILE);
+}
+
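A caller that needs to re-evaluate the receive-side push follows the same shape as the special-reply adjustment later in this change: take a reference on the receive turnstile, let this routine pick the inheritor while the mqueue is held, then finish the update after the locks are dropped. A condensed sketch, assuming the port and mqueue are locked on entry:

    struct turnstile *ts = ipc_port_rcv_turnstile(port);

    if (ts != TURNSTILE_NULL) {
        turnstile_reference(ts);
        ipc_port_recv_update_inheritor(port, ts, TURNSTILE_IMMEDIATE_UPDATE);
    }

    imq_unlock(&port->ip_messages);
    ip_unlock(port);

    if (ts != TURNSTILE_NULL) {
        turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
        turnstile_deallocate_safe(ts);
    }
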
+/*
+ * Update the send turnstile inheritor for a port.
+ *
+ * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
+ *
+ * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
+ *    to push on thread doing the sync ipc.
+ *
+ * 2. a receive right is in transit, and pushes on the send turnstile of its
+ *    destination mqueue.
+ *
+ * 3. port was passed as an exec watchport and port is pushing on main thread
+ *    of the task.
+ *
+ * 4. a receive right has been stashed on a knote it was copied out "through",
+ *    as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
+ *    for the special reply port)
+ *
+ * 5. a receive right has been stashed on a knote it was copied out "through",
+ *    as the second or more copied out port (same as
+ *    PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
+ *
+ * 6. a receive right has been copied out as a part of sync bootstrap checkin
+ *    and needs to push on thread doing the sync bootstrap checkin.
+ *
+ * 7. the receive right is monitored by a knote, and pushes on any that is
+ *    registered on a workloop. filt_machport makes sure that if such a knote
+ *    exists, it is kept as the first item in the knote list, so we never need
+ *    to walk.
+ */
+void
+ipc_port_send_update_inheritor(
+       ipc_port_t port,
+       struct turnstile *send_turnstile,
+       turnstile_update_flags_t flags)
+{
+       ipc_mqueue_t mqueue = &port->ip_messages;
+       turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
+       struct knote *kn;
+       turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;
+
+       imq_held(mqueue);
+
+       if (!ip_active(port)) {
+               /* this port is no longer active, it should not push anywhere */
+       } else if (port->ip_specialreply) {
+               /* Case 1. */
+               if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
+                       inheritor = port->ip_messages.imq_srp_owner_thread;
+                       inheritor_flags = TURNSTILE_INHERITOR_THREAD;
+               }
+       } else if (port->ip_receiver_name == MACH_PORT_NULL &&
+           port->ip_destination != NULL) {
+               /* Case 2. */
+               inheritor = port_send_turnstile(port->ip_destination);
+       } else if (ipc_port_watchport_elem(port) != NULL) {
+               /* Case 3. */
+               if (prioritize_launch) {
+                       assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
+                       inheritor = ipc_port_get_watchport_inheritor(port);
+                       inheritor_flags = TURNSTILE_INHERITOR_THREAD;
+               }
+       } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
+               /* Case 4. */
+               inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
+       } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
+               /* Case 5. */
+               inheritor = mqueue->imq_inheritor_turnstile;
+       } else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
+               /* Case 6. */
+               if (prioritize_launch) {
+                       inheritor = port->ip_messages.imq_inheritor_thread_ref;
+                       inheritor_flags = TURNSTILE_INHERITOR_THREAD;
+               }
+       } else if ((kn = SLIST_FIRST(&mqueue->imq_klist))) {
+               /* Case 7. Push on a workloop that is interested */
+               if (filt_machport_kqueue_has_turnstile(kn)) {
+                       assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
+                       inheritor = filt_ipc_kqueue_turnstile(kn);
                }
        }
 
-       return TURNSTILE_NULL;
+       turnstile_update_inheritor(send_turnstile, inheritor,
+           flags | inheritor_flags);
 }
 
 /*
@@ -1271,7 +1575,6 @@ void
 ipc_port_send_turnstile_prepare(ipc_port_t port)
 {
        struct turnstile *turnstile = TURNSTILE_NULL;
-       struct turnstile *inheritor = TURNSTILE_NULL;
        struct turnstile *send_turnstile = TURNSTILE_NULL;
 
 retry_alloc:
@@ -1290,22 +1593,9 @@ retry_alloc:
                    turnstile, TURNSTILE_SYNC_IPC);
                turnstile = TURNSTILE_NULL;
 
-               /*
-                * if port in transit, setup linkage for its turnstile,
-                * otherwise the link it to WL turnstile.
-                */
-               if (ip_active(port) &&
-                   port->ip_receiver_name == MACH_PORT_NULL &&
-                   port->ip_destination != IP_NULL) {
-                       assert(port->ip_receiver_name == MACH_PORT_NULL);
-                       assert(port->ip_destination != IP_NULL);
+               ipc_port_send_update_inheritor(port, send_turnstile,
+                   TURNSTILE_IMMEDIATE_UPDATE);
 
-                       inheritor = port_send_turnstile(port->ip_destination);
-               } else {
-                       inheritor = ipc_port_get_inheritor(port);
-               }
-               turnstile_update_inheritor(send_turnstile, inheritor,
-                   TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE);
                /* turnstile complete will be called in ipc_port_send_turnstile_complete */
        }
 
@@ -1343,7 +1633,7 @@ ipc_port_send_turnstile_complete(ipc_port_t port)
        port_send_turnstile(port)->ts_port_ref--;
        if (port_send_turnstile(port)->ts_port_ref == 0) {
                turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
-                   &turnstile);
+                   &turnstile, TURNSTILE_SYNC_IPC);
                assert(turnstile != TURNSTILE_NULL);
        }
        imq_unlock(&port->ip_messages);
@@ -1355,35 +1645,6 @@ ipc_port_send_turnstile_complete(ipc_port_t port)
        }
 }
 
-
-/*
- *     Routine:        ipc_port_rcv_turnstile_waitq
- *     Purpose:
- *             Given the mqueue's waitq, find the port's
- *              rcv turnstile and return its waitq.
- *
- *     Conditions:
- *             mqueue locked or thread waiting on turnstile is locked.
- */
-struct waitq *
-ipc_port_rcv_turnstile_waitq(struct waitq *waitq)
-{
-       struct waitq *safeq;
-
-       ipc_mqueue_t mqueue = imq_from_waitq(waitq);
-       ipc_port_t port = ip_from_mq(mqueue);
-       struct turnstile *rcv_turnstile = ipc_port_rcv_turnstile(port);
-
-       /* Check if the port has a rcv turnstile */
-       if (rcv_turnstile != TURNSTILE_NULL) {
-               safeq = &rcv_turnstile->ts_waitq;
-       } else {
-               safeq = global_eventq(waitq);
-       }
-       return safeq;
-}
-
-
 /*
  *     Routine:        ipc_port_rcv_turnstile
  *     Purpose:
@@ -1392,10 +1653,10 @@ ipc_port_rcv_turnstile_waitq(struct waitq *waitq)
  *     Conditions:
  *             mqueue locked or thread waiting on turnstile is locked.
  */
-struct turnstile *
+static struct turnstile *
 ipc_port_rcv_turnstile(ipc_port_t port)
 {
-       return turnstile_lookup_by_proprietor((uintptr_t)port);
+       return *port_rcv_turnstile_address(port);
 }
 
 
@@ -1411,9 +1672,11 @@ ipc_port_rcv_turnstile(ipc_port_t port)
 void
 ipc_port_link_special_reply_port(
        ipc_port_t special_reply_port,
-       ipc_port_t dest_port)
+       ipc_port_t dest_port,
+       boolean_t sync_bootstrap_checkin)
 {
        boolean_t drop_turnstile_ref = FALSE;
+       boolean_t special_reply = FALSE;
 
        /* Check if dest_port needs a turnstile */
        ipc_port_send_turnstile_prepare(dest_port);
@@ -1422,8 +1685,14 @@ ipc_port_link_special_reply_port(
        ip_lock(special_reply_port);
        imq_lock(&special_reply_port->ip_messages);
 
+       special_reply = special_reply_port->ip_specialreply;
+
+       if (sync_bootstrap_checkin && special_reply) {
+               special_reply_port->ip_sync_bootstrap_checkin = 1;
+       }
+
        /* Check if we need to drop the acquired turnstile ref on dest port */
-       if (!special_reply_port->ip_specialreply ||
+       if (!special_reply ||
            special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
            special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
                drop_turnstile_ref = TRUE;
@@ -1437,6 +1706,15 @@ ipc_port_link_special_reply_port(
        imq_unlock(&special_reply_port->ip_messages);
        ip_unlock(special_reply_port);
 
+       if (special_reply) {
+               /*
+                * For special reply ports, if the destination port is
+                * marked with the thread group blocked tracking flag,
+                * callout to the performance controller.
+                */
+               ipc_port_thread_group_blocked(dest_port);
+       }
+
        if (drop_turnstile_ref) {
                ipc_port_send_turnstile_complete(dest_port);
        }
@@ -1444,16 +1722,78 @@ ipc_port_link_special_reply_port(
        return;
 }
 
+/*
+ *     Routine:        ipc_port_thread_group_blocked
+ *     Purpose:
+ *             Call thread_group_blocked callout if the port
+ *             has ip_tg_block_tracking bit set and the thread
+ *             has not made this callout already.
+ *
+ *     Conditions:
+ *             Nothing is locked.
+ */
+void
+ipc_port_thread_group_blocked(ipc_port_t port __unused)
+{
+#if CONFIG_THREAD_GROUPS
+       bool port_tg_block_tracking = false;
+       thread_t self = current_thread();
+
+       if (self->thread_group == NULL ||
+           (self->options & TH_OPT_IPC_TG_BLOCKED)) {
+               return;
+       }
+
+       port_tg_block_tracking = port->ip_tg_block_tracking;
+       if (!port_tg_block_tracking) {
+               return;
+       }
+
+       machine_thread_group_blocked(self->thread_group, NULL,
+           PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
+
+       self->options |= TH_OPT_IPC_TG_BLOCKED;
+#endif
+}
+
+/*
+ *     Routine:        ipc_port_thread_group_unblocked
+ *     Purpose:
+ *             Call thread_group_unblocked callout if the
+ *             thread had previously made a thread_group_blocked
+ *             callout before (indicated by TH_OPT_IPC_TG_BLOCKED
+ *             flag on the thread).
+ *
+ *     Conditions:
+ *             Nothing is locked.
+ */
+void
+ipc_port_thread_group_unblocked(void)
+{
+#if CONFIG_THREAD_GROUPS
+       thread_t self = current_thread();
+
+       if (!(self->options & TH_OPT_IPC_TG_BLOCKED)) {
+               return;
+       }
+
+       machine_thread_group_unblocked(self->thread_group, NULL,
+           PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
+
+       self->options &= ~TH_OPT_IPC_TG_BLOCKED;
+#endif
+}
+
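The two callouts are meant to bracket a synchronous wait: the blocked callout fires at most once per wait and records that fact in TH_OPT_IPC_TG_BLOCKED, which the unblocked callout consumes on the same thread. A hedged sketch of the pairing; the function name below is illustrative, and the real unblock site presumably lives in the mach_msg receive path rather than this file:

    #if CONFIG_THREAD_GROUPS
    static void
    example_sync_ipc_wait(ipc_port_t reply_port, ipc_port_t dest_port)
    {
        /*
         * Links the reply port; for destinations with ip_tg_block_tracking
         * set, this makes the thread_group_blocked callout and sets
         * TH_OPT_IPC_TG_BLOCKED on the current thread.
         */
        ipc_port_link_special_reply_port(reply_port, dest_port, FALSE);

        /* ... block until the reply arrives ... */

        /* pairs with any blocked callout made above; no-op otherwise */
        ipc_port_thread_group_unblocked();
    }
    #endif /* CONFIG_THREAD_GROUPS */
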
 #if DEVELOPMENT || DEBUG
 inline void
-reset_ip_srp_bits(ipc_port_t special_reply_port)
+ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
 {
        special_reply_port->ip_srp_lost_link = 0;
        special_reply_port->ip_srp_msg_sent = 0;
 }
 
-inline void
-reset_ip_srp_msg_sent(ipc_port_t special_reply_port)
+static inline void
+ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
 {
        if (special_reply_port->ip_specialreply == 1) {
                special_reply_port->ip_srp_msg_sent = 0;
@@ -1461,15 +1801,15 @@ reset_ip_srp_msg_sent(ipc_port_t special_reply_port)
 }
 
 inline void
-set_ip_srp_msg_sent(ipc_port_t special_reply_port)
+ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
 {
        if (special_reply_port->ip_specialreply == 1) {
                special_reply_port->ip_srp_msg_sent = 1;
        }
 }
 
-inline void
-set_ip_srp_lost_link(ipc_port_t special_reply_port)
+static inline void
+ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
 {
        if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
                special_reply_port->ip_srp_lost_link = 1;
@@ -1478,25 +1818,25 @@ set_ip_srp_lost_link(ipc_port_t special_reply_port)
 
 #else /* DEVELOPMENT || DEBUG */
 inline void
-reset_ip_srp_bits(__unused ipc_port_t special_reply_port)
+ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
 {
        return;
 }
 
-inline void
-reset_ip_srp_msg_sent(__unused ipc_port_t special_reply_port)
+static inline void
+ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
 {
        return;
 }
 
 inline void
-set_ip_srp_msg_sent(__unused ipc_port_t special_reply_port)
+ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
 {
        return;
 }
 
-inline void
-set_ip_srp_lost_link(__unused ipc_port_t special_reply_port)
+static inline void
+ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
 {
        return;
 }
@@ -1505,10 +1845,11 @@ set_ip_srp_lost_link(__unused ipc_port_t special_reply_port)
 /*
  *     Routine:        ipc_port_adjust_special_reply_port_locked
  *     Purpose:
- *             If the special port has a turnstile, update it's inheritor.
+ *             If the special port has a turnstile, update its inheritor.
  *     Condition:
  *             Special reply port locked on entry.
  *             Special reply port unlocked on return.
+ *             The passed in port is a special reply port.
  *     Returns:
  *             None.
  */
@@ -1522,21 +1863,35 @@ ipc_port_adjust_special_reply_port_locked(
        ipc_port_t dest_port = IPC_PORT_NULL;
        int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
        turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
-       struct turnstile *dest_ts = TURNSTILE_NULL, *ts = TURNSTILE_NULL;
+       struct turnstile *ts = TURNSTILE_NULL;
 
+       ip_lock_held(special_reply_port); // ip_sync_link_state is touched
        imq_lock(&special_reply_port->ip_messages);
 
+       if (!special_reply_port->ip_specialreply) {
+               // only mach_msg_receive_results_complete() calls this with any port
+               assert(get_turnstile);
+               goto not_special;
+       }
+
        if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
-               reset_ip_srp_msg_sent(special_reply_port);
+               ipc_special_reply_port_msg_sent_reset(special_reply_port);
+       }
+
+       if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
+               special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
+       }
+
+       if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
+               special_reply_port->ip_sync_bootstrap_checkin = 0;
        }
 
        /* Check if the special reply port is marked non-special */
-       if (special_reply_port->ip_specialreply == 0 ||
-           special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
+       if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
+not_special:
                if (get_turnstile) {
                        turnstile_complete((uintptr_t)special_reply_port,
-                           port_rcv_turnstile_address(special_reply_port),
-                           NULL);
+                           port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
                }
                imq_unlock(&special_reply_port->ip_messages);
                ip_unlock(special_reply_port);
@@ -1546,32 +1901,23 @@ ipc_port_adjust_special_reply_port_locked(
                return;
        }
 
-       /* Clear thread's special reply port and clear linkage */
-       if (flags & IPC_PORT_ADJUST_SR_CLEAR_SPECIAL_REPLY) {
-               /* This option should only be specified by a non blocking thread */
-               assert(get_turnstile == FALSE);
-               special_reply_port->ip_specialreply = 0;
-
-               reset_ip_srp_bits(special_reply_port);
-
-               /* Check if need to break linkage */
-               if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
-                       imq_unlock(&special_reply_port->ip_messages);
-                       ip_unlock(special_reply_port);
-                       return;
-               }
-       } else if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
-               if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY ||
-                   special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT) {
-                       if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
-                               inheritor = filt_machport_stash_port(kn, special_reply_port,
-                                   &sync_link_state);
-                       }
+       if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
+               if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
+                       inheritor = filt_machport_stash_port(kn, special_reply_port,
+                           &sync_link_state);
                }
        } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
                sync_link_state = PORT_SYNC_LINK_ANY;
        }
 
+       /* Check if we need to break linkage */
+       if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
+           special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
+               imq_unlock(&special_reply_port->ip_messages);
+               ip_unlock(special_reply_port);
+               return;
+       }
+
        switch (special_reply_port->ip_sync_link_state) {
        case PORT_SYNC_LINK_PORT:
                dest_port = special_reply_port->ip_sync_inheritor_port;
@@ -1581,11 +1927,24 @@ ipc_port_adjust_special_reply_port_locked(
                special_reply_port->ip_sync_inheritor_knote = NULL;
                break;
        case PORT_SYNC_LINK_WORKLOOP_STASH:
-               dest_ts = special_reply_port->ip_sync_inheritor_ts;
                special_reply_port->ip_sync_inheritor_ts = NULL;
                break;
        }
 
+       /*
+        * Stash (or unstash) the server's PID in the ip_sorights field of the
+        * special reply port, so that stackshot can later retrieve who the client
+        * is blocked on.
+        */
+       if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT &&
+           sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
+               ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task()));
+       } else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
+           sync_link_state == PORT_SYNC_LINK_ANY) {
+               /* If we are resetting the special reply port, remove the stashed pid. */
+               ipc_special_reply_stash_pid_locked(special_reply_port, 0);
+       }
+
        special_reply_port->ip_sync_link_state = sync_link_state;
 
        switch (sync_link_state) {
@@ -1593,12 +1952,11 @@ ipc_port_adjust_special_reply_port_locked(
                special_reply_port->ip_sync_inheritor_knote = kn;
                break;
        case PORT_SYNC_LINK_WORKLOOP_STASH:
-               turnstile_reference(inheritor);
                special_reply_port->ip_sync_inheritor_ts = inheritor;
                break;
        case PORT_SYNC_LINK_NO_LINKAGE:
                if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
-                       set_ip_srp_lost_link(special_reply_port);
+                       ipc_special_reply_port_lost_link(special_reply_port);
                }
                break;
        }
@@ -1606,14 +1964,13 @@ ipc_port_adjust_special_reply_port_locked(
        /* Get thread's turnstile donated to special reply port */
        if (get_turnstile) {
                turnstile_complete((uintptr_t)special_reply_port,
-                   port_rcv_turnstile_address(special_reply_port),
-                   NULL);
+                   port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
        } else {
                ts = ipc_port_rcv_turnstile(special_reply_port);
                if (ts) {
                        turnstile_reference(ts);
-                       turnstile_update_inheritor(ts, inheritor,
-                           (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
+                       ipc_port_recv_update_inheritor(special_reply_port, ts,
+                           TURNSTILE_IMMEDIATE_UPDATE);
                }
        }
 
@@ -1628,22 +1985,18 @@ ipc_port_adjust_special_reply_port_locked(
                turnstile_deallocate_safe(ts);
        }
 
-       /* Release the ref on the dest port and it's turnstile */
+       /* Release the ref on the dest port and its turnstile */
        if (dest_port) {
                ipc_port_send_turnstile_complete(dest_port);
                /* release the reference on the dest port */
                ip_release(dest_port);
        }
-
-       if (dest_ts) {
-               turnstile_deallocate_safe(dest_ts);
-       }
 }
 
 /*
  *     Routine:        ipc_port_adjust_special_reply_port
  *     Purpose:
- *             If the special port has a turnstile, update it's inheritor.
+ *             If the special port has a turnstile, update its inheritor.
  *     Condition:
  *             Nothing locked.
  *     Returns:
@@ -1651,43 +2004,341 @@ ipc_port_adjust_special_reply_port_locked(
  */
 void
 ipc_port_adjust_special_reply_port(
-       ipc_port_t special_reply_port,
-       uint8_t flags,
-       boolean_t get_turnstile)
+       ipc_port_t port,
+       uint8_t flags)
 {
-       ip_lock(special_reply_port);
-       ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL, flags, get_turnstile);
-       /* special_reply_port unlocked */
+       if (port->ip_specialreply) {
+               ip_lock(port);
+               ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
+       }
 }
 
 /*
- *     Routine:        ipc_port_get_special_reply_port_inheritor
+ *     Routine:        ipc_port_adjust_sync_link_state_locked
  *     Purpose:
- *             Returns the current inheritor of the special reply port
+ *             Update the sync link state of the port and the
+ *             turnstile inheritor.
  *     Condition:
- *             mqueue is locked, port is a special reply port
+ *             Port and mqueue locked on entry.
+ *             Port and mqueue locked on return.
  *     Returns:
- *             the current inheritor
+ *              None.
  */
-turnstile_inheritor_t
-ipc_port_get_special_reply_port_inheritor(
-       ipc_port_t port)
+void
+ipc_port_adjust_sync_link_state_locked(
+       ipc_port_t port,
+       int sync_link_state,
+       turnstile_inheritor_t inheritor)
 {
-       assert(port->ip_specialreply);
-       imq_held(&port->ip_messages);
-
        switch (port->ip_sync_link_state) {
-       case PORT_SYNC_LINK_PORT:
-               if (port->ip_sync_inheritor_port != NULL) {
-                       return port_send_turnstile(port->ip_sync_inheritor_port);
-               }
-               break;
+       case PORT_SYNC_LINK_RCV_THREAD:
+               /* deallocate the thread reference for the inheritor */
+               thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
+               OS_FALLTHROUGH;
+       default:
+               klist_init(&port->ip_messages.imq_klist);
+       }
+
+       switch (sync_link_state) {
        case PORT_SYNC_LINK_WORKLOOP_KNOTE:
-               return filt_machport_stashed_special_reply_port_turnstile(port);
+               port->ip_messages.imq_inheritor_knote = inheritor;
+               break;
        case PORT_SYNC_LINK_WORKLOOP_STASH:
-               return port->ip_sync_inheritor_ts;
+               port->ip_messages.imq_inheritor_turnstile = inheritor;
+               break;
+       case PORT_SYNC_LINK_RCV_THREAD:
+               /* The thread could exit without clearing port state, take a thread ref */
+               thread_reference((thread_t)inheritor);
+               port->ip_messages.imq_inheritor_thread_ref = inheritor;
+               break;
+       default:
+               klist_init(&port->ip_messages.imq_klist);
+               sync_link_state = PORT_SYNC_LINK_ANY;
+       }
+
+       port->ip_sync_link_state = sync_link_state;
+}
+
+
+/*
+ *     Routine:        ipc_port_adjust_port_locked
+ *     Purpose:
+ *             If the port has a turnstile, update its inheritor.
+ *     Condition:
+ *             Port locked on entry.
+ *             Port unlocked on return.
+ *     Returns:
+ *             None.
+ */
+void
+ipc_port_adjust_port_locked(
+       ipc_port_t port,
+       struct knote *kn,
+       boolean_t sync_bootstrap_checkin)
+{
+       int sync_link_state = PORT_SYNC_LINK_ANY;
+       turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
+
+       ip_lock_held(port); // ip_sync_link_state is touched
+       imq_held(&port->ip_messages);
+
+       assert(!port->ip_specialreply);
+
+       if (kn) {
+               inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
+               if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
+                       inheritor = kn;
+               }
+       } else if (sync_bootstrap_checkin) {
+               inheritor = current_thread();
+               sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
+       }
+
+       ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
+       port->ip_sync_bootstrap_checkin = 0;
+
+       ipc_port_send_turnstile_recompute_push_locked(port);
+       /* port and mqueue unlocked */
+}
+
+/*
+ *     Routine:        ipc_port_clear_sync_rcv_thread_boost_locked
+ *     Purpose:
+ *             If the port is pushing on rcv thread, clear it.
+ *     Condition:
+ *             Port locked on entry
+ *             mqueue is not locked.
+ *             Port unlocked on return.
+ *     Returns:
+ *             None.
+ */
+void
+ipc_port_clear_sync_rcv_thread_boost_locked(
+       ipc_port_t port)
+{
+       ip_lock_held(port); // ip_sync_link_state is touched
+
+       if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
+               ip_unlock(port);
+               return;
+       }
+
+       imq_lock(&port->ip_messages);
+       ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
+
+       ipc_port_send_turnstile_recompute_push_locked(port);
+       /* port and mqueue unlocked */
+}
+
+/*
+ *     Routine:        ipc_port_add_watchport_elem_locked
+ *     Purpose:
+ *             Transfer the turnstile boost of watchport to task calling exec.
+ *     Condition:
+ *             Port locked on entry.
+ *             Port unlocked on return.
+ *     Returns:
+ *             KERN_SUCCESS on success.
+ *             KERN_FAILURE otherwise.
+ */
+kern_return_t
+ipc_port_add_watchport_elem_locked(
+       ipc_port_t                 port,
+       struct task_watchport_elem *watchport_elem,
+       struct task_watchport_elem **old_elem)
+{
+       ip_lock_held(port);
+       imq_held(&port->ip_messages);
+
+       /* Watchport boost only works for non-special active ports mapped in an ipc space */
+       if (!ip_active(port) || port->ip_specialreply ||
+           port->ip_receiver_name == MACH_PORT_NULL) {
+               imq_unlock(&port->ip_messages);
+               ip_unlock(port);
+               return KERN_FAILURE;
+       }
+
+       if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
+               /* Sever the linkage if the port was pushing on knote */
+               ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
+       }
+
+       *old_elem = ipc_port_update_watchport_elem(port, watchport_elem);
+
+       ipc_port_send_turnstile_recompute_push_locked(port);
+       /* port and mqueue unlocked */
+       return KERN_SUCCESS;
+}
+
+/*
+ *     Routine:        ipc_port_clear_watchport_elem_internal_conditional_locked
+ *     Purpose:
+ *             Remove the turnstile boost of watchport and recompute the push.
+ *     Condition:
+ *             Port locked on entry.
+ *             Port unlocked on return.
+ *     Returns:
+ *             KERN_SUCCESS on success.
+ *             KERN_FAILURE otherwise.
+ */
+kern_return_t
+ipc_port_clear_watchport_elem_internal_conditional_locked(
+       ipc_port_t                 port,
+       struct task_watchport_elem *watchport_elem)
+{
+       ip_lock_held(port);
+       imq_held(&port->ip_messages);
+
+       if (ipc_port_watchport_elem(port) != watchport_elem) {
+               imq_unlock(&port->ip_messages);
+               ip_unlock(port);
+               return KERN_FAILURE;
+       }
+
+       ipc_port_clear_watchport_elem_internal(port);
+       ipc_port_send_turnstile_recompute_push_locked(port);
+       /* port and mqueue unlocked */
+       return KERN_SUCCESS;
+}
+
+/*
+ *     Routine:        ipc_port_replace_watchport_elem_conditional_locked
+ *     Purpose:
+ *             Replace the turnstile boost of watchport and recompute the push.
+ *     Condition:
+ *             Port locked on entry.
+ *             Port unlocked on return.
+ *     Returns:
+ *             KERN_SUCCESS on success.
+ *             KERN_FAILURE otherwise.
+ */
+kern_return_t
+ipc_port_replace_watchport_elem_conditional_locked(
+       ipc_port_t                 port,
+       struct task_watchport_elem *old_watchport_elem,
+       struct task_watchport_elem *new_watchport_elem)
+{
+       ip_lock_held(port);
+       imq_held(&port->ip_messages);
+
+       if (ipc_port_watchport_elem(port) != old_watchport_elem) {
+               imq_unlock(&port->ip_messages);
+               ip_unlock(port);
+               return KERN_FAILURE;
+       }
+
+       ipc_port_update_watchport_elem(port, new_watchport_elem);
+       ipc_port_send_turnstile_recompute_push_locked(port);
+       /* port and mqueue unlocked */
+       return KERN_SUCCESS;
+}
+
+/*
+ *     Routine:        ipc_port_clear_watchport_elem_internal
+ *     Purpose:
+ *             Remove the turnstile boost of watchport.
+ *     Condition:
+ *             Port locked on entry.
+ *             Port locked on return.
+ *     Returns:
+ *             Old task_watchport_elem returned.
+ */
+struct task_watchport_elem *
+ipc_port_clear_watchport_elem_internal(
+       ipc_port_t                 port)
+{
+       ip_lock_held(port);
+       imq_held(&port->ip_messages);
+
+       if (port->ip_specialreply) {
+               return NULL;
+       }
+
+       return ipc_port_update_watchport_elem(port, NULL);
+}
+
+/*
+ *     Routine:        ipc_port_send_turnstile_recompute_push_locked
+ *     Purpose:
+ *             Update send turnstile inheritor of port and recompute the push.
+ *     Condition:
+ *             Port locked on entry.
+ *             Port unlocked on return.
+ *     Returns:
+ *             None.
+ */
+static void
+ipc_port_send_turnstile_recompute_push_locked(
+       ipc_port_t port)
+{
+       struct turnstile *send_turnstile = port_send_turnstile(port);
+       if (send_turnstile) {
+               turnstile_reference(send_turnstile);
+               ipc_port_send_update_inheritor(port, send_turnstile,
+                   TURNSTILE_IMMEDIATE_UPDATE);
+       }
+       imq_unlock(&port->ip_messages);
+       ip_unlock(port);
+
+       if (send_turnstile) {
+               turnstile_update_inheritor_complete(send_turnstile,
+                   TURNSTILE_INTERLOCK_NOT_HELD);
+               turnstile_deallocate_safe(send_turnstile);
+       }
+}
+
+/*
+ *     Routine:        ipc_port_get_watchport_inheritor
+ *     Purpose:
+ *             Returns inheritor for watchport.
+ *
+ *     Conditions:
+ *             mqueue locked.
+ *     Returns:
+ *             watchport inheritor.
+ */
+static thread_t
+ipc_port_get_watchport_inheritor(
+       ipc_port_t port)
+{
+       imq_held(&port->ip_messages);
+       return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
+}
+
+/*
+ *     Routine:        ipc_port_get_receiver_task
+ *     Purpose:
+ *             Returns receiver task pointer and its pid (if any) for port.
+ *
+ *     Conditions:
+ *             Nothing locked.
+ */
+pid_t
+ipc_port_get_receiver_task(ipc_port_t port, uintptr_t *task)
+{
+       task_t receiver = TASK_NULL;
+       pid_t pid = -1;
+
+       if (!port) {
+               goto out;
+       }
+
+       ip_lock(port);
+       if (ip_active(port) &&
+           MACH_PORT_VALID(port->ip_receiver_name) &&
+           port->ip_receiver &&
+           port->ip_receiver != ipc_space_kernel &&
+           port->ip_receiver != ipc_space_reply) {
+               receiver = port->ip_receiver->is_task;
+               pid = task_pid(receiver);
+       }
+       ip_unlock(port);
+
+out:
+       if (task) {
+               *task = (uintptr_t)receiver;
        }
-       return TURNSTILE_INHERITOR_NULL;
+       return pid;
 }
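
A small usage sketch for the lookup above; the helper is illustrative and only needs the pid, so the task out-parameter is passed as NULL:

/* Hypothetical helper: report which pid, if any, holds the receive right. */
static void
example_log_receiver_pid(ipc_port_t port)
{
	pid_t pid = ipc_port_get_receiver_task(port, NULL);

	if (pid != -1) {
		printf("port %p is received by pid %d\n", port, pid);
	}
}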
 
 /*
@@ -1801,7 +2452,8 @@ ipc_port_importance_delta_internal(
        ipc_importance_task_t   *imp_task)
 {
        ipc_port_t next, base;
-       boolean_t dropped = FALSE;
+       bool dropped = false;
+       bool took_base_ref = false;
 
        *imp_task = IIT_NULL;
 
@@ -1817,18 +2469,14 @@ ipc_port_importance_delta_internal(
        if (ip_active(port) &&
            port->ip_destination != IP_NULL &&
            port->ip_receiver_name == MACH_PORT_NULL) {
-               dropped = TRUE;
+               dropped = true;
 
                ip_unlock(port);
                ipc_port_multiple_lock(); /* massive serialization */
-               ip_lock(base);
 
-               while (ip_active(base) &&
-                   base->ip_destination != IP_NULL &&
-                   base->ip_receiver_name == MACH_PORT_NULL) {
-                       base = base->ip_destination;
-                       ip_lock(base);
-               }
+               took_base_ref = ipc_port_destination_chain_lock(port, &base);
+               /* all ports in chain from port to base, inclusive, are locked */
+
                ipc_port_multiple_unlock();
        }
 
@@ -1894,8 +2542,11 @@ ipc_port_importance_delta_internal(
                ipc_importance_task_reference(*imp_task);
        }
 
-       if (dropped == TRUE) {
+       if (dropped) {
                ip_unlock(base);
+               if (took_base_ref) {
+                       ip_release(base);
+               }
        }
 
        return dropped;
@@ -1951,51 +2602,6 @@ ipc_port_importance_delta(
 }
 #endif /* IMPORTANCE_INHERITANCE */
 
-/*
- *     Routine:        ipc_port_lookup_notify
- *     Purpose:
- *             Make a send-once notify port from a receive right.
- *             Returns IP_NULL if name doesn't denote a receive right.
- *     Conditions:
- *             The space must be locked (read or write) and active.
- *              Being the active space, we can rely on thread server_id
- *             context to give us the proper server level sub-order
- *             within the space.
- */
-
-ipc_port_t
-ipc_port_lookup_notify(
-       ipc_space_t             space,
-       mach_port_name_t        name)
-{
-       ipc_port_t port;
-       ipc_entry_t entry;
-
-       assert(is_active(space));
-
-       entry = ipc_entry_lookup(space, name);
-       if (entry == IE_NULL) {
-               return IP_NULL;
-       }
-       if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
-               return IP_NULL;
-       }
-
-       __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object);
-       assert(port != IP_NULL);
-
-       ip_lock(port);
-       assert(ip_active(port));
-       assert(port->ip_receiver_name == name);
-       assert(port->ip_receiver == space);
-
-       ip_reference(port);
-       port->ip_sorights++;
-       ip_unlock(port);
-
-       return port;
-}
-
 /*
  *     Routine:        ipc_port_make_send_locked
  *     Purpose:
@@ -2008,7 +2614,7 @@ ipc_port_t
 ipc_port_make_send_locked(
        ipc_port_t      port)
 {
-       assert(ip_active(port));
+       require_ip_active(port);
        port->ip_mscount++;
        port->ip_srights++;
        ip_reference(port);
@@ -2031,9 +2637,7 @@ ipc_port_make_send(
 
        ip_lock(port);
        if (ip_active(port)) {
-               port->ip_mscount++;
-               port->ip_srights++;
-               ip_reference(port);
+               ipc_port_make_send_locked(port);
                ip_unlock(port);
                return port;
        }
@@ -2041,6 +2645,22 @@ ipc_port_make_send(
        return IP_DEAD;
 }
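
The change above (and the matching ones for copy_send and make_sonce below) turns the unlocked entry points into thin wrappers over the _locked variants, so the right-count bookkeeping lives in one place. A sketch of how the two shapes are meant to be used (the surrounding helper is illustrative):

/* Illustrative only: pick the variant that matches the locking context. */
static ipc_port_t
example_make_send(ipc_port_t port, bool port_is_locked_and_active)
{
	if (port_is_locked_and_active) {
		/* lock already held, ip_active() already checked by the caller */
		return ipc_port_make_send_locked(port);
	}
	/* takes the lock itself, may return IP_DEAD if the port is inactive */
	return ipc_port_make_send(port);
}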
 
+/*
+ *     Routine:        ipc_port_copy_send_locked
+ *     Purpose:
+ *             Make a naked send right from another naked send right.
+ *     Conditions:
+ *             port locked and active.
+ */
+void
+ipc_port_copy_send_locked(
+       ipc_port_t      port)
+{
+       assert(port->ip_srights > 0);
+       port->ip_srights++;
+       ip_reference(port);
+}
+
 /*
  *     Routine:        ipc_port_copy_send
  *     Purpose:
@@ -2065,10 +2685,7 @@ ipc_port_copy_send(
 
        ip_lock(port);
        if (ip_active(port)) {
-               assert(port->ip_srights > 0);
-
-               ip_reference(port);
-               port->ip_srights++;
+               ipc_port_copy_send_locked(port);
                sright = port;
        } else {
                sright = IP_DEAD;
@@ -2087,21 +2704,20 @@ ipc_port_copy_send(
  *             Nothing locked.
  */
 
-mach_port_name_t
-ipc_port_copyout_send(
+static mach_port_name_t
+ipc_port_copyout_send_internal(
        ipc_port_t      sright,
-       ipc_space_t     space)
+       ipc_space_t     space,
+       ipc_object_copyout_flags_t flags)
 {
        mach_port_name_t name;
 
        if (IP_VALID(sright)) {
                kern_return_t kr;
 
-               kr = ipc_object_copyout(space, (ipc_object_t) sright,
-                   MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
+               kr = ipc_object_copyout(space, ip_to_object(sright),
+                   MACH_MSG_TYPE_PORT_SEND, flags, NULL, NULL, &name);
                if (kr != KERN_SUCCESS) {
-                       ipc_port_release_send(sright);
-
                        if (kr == KERN_INVALID_CAPABILITY) {
                                name = MACH_PORT_DEAD;
                        } else {
@@ -2115,64 +2731,38 @@ ipc_port_copyout_send(
        return name;
 }
 
-/*
- *     Routine:        ipc_port_copyout_name_send
- *     Purpose:
- *             Copyout a naked send right (possibly null/dead) to given name,
- *             or if that fails, destroy the right.
- *     Conditions:
- *             Nothing locked.
- */
-
 mach_port_name_t
-ipc_port_copyout_name_send(
+ipc_port_copyout_send(
        ipc_port_t      sright,
-       ipc_space_t     space,
-       mach_port_name_t name)
+       ipc_space_t     space)
 {
-       if (IP_VALID(sright)) {
-               kern_return_t kr;
-
-               kr = ipc_object_copyout_name(space, (ipc_object_t) sright,
-                   MACH_MSG_TYPE_PORT_SEND, TRUE, name);
-               if (kr != KERN_SUCCESS) {
-                       ipc_port_release_send(sright);
-
-                       if (kr == KERN_INVALID_CAPABILITY) {
-                               name = MACH_PORT_DEAD;
-                       } else {
-                               name = MACH_PORT_NULL;
-                       }
-               }
-       } else {
-               name = CAST_MACH_PORT_TO_NAME(sright);
-       }
+       return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
+}
 
-       return name;
+mach_port_name_t
+ipc_port_copyout_send_pinned(
+       ipc_port_t      sright,
+       ipc_space_t     space)
+{
+       return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_PINNED);
 }
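
ipc_port_copyout_send and ipc_port_copyout_send_pinned are now thin wrappers that differ only in the flags handed to the internal routine; note that the explicit ipc_port_release_send on the error path was dropped above, so the failing copyout path is expected to consume the right itself. A caller-side sketch (helper name is illustrative):

/* Illustrative only: copy a naked send right out and classify the result. */
static mach_port_name_t
example_copyout_send(ipc_port_t sright, ipc_space_t space)
{
	mach_port_name_t name = ipc_port_copyout_send(sright, space);

	if (name == MACH_PORT_DEAD) {
		/* the space could not accept the right (KERN_INVALID_CAPABILITY) */
	} else if (name == MACH_PORT_NULL) {
		/* copyout failed for some other reason */
	}
	return name;
}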
 
 /*
- *     Routine:        ipc_port_release_send
+ *     Routine:        ipc_port_release_send_and_unlock
  *     Purpose:
  *             Release a naked send right.
  *             Consumes a ref for the port.
  *     Conditions:
- *             Nothing locked.
+ *             Port is valid and locked on entry.
+ *             Port is unlocked on exit.
  */
-
 void
-ipc_port_release_send(
+ipc_port_release_send_and_unlock(
        ipc_port_t      port)
 {
        ipc_port_t nsrequest = IP_NULL;
        mach_port_mscount_t mscount;
 
-       if (!IP_VALID(port)) {
-               return;
-       }
-
-       ip_lock(port);
-
        assert(port->ip_srights > 0);
        if (port->ip_srights == 0) {
                panic("Over-release of port %p send right!", port);
@@ -2200,6 +2790,25 @@ ipc_port_release_send(
        }
 }
 
+/*
+ *     Routine:        ipc_port_release_send
+ *     Purpose:
+ *             Release a naked send right.
+ *             Consumes a ref for the port.
+ *     Conditions:
+ *             Nothing locked.
+ */
+
+void
+ipc_port_release_send(
+       ipc_port_t      port)
+{
+       if (IP_VALID(port)) {
+               ip_lock(port);
+               ipc_port_release_send_and_unlock(port);
+       }
+}
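
Splitting the release into ipc_port_release_send_and_unlock plus this unlocked wrapper lets a caller that already holds the port lock drop its send right without an extra unlock/relock. A minimal sketch under that assumption (caller name is illustrative):

/* Illustrative only: do some work under the port lock, then drop the right. */
static void
example_finish_with_send_right(ipc_port_t port)
{
	if (!IP_VALID(port)) {
		return;
	}
	ip_lock(port);
	/* ... inspect or update port state while the lock is held ... */
	ipc_port_release_send_and_unlock(port);
	/* port is unlocked here and the send right has been consumed */
}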
+
 /*
  *     Routine:        ipc_port_make_sonce_locked
  *     Purpose:
@@ -2212,7 +2821,7 @@ ipc_port_t
 ipc_port_make_sonce_locked(
        ipc_port_t      port)
 {
-       assert(ip_active(port));
+       require_ip_active(port);
        port->ip_sorights++;
        ip_reference(port);
        return port;
@@ -2236,8 +2845,7 @@ ipc_port_make_sonce(
 
        ip_lock(port);
        if (ip_active(port)) {
-               port->ip_sorights++;
-               ip_reference(port);
+               ipc_port_make_sonce_locked(port);
                ip_unlock(port);
                return port;
        }
@@ -2267,7 +2875,7 @@ ipc_port_release_sonce(
                return;
        }
 
-       ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_SR_NONE, FALSE);
+       ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN);
 
        ip_lock(port);
 
@@ -2302,7 +2910,7 @@ ipc_port_release_receive(
        }
 
        ip_lock(port);
-       assert(ip_active(port));
+       require_ip_active(port);
        assert(port->ip_receiver_name == MACH_PORT_NULL);
        dest = port->ip_destination;
 
@@ -2326,26 +2934,26 @@ ipc_port_release_receive(
 
 ipc_port_t
 ipc_port_alloc_special(
-       ipc_space_t     space)
+       ipc_space_t             space,
+       ipc_port_init_flags_t   flags)
 {
        ipc_port_t port;
 
-       __IGNORE_WCASTALIGN(port = (ipc_port_t) io_alloc(IOT_PORT));
+       port = ip_object_to_port(io_alloc(IOT_PORT, Z_WAITOK | Z_ZERO));
        if (port == IP_NULL) {
                return IP_NULL;
        }
 
-#if     MACH_ASSERT
+#if MACH_ASSERT
        uintptr_t buf[IP_CALLSTACK_MAX];
        ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
 #endif /* MACH_ASSERT */
 
-       bzero((char *)port, sizeof(*port));
-       io_lock_init(&port->ip_object);
+       io_lock_init(ip_to_object(port));
        port->ip_references = 1;
        port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);
 
-       ipc_port_init(port, space, 1);
+       ipc_port_init(port, space, flags, 1);
 
 #if     MACH_ASSERT
        ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
@@ -2369,7 +2977,7 @@ ipc_port_dealloc_special(
        __assert_only ipc_space_t       space)
 {
        ip_lock(port);
-       assert(ip_active(port));
+       require_ip_active(port);
 //     assert(port->ip_receiver_name != MACH_PORT_NULL);
        assert(port->ip_receiver == space);
 
@@ -2384,7 +2992,7 @@ ipc_port_dealloc_special(
        imq_unlock(&port->ip_messages);
 
        /* relevant part of ipc_port_clear_receiver */
-       ipc_port_set_mscount(port, 0);
+       port->ip_mscount = 0;
        port->ip_messages.imq_seqno = 0;
 
        ipc_port_destroy(port);
@@ -2406,7 +3014,9 @@ ipc_port_finalize(
        ipc_port_request_t requests = port->ip_requests;
 
        assert(port_send_turnstile(port) == TURNSTILE_NULL);
-       assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
+       if (imq_is_turnstile_proxy(&port->ip_messages)) {
+               assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
+       }
 
        if (ip_active(port)) {
                panic("Trying to free an active port. port %p", port);
@@ -2447,8 +3057,9 @@ kdp_mqueue_send_find_owner(struct waitq * waitq, __assert_only event64_t event,
        assert(waitq_is_turnstile_queue(waitq));
 
        turnstile = waitq_to_turnstile(waitq);
-       ipc_port_t port     = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */
-       assert(kdp_is_in_zone(port, "ipc ports"));
+       ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */
+
+       zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
 
        waitinfo->owner = 0;
        waitinfo->context  = VM_KERNEL_UNSLIDE_OR_PERM(port);
@@ -2512,7 +3123,8 @@ kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event,
        waitinfo->owner     = 0;
        if (imq_is_set(mqueue)) { /* we are waiting on a port set */
                ipc_pset_t set = ips_from_mq(mqueue);
-               assert(kdp_is_in_zone(set, "ipc port sets"));
+
+               zone_id_require(ZONE_ID_IPC_PORT_SET, sizeof(struct ipc_pset), set);
 
                /* Reset wait type to specify waiting on port set receive */
                waitinfo->wait_type = kThreadWaitPortSetReceive;
@@ -2523,7 +3135,8 @@ kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event,
                /* There is no specific owner "at the other end" of a port set, so leave unset. */
        } else {
                ipc_port_t port   = ip_from_mq(mqueue);
-               assert(kdp_is_in_zone(port, "ipc ports"));
+
+               zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
 
                waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
                if (ip_lock_held_kdp(port)) {
@@ -2550,8 +3163,8 @@ kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event,
  *     deallocation is intercepted via io_free.
  */
 #if 0
-queue_head_t    port_alloc_queue;
-lck_spin_t      port_alloc_queue_lock;
+queue_head_t    port_alloc_queue = QUEUE_HEAD_INITIALIZER(port_alloc_queue);
+LCK_SPIN_DECLARE(port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);
 #endif
 
 unsigned long   port_count = 0;
@@ -2568,23 +3181,6 @@ int             db_port_walk(
        unsigned int    ref_search,
        unsigned int    ref_target);
 
-/*
- *     Initialize global state needed for run-time
- *     port debugging.
- */
-void
-ipc_port_debug_init(void)
-{
-#if 0
-       queue_init(&port_alloc_queue);
-       lck_spin_init(&port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);
-#endif
-
-       if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof(ipc_portbt))) {
-               ipc_portbt = 0;
-       }
-}
-
 #ifdef MACH_BSD
 extern int proc_pid(struct proc*);
 #endif /* MACH_BSD */