]> git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/kern/ipc_tt.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / kern / ipc_tt.c
index 04071f778fa10baa83c80dffef08e0c7971d190a..f02ed471a03c6c83d2db19e416cfc05327e2bcff 100644 (file)
@@ -1,52 +1,64 @@
 /*
- * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- * 
- * @APPLE_LICENSE_HEADER_END@
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
-/* 
+/*
  * Mach Operating System
  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
  * All Rights Reserved.
- * 
+ *
  * Permission to use, copy, modify and distribute this software and its
  * documentation is hereby granted, provided that both the copyright
  * notice and this permission notice appear in all copies of the
  * software, derivative works or modified versions, and any portions
  * thereof, and that both notices appear in supporting documentation.
- * 
+ *
  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- * 
+ *
  * Carnegie Mellon requests users of this software to return to
- * 
+ *
  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
  *  School of Computer Science
  *  Carnegie Mellon University
  *  Pittsburgh PA 15213-3890
- * 
+ *
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
+/*
+ * NOTICE: This file was modified by McAfee Research in 2004 to introduce
+ * support for mandatory and extensible security protections.  This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ */
 /*
  */
 
 
 #include <mach/mach_types.h>
 #include <mach/boolean.h>
-#include <mach_rt.h>
 #include <mach/kern_return.h>
 #include <mach/mach_param.h>
 #include <mach/task_special_ports.h>
 #include <mach/thread_special_ports.h>
 #include <mach/thread_status.h>
 #include <mach/exception_types.h>
+#include <mach/memory_object_types.h>
 #include <mach/mach_traps.h>
 #include <mach/task_server.h>
 #include <mach/thread_act_server.h>
 #include <mach/mach_host_server.h>
+#include <mach/host_priv_server.h>
 #include <mach/vm_map_server.h>
+
+#include <kern/kern_types.h>
 #include <kern/host.h>
+#include <kern/ipc_kobject.h>
 #include <kern/ipc_tt.h>
-#include <kern/thread_act.h>
+#include <kern/kalloc.h>
+#include <kern/thread.h>
 #include <kern/misc_protos.h>
+#include <kdp/kdp_dyld.h>
+
+#include <vm/vm_map.h>
 #include <vm/vm_pageout.h>
+#include <vm/vm_protos.h>
+
+#include <security/mac_mach_internal.h>
+
+#if CONFIG_CSR
+#include <sys/csr.h>
+#endif
+
+#if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
+extern int cs_relax_platform_task_ports;
+#endif
+
+extern boolean_t IOTaskHasEntitlement(task_t, const char *);
+
+/* forward declarations */
+static kern_return_t port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
+static kern_return_t port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
+static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
+static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
+kern_return_t task_conversion_eval(task_t caller, task_t victim);
+static ipc_space_t convert_port_to_space_no_eval(ipc_port_t port);
+static thread_t convert_port_to_thread_no_eval(ipc_port_t port);
+static ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor);
+static ipc_port_t convert_thread_to_port_with_flavor(thread_t thread, mach_thread_flavor_t flavor);
+static task_read_t convert_port_to_task_read_no_eval(ipc_port_t port);
+static thread_read_t convert_port_to_thread_read_no_eval(ipc_port_t port);
+static ipc_space_read_t convert_port_to_space_read_no_eval(ipc_port_t port);
 
 /*
  *     Routine:        ipc_task_init
 
 void
 ipc_task_init(
-       task_t          task,
-       task_t          parent)
+       task_t          task,
+       task_t          parent)
 {
        ipc_space_t space;
        ipc_port_t kport;
+       ipc_port_t nport;
+       ipc_port_t pport;
        kern_return_t kr;
        int i;
 
 
-       kr = ipc_space_create(&ipc_table_entries[0], &space);
-       if (kr != KERN_SUCCESS)
+       kr = ipc_space_create(&ipc_table_entries[0], IPC_LABEL_NONE, &space);
+       if (kr != KERN_SUCCESS) {
                panic("ipc_task_init");
+       }
+
+       space->is_task = task;
+
+       if (immovable_control_port_enabled) {
+               ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
+               if (pinned_control_port_enabled) {
+                       options |= IPC_KOBJECT_ALLOC_PINNED;
+               }
+               pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_NONE, options);
 
+               kport = ipc_kobject_alloc_labeled_port(IKO_NULL, IKOT_TASK_CONTROL,
+                   IPC_LABEL_SUBST_TASK, IPC_KOBJECT_ALLOC_NONE);
+               kport->ip_alt_port = pport;
+       } else {
+               kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
+                   IPC_KOBJECT_ALLOC_NONE);
+
+               pport = kport;
+       }
+
+       nport = ipc_port_alloc_kernel();
+       if (nport == IP_NULL) {
+               panic("ipc_task_init");
+       }
 
-       kport = ipc_port_alloc_kernel();
-       if (kport == IP_NULL)
+       if (pport == IP_NULL) {
                panic("ipc_task_init");
+       }
 
        itk_lock_init(task);
-       task->itk_self = kport;
-       task->itk_sself = ipc_port_make_send(kport);
+       task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
+       task->itk_task_ports[TASK_FLAVOR_NAME] = nport;
+
+       /* Lazily allocated on-demand */
+       task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
+       task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
+       task->itk_dyld_notify = NULL;
+
+       task->itk_self = pport;
+       task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
+       if (task_is_a_corpse_fork(task)) {
+               /*
+                * A no-senders notification for a corpse would not
+                * work with a naked send right in kernel.
+                */
+               task->itk_settable_self = IP_NULL;
+       } else {
+               task->itk_settable_self = ipc_port_make_send(kport);
+       }
+       task->itk_debug_control = IP_NULL;
        task->itk_space = space;
-       space->is_fast = FALSE;
+
+#if CONFIG_MACF
+       task->exc_actions[0].label = NULL;
+       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+               mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
+       }
+#endif
+
+       /* always zero-out the first (unused) array element */
+       bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));
 
        if (parent == TASK_NULL) {
                ipc_port_t port;
-
                for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
                        task->exc_actions[i].port = IP_NULL;
+                       task->exc_actions[i].flavor = 0;
+                       task->exc_actions[i].behavior = 0;
+                       task->exc_actions[i].privileged = FALSE;
                }/* for */
-               
+
                kr = host_get_host_port(host_priv_self(), &port);
                assert(kr == KERN_SUCCESS);
                task->itk_host = port;
 
                task->itk_bootstrap = IP_NULL;
+               task->itk_seatbelt = IP_NULL;
+               task->itk_gssd = IP_NULL;
+               task->itk_task_access = IP_NULL;
 
-               for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+               for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
                        task->itk_registered[i] = IP_NULL;
+               }
        } else {
                itk_lock(parent);
-               assert(parent->itk_self != IP_NULL);
+               assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);
 
                /* inherit registered ports */
 
-               for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
+               for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
                        task->itk_registered[i] =
-                               ipc_port_copy_send(parent->itk_registered[i]);
+                           ipc_port_copy_send(parent->itk_registered[i]);
+               }
 
                /* inherit exception and bootstrap ports */
 
                for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
-                   task->exc_actions[i].port =
-                               ipc_port_copy_send(parent->exc_actions[i].port);
-                   task->exc_actions[i].flavor =
-                               parent->exc_actions[i].flavor;
-                   task->exc_actions[i].behavior = 
-                               parent->exc_actions[i].behavior;
+                       task->exc_actions[i].port =
+                           ipc_port_copy_send(parent->exc_actions[i].port);
+                       task->exc_actions[i].flavor =
+                           parent->exc_actions[i].flavor;
+                       task->exc_actions[i].behavior =
+                           parent->exc_actions[i].behavior;
+                       task->exc_actions[i].privileged =
+                           parent->exc_actions[i].privileged;
+#if CONFIG_MACF
+                       mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
+#endif
                }/* for */
                task->itk_host =
-                       ipc_port_copy_send(parent->itk_host);
+                   ipc_port_copy_send(parent->itk_host);
 
                task->itk_bootstrap =
-                       ipc_port_copy_send(parent->itk_bootstrap);
+                   ipc_port_copy_send(parent->itk_bootstrap);
+
+               task->itk_seatbelt =
+                   ipc_port_copy_send(parent->itk_seatbelt);
+
+               task->itk_gssd =
+                   ipc_port_copy_send(parent->itk_gssd);
+
+               task->itk_task_access =
+                   ipc_port_copy_send(parent->itk_task_access);
 
                itk_unlock(parent);
        }
@@ -168,14 +289,40 @@ ipc_task_init(
 
 void
 ipc_task_enable(
-       task_t          task)
+       task_t          task)
 {
        ipc_port_t kport;
+       ipc_port_t nport;
+       ipc_port_t iport;
+       ipc_port_t rdport;
+       ipc_port_t pport;
 
        itk_lock(task);
-       kport = task->itk_self;
-       if (kport != IP_NULL)
-               ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
+
+       assert(!task->ipc_active || task_is_a_corpse(task));
+       task->ipc_active = true;
+
+       kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
+       if (kport != IP_NULL) {
+               ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK_CONTROL);
+       }
+       nport = task->itk_task_ports[TASK_FLAVOR_NAME];
+       if (nport != IP_NULL) {
+               ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
+       }
+       iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
+       if (iport != IP_NULL) {
+               ipc_kobject_set(iport, (ipc_kobject_t) task, IKOT_TASK_INSPECT);
+       }
+       rdport = task->itk_task_ports[TASK_FLAVOR_READ];
+       if (rdport != IP_NULL) {
+               ipc_kobject_set(rdport, (ipc_kobject_t) task, IKOT_TASK_READ);
+       }
+       pport = task->itk_self;
+       if (immovable_control_port_enabled && pport != IP_NULL) {
+               ipc_kobject_set(pport, (ipc_kobject_t) task, IKOT_TASK_CONTROL);
+       }
+
        itk_unlock(task);
 }
 
@@ -189,14 +336,69 @@ ipc_task_enable(
 
 void
 ipc_task_disable(
-       task_t          task)
+       task_t          task)
 {
        ipc_port_t kport;
+       ipc_port_t nport;
+       ipc_port_t iport;
+       ipc_port_t rdport;
+       ipc_port_t rport;
+       ipc_port_t pport;
 
        itk_lock(task);
-       kport = task->itk_self;
-       if (kport != IP_NULL)
-               ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
+
+       /*
+        * This innocuous looking line is load bearing.
+        *
+        * It is used to disable the creation of lazy made ports.
+        * We must do so before we drop the last reference on the task,
+        * as task ports do not own a reference on the task, and
+        * convert_port_to_task* will crash trying to resurrect a task.
+        */
+       task->ipc_active = false;
+
+       kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
+       if (kport != IP_NULL) {
+               ip_lock(kport);
+               kport->ip_alt_port = IP_NULL;
+               ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
+               ip_unlock(kport);
+       }
+       nport = task->itk_task_ports[TASK_FLAVOR_NAME];
+       if (nport != IP_NULL) {
+               ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
+       }
+       iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
+       if (iport != IP_NULL) {
+               ipc_kobject_set(iport, IKO_NULL, IKOT_NONE);
+       }
+       rdport = task->itk_task_ports[TASK_FLAVOR_READ];
+       if (rdport != IP_NULL) {
+               ipc_kobject_set(rdport, IKO_NULL, IKOT_NONE);
+       }
+       pport = task->itk_self;
+       if (pport != kport && pport != IP_NULL) {
+               assert(immovable_control_port_enabled);
+               assert(pport->ip_immovable_send);
+               ipc_kobject_set(pport, IKO_NULL, IKOT_NONE);
+       }
+
+       rport = task->itk_resume;
+       if (rport != IP_NULL) {
+               /*
+                * From this point onwards this task is no longer accepting
+                * resumptions.
+                *
+                * There are still outstanding suspensions on this task,
+                * even as it is being torn down. Disconnect the task
+                * from the rport, thereby "orphaning" the rport. The rport
+                * itself will go away only when the last suspension holder
+                * destroys his SO right to it -- when he either
+                * exits, or tries to actually use that last SO right to
+                * resume this (now non-existent) task.
+                */
+               ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
+       }
        itk_unlock(task);
 }
 
@@ -211,55 +413,149 @@ ipc_task_disable(
 
 void
 ipc_task_terminate(
-       task_t          task)
+       task_t          task)
 {
        ipc_port_t kport;
-       int i;
+       ipc_port_t nport;
+       ipc_port_t iport;
+       ipc_port_t rdport;
+       ipc_port_t rport;
+       ipc_port_t pport;
+       ipc_port_t sself;
+       ipc_port_t *notifiers_ptr = NULL;
 
        itk_lock(task);
-       kport = task->itk_self;
+
+       /*
+        * If we ever failed to clear ipc_active before the last reference
+        * was dropped, lazy ports might be made and used after the last
+        * reference is dropped and cause use after free (see comment in
+        * ipc_task_disable()).
+        */
+       assert(!task->ipc_active);
+
+       kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
+       sself = task->itk_settable_self;
 
        if (kport == IP_NULL) {
                /* the task is already terminated (can this happen?) */
                itk_unlock(task);
                return;
        }
+       task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;
+
+       rdport = task->itk_task_ports[TASK_FLAVOR_READ];
+       task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
+
+       iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
+       task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
+
+       nport = task->itk_task_ports[TASK_FLAVOR_NAME];
+       assert(nport != IP_NULL);
+       task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;
+
+       if (task->itk_dyld_notify) {
+               notifiers_ptr = task->itk_dyld_notify;
+               task->itk_dyld_notify = NULL;
+       }
+
+       if (immovable_control_port_enabled) {
+               pport = task->itk_self;
+               assert(pport != IP_NULL);
+       }
 
        task->itk_self = IP_NULL;
+
+       rport = task->itk_resume;
+       task->itk_resume = IP_NULL;
+
        itk_unlock(task);
 
        /* release the naked send rights */
+       if (IP_VALID(sself)) {
+               ipc_port_release_send(sself);
+       }
 
-       if (IP_VALID(task->itk_sself))
-               ipc_port_release_send(task->itk_sself);
+       if (notifiers_ptr) {
+               for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
+                       if (IP_VALID(notifiers_ptr[i])) {
+                               ipc_port_release_send(notifiers_ptr[i]);
+                       }
+               }
+               kfree(notifiers_ptr, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT * sizeof(ipc_port_t));
+       }
 
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+       for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
                if (IP_VALID(task->exc_actions[i].port)) {
                        ipc_port_release_send(task->exc_actions[i].port);
                }
-       }/* for */
-       if (IP_VALID(task->itk_host))
+#if CONFIG_MACF
+               mac_exc_free_action_label(task->exc_actions + i);
+#endif
+       }
+
+       if (IP_VALID(task->itk_host)) {
                ipc_port_release_send(task->itk_host);
+       }
 
-       if (IP_VALID(task->itk_bootstrap))
+       if (IP_VALID(task->itk_bootstrap)) {
                ipc_port_release_send(task->itk_bootstrap);
+       }
 
-       for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
-               if (IP_VALID(task->itk_registered[i]))
+       if (IP_VALID(task->itk_seatbelt)) {
+               ipc_port_release_send(task->itk_seatbelt);
+       }
+
+       if (IP_VALID(task->itk_gssd)) {
+               ipc_port_release_send(task->itk_gssd);
+       }
+
+       if (IP_VALID(task->itk_task_access)) {
+               ipc_port_release_send(task->itk_task_access);
+       }
+
+       if (IP_VALID(task->itk_debug_control)) {
+               ipc_port_release_send(task->itk_debug_control);
+       }
+
+       for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
+               if (IP_VALID(task->itk_registered[i])) {
                        ipc_port_release_send(task->itk_registered[i]);
+               }
+       }
 
-       ipc_port_release_send(task->wired_ledger_port);
-       ipc_port_release_send(task->paged_ledger_port);
+       /* destroy the kernel ports */
+       if (immovable_control_port_enabled) {
+               ip_lock(kport);
+               kport->ip_alt_port = IP_NULL;
+               ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
+               ip_unlock(kport);
 
-       /* destroy the kernel port */
+               /* pport == kport if immovability is off */
+               ipc_port_dealloc_kernel(pport);
+       }
        ipc_port_dealloc_kernel(kport);
+       ipc_port_dealloc_kernel(nport);
+       if (iport != IP_NULL) {
+               ipc_port_dealloc_kernel(iport);
+       }
+       if (rdport != IP_NULL) {
+               ipc_port_dealloc_kernel(rdport);
+       }
+       if (rport != IP_NULL) {
+               ipc_port_dealloc_kernel(rport);
+       }
+
+       itk_lock_destroy(task);
 }
 
 /*
  *     Routine:        ipc_task_reset
  *     Purpose:
  *             Reset a task's IPC state to protect it when
- *             it enters an elevated security context.
+ *             it enters an elevated security context. The
+ *             task name port can remain the same - since it
+ *              represents no specific privilege.
  *     Conditions:
  *             Nothing locked.  The task must be suspended.
  *             (Or the current thread must be in the task.)
@@ -267,60 +563,149 @@ ipc_task_terminate(
 
 void
 ipc_task_reset(
-       task_t          task)
+       task_t          task)
 {
-       ipc_port_t old_kport, new_kport;
+       ipc_port_t old_kport, old_pport, new_kport, new_pport;
        ipc_port_t old_sself;
-#if 0
+       ipc_port_t old_rdport;
+       ipc_port_t old_iport;
        ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
-       int i;
+       ipc_port_t *notifiers_ptr = NULL;
+
+#if CONFIG_MACF
+       /* Fresh label to unset credentials in existing labels. */
+       struct label *unset_label = mac_exc_create_label();
 #endif
 
-       new_kport = ipc_port_alloc_kernel();
-       if (new_kport == IP_NULL)
-               panic("ipc_task_reset");
+       if (immovable_control_port_enabled) {
+               ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
+               if (pinned_control_port_enabled) {
+                       options |= IPC_KOBJECT_ALLOC_PINNED;
+               }
+
+               new_pport = ipc_kobject_alloc_port((ipc_kobject_t)task,
+                   IKOT_TASK_CONTROL, options);
+
+               new_kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)task,
+                   IKOT_TASK_CONTROL, IPC_LABEL_SUBST_TASK,
+                   IPC_KOBJECT_ALLOC_NONE);
+               new_kport->ip_alt_port = new_pport;
+       } else {
+               new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
+                   IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
+
+               new_pport = new_kport;
+       }
 
        itk_lock(task);
 
-       old_kport = task->itk_self;
+       old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
+       old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
+       old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
 
-       if (old_kport == IP_NULL) {
+       old_pport = task->itk_self;
+
+       if (old_pport == IP_NULL) {
                /* the task is already terminated (can this happen?) */
                itk_unlock(task);
                ipc_port_dealloc_kernel(new_kport);
+               if (immovable_control_port_enabled) {
+                       ipc_port_dealloc_kernel(new_pport);
+               }
+#if CONFIG_MACF
+               mac_exc_free_label(unset_label);
+#endif
                return;
        }
 
-       task->itk_self = new_kport;
-       old_sself = task->itk_sself;
-       task->itk_sself = ipc_port_make_send(new_kport);
-       ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
-       ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
+       old_sself = task->itk_settable_self;
+       task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
+       task->itk_self = new_pport;
 
-#if 0
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
-               old_exc_actions[i] = task->exc_action[i].port;
-               task->exc_actions[i].port = IP_NULL;
-       }/* for */
+       task->itk_settable_self = ipc_port_make_send(new_kport);
+
+       /* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
+       ip_lock(old_kport);
+       old_kport->ip_alt_port = IP_NULL;
+       ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
+       task->exec_token += 1;
+       ip_unlock(old_kport);
+
+       /* Reset the read and inspect flavors of task port */
+       task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
+       task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
+
+       if (immovable_control_port_enabled) {
+               ip_lock(old_pport);
+               ipc_kobject_set_atomically(old_pport, IKO_NULL, IKOT_NONE);
+               task->exec_token += 1;
+               ip_unlock(old_pport);
+       }
+
+       for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+               old_exc_actions[i] = IP_NULL;
+
+               if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
+                       continue;
+               }
+
+               if (!task->exc_actions[i].privileged) {
+#if CONFIG_MACF
+                       mac_exc_update_action_label(task->exc_actions + i, unset_label);
 #endif
+                       old_exc_actions[i] = task->exc_actions[i].port;
+                       task->exc_actions[i].port = IP_NULL;
+               }
+       }/* for */
+
+       if (IP_VALID(task->itk_debug_control)) {
+               ipc_port_release_send(task->itk_debug_control);
+       }
+       task->itk_debug_control = IP_NULL;
+
+       if (task->itk_dyld_notify) {
+               notifiers_ptr = task->itk_dyld_notify;
+               task->itk_dyld_notify = NULL;
+       }
 
        itk_unlock(task);
 
+#if CONFIG_MACF
+       mac_exc_free_label(unset_label);
+#endif
+
        /* release the naked send rights */
 
-       if (IP_VALID(old_sself))
+       if (IP_VALID(old_sself)) {
                ipc_port_release_send(old_sself);
+       }
 
-#if 0
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+       if (notifiers_ptr) {
+               for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
+                       if (IP_VALID(notifiers_ptr[i])) {
+                               ipc_port_release_send(notifiers_ptr[i]);
+                       }
+               }
+               kfree(notifiers_ptr, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT * sizeof(ipc_port_t));
+       }
+
+       for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
                if (IP_VALID(old_exc_actions[i])) {
                        ipc_port_release_send(old_exc_actions[i]);
                }
        }/* for */
-#endif
 
-       /* destroy the kernel port */
+       /* destroy all task port flavors */
        ipc_port_dealloc_kernel(old_kport);
+       if (immovable_control_port_enabled) {
+               ipc_port_dealloc_kernel(old_pport);
+       }
+       if (old_rdport != IP_NULL) {
+               ipc_port_dealloc_kernel(old_rdport);
+       }
+       if (old_iport != IP_NULL) {
+               ipc_port_dealloc_kernel(old_iport);
+       }
 }
 
 /*
@@ -333,191 +718,510 @@ ipc_task_reset(
 
 void
 ipc_thread_init(
-       thread_t        thread)
+       thread_t        thread,
+       ipc_thread_init_options_t options)
 {
-       ipc_kmsg_queue_init(&thread->ith_messages);
-       thread->ith_mig_reply = MACH_PORT_NULL;
-       thread->ith_rpc_reply = IP_NULL;
-}
+       ipc_port_t      kport;
+       ipc_port_t      pport;
+       ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;
 
-/*
- *     Routine:        ipc_thread_terminate
- *     Purpose:
- *             Clean up and destroy a thread's IPC state.
- *     Conditions:
- *             Nothing locked.  The thread must be suspended.
- *             (Or be the current thread.)
- */
+       /*
+        * Having immovable_control_port_enabled boot-arg set does not guarantee
+        * thread control port should be made immovable/pinned, also check options.
+        *
+        * raw mach threads created via thread_create() have neither of INIT_PINNED
+        * or INIT_IMMOVABLE set.
+        */
+       if (immovable_control_port_enabled && (options & IPC_THREAD_INIT_IMMOVABLE)) {
+               alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
 
-void
-ipc_thread_terminate(
-       thread_t        thread)
-{
-       assert(ipc_kmsg_queue_empty(&thread->ith_messages));
+               if (pinned_control_port_enabled && (options & IPC_THREAD_INIT_PINNED)) {
+                       alloc_options |= IPC_KOBJECT_ALLOC_PINNED;
+               }
 
-        if (thread->ith_rpc_reply != IP_NULL)
-            ipc_port_dealloc_reply(thread->ith_rpc_reply);
-       thread->ith_rpc_reply = IP_NULL;
-}
+               pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
+                   IKOT_THREAD_CONTROL, alloc_options);
 
-/*
- *     Routine:        ipc_thr_act_init
- *     Purpose:
- *             Initialize an thr_act's IPC state.
- *     Conditions:
- *             Nothing locked.
- */
+               kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
+                   IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, IPC_KOBJECT_ALLOC_NONE);
+               kport->ip_alt_port = pport;
+       } else {
+               kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
+                   IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
 
-void
-ipc_thr_act_init(task_t task, thread_act_t thr_act)
-{
-       ipc_port_t kport; int i;
+               pport = kport;
+       }
 
-       kport = ipc_port_alloc_kernel();
-       if (kport == IP_NULL)
-               panic("ipc_thr_act_init");
+       thread->ith_thread_ports[THREAD_FLAVOR_CONTROL] = kport;
 
-       thr_act->ith_self = kport;
-       thr_act->ith_sself = ipc_port_make_send(kport);
+       thread->ith_settable_self = ipc_port_make_send(kport);
 
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
-               thr_act->exc_actions[i].port = IP_NULL;
+       thread->ith_self = pport;
 
-       ipc_kobject_set(kport, (ipc_kobject_t) thr_act, IKOT_ACT);
-}
+       thread->ith_special_reply_port = NULL;
+       thread->exc_actions = NULL;
 
-void
-ipc_thr_act_disable(thread_act_t thr_act)
-{
-       int i;
-       ipc_port_t kport;
+#if IMPORTANCE_INHERITANCE
+       thread->ith_assertions = 0;
+#endif
 
-       kport = thr_act->ith_self;
+       thread->ipc_active = true;
+       ipc_kmsg_queue_init(&thread->ith_messages);
 
-       if (kport != IP_NULL)
-               ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
+       thread->ith_rpc_reply = IP_NULL;
 }
 
 void
-ipc_thr_act_terminate(thread_act_t thr_act)
+ipc_thread_init_exc_actions(
+       thread_t        thread)
 {
-       ipc_port_t kport; int i;
+       assert(thread->exc_actions == NULL);
 
-       kport = thr_act->ith_self;
+       thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
+       bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
 
-       if (kport == IP_NULL) {
-               /* the thread is already terminated (can this happen?) */
-               return;
+#if CONFIG_MACF
+       for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
+               mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
        }
+#endif
+}
 
-       thr_act->ith_self = IP_NULL;
-
-       /* release the naked send rights */
-
-       if (IP_VALID(thr_act->ith_sself))
-               ipc_port_release_send(thr_act->ith_sself);
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
-           if (IP_VALID(thr_act->exc_actions[i].port))
-               ipc_port_release_send(thr_act->exc_actions[i].port);
-        }
+void
+ipc_thread_destroy_exc_actions(
+       thread_t        thread)
+{
+       if (thread->exc_actions != NULL) {
+#if CONFIG_MACF
+               for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
+                       mac_exc_free_action_label(thread->exc_actions + i);
+               }
+#endif
 
-       /* destroy the kernel port */
-       ipc_port_dealloc_kernel(kport);
+               kfree(thread->exc_actions,
+                   sizeof(struct exception_action) * EXC_TYPES_COUNT);
+               thread->exc_actions = NULL;
+       }
 }
 
 /*
- *     Routine:        retrieve_task_self_fast
+ *     Routine:        ipc_thread_disable
  *     Purpose:
- *             Optimized version of retrieve_task_self,
- *             that only works for the current task.
- *
- *             Return a send right (possibly null/dead)
- *             for the task's user-visible self port.
+ *             Clean up and destroy a thread's IPC state.
  *     Conditions:
- *             Nothing locked.
+ *             Thread locked.
  */
-
-ipc_port_t
-retrieve_task_self_fast(
-       register task_t         task)
+void
+ipc_thread_disable(
+       thread_t        thread)
 {
-       register ipc_port_t port;
+       ipc_port_t      kport = thread->ith_thread_ports[THREAD_FLAVOR_CONTROL];
+       ipc_port_t      iport = thread->ith_thread_ports[THREAD_FLAVOR_INSPECT];
+       ipc_port_t      rdport = thread->ith_thread_ports[THREAD_FLAVOR_READ];
+       ipc_port_t      pport = thread->ith_self;
 
-       assert(task == current_task());
+       /*
+        * This innocuous looking line is load bearing.
+        *
+        * It is used to disable the creation of lazy made ports.
+        * We must do so before we drop the last reference on the thread,
+        * as thread ports do not own a reference on the thread, and
+        * convert_port_to_thread* will crash trying to resurrect a thread.
+        */
+       thread->ipc_active = false;
 
-       itk_lock(task);
-       assert(task->itk_self != IP_NULL);
+       if (kport != IP_NULL) {
+               ip_lock(kport);
+               kport->ip_alt_port = IP_NULL;
+               ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
+               ip_unlock(kport);
+       }
 
-       if ((port = task->itk_sself) == task->itk_self) {
-               /* no interposing */
+       if (iport != IP_NULL) {
+               ipc_kobject_set(iport, IKO_NULL, IKOT_NONE);
+       }
 
-               ip_lock(port);
-               assert(ip_active(port));
-               ip_reference(port);
-               port->ip_srights++;
-               ip_unlock(port);
-       } else
-               port = ipc_port_copy_send(port);
-       itk_unlock(task);
+       if (rdport != IP_NULL) {
+               ipc_kobject_set(rdport, IKO_NULL, IKOT_NONE);
+       }
 
-       return port;
+       if (pport != kport && pport != IP_NULL) {
+               assert(immovable_control_port_enabled);
+               assert(pport->ip_immovable_send);
+               ipc_kobject_set(pport, IKO_NULL, IKOT_NONE);
+       }
+
+       /* unbind the thread special reply port */
+       if (IP_VALID(thread->ith_special_reply_port)) {
+               ipc_port_unbind_special_reply_port(thread, TRUE);
+       }
 }
 
 /*
- *     Routine:        retrieve_act_self_fast
+ *     Routine:        ipc_thread_terminate
  *     Purpose:
- *             Optimized version of retrieve_thread_self,
- *             that only works for the current thread.
- *
- *             Return a send right (possibly null/dead)
- *             for the thread's user-visible self port.
+ *             Clean up and destroy a thread's IPC state.
  *     Conditions:
  *             Nothing locked.
  */
 
-ipc_port_t
-retrieve_act_self_fast(thread_act_t thr_act)
+void
+ipc_thread_terminate(
+       thread_t        thread)
 {
-       register ipc_port_t port;
+       ipc_port_t kport = IP_NULL;
+       ipc_port_t iport = IP_NULL;
+       ipc_port_t rdport = IP_NULL;
+       ipc_port_t ith_rpc_reply = IP_NULL;
+       ipc_port_t pport = IP_NULL;
 
-       assert(thr_act == current_act());
-       act_lock(thr_act);
-       assert(thr_act->ith_self != IP_NULL);
+       thread_mtx_lock(thread);
 
-       if ((port = thr_act->ith_sself) == thr_act->ith_self) {
-               /* no interposing */
+       /*
+        * If we ever failed to clear ipc_active before the last reference
+        * was dropped, lazy ports might be made and used after the last
+        * reference is dropped and cause use after free (see comment in
+        * ipc_thread_disable()).
+        */
+       assert(!thread->ipc_active);
 
-               ip_lock(port);
-               assert(ip_active(port));
-               ip_reference(port);
-               port->ip_srights++;
-               ip_unlock(port);
-       } else
-               port = ipc_port_copy_send(port);
-       act_unlock(thr_act);
+       kport = thread->ith_thread_ports[THREAD_FLAVOR_CONTROL];
+       iport = thread->ith_thread_ports[THREAD_FLAVOR_INSPECT];
+       rdport = thread->ith_thread_ports[THREAD_FLAVOR_READ];
+       pport = thread->ith_self;
 
-       return port;
-}
+       if (kport != IP_NULL) {
+               if (IP_VALID(thread->ith_settable_self)) {
+                       ipc_port_release_send(thread->ith_settable_self);
+               }
 
-/*
- *     Routine:        task_self_trap [mach trap]
- *     Purpose:
- *             Give the caller send rights for his own task port.
- *     Conditions:
- *             Nothing locked.
- *     Returns:
- *             MACH_PORT_NULL if there are any resource failures
- *             or other errors.
+               thread->ith_thread_ports[THREAD_FLAVOR_CONTROL] = IP_NULL;
+               thread->ith_thread_ports[THREAD_FLAVOR_READ] = IP_NULL;
+               thread->ith_thread_ports[THREAD_FLAVOR_INSPECT] = IP_NULL;
+               thread->ith_settable_self = IP_NULL;
+               thread->ith_self = IP_NULL;
+
+               if (thread->exc_actions != NULL) {
+                       for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
+                               if (IP_VALID(thread->exc_actions[i].port)) {
+                                       ipc_port_release_send(thread->exc_actions[i].port);
+                               }
+                       }
+                       ipc_thread_destroy_exc_actions(thread);
+               }
+       }
+
+#if IMPORTANCE_INHERITANCE
+       assert(thread->ith_assertions == 0);
+#endif
+
+       assert(ipc_kmsg_queue_empty(&thread->ith_messages));
+       ith_rpc_reply = thread->ith_rpc_reply;
+       thread->ith_rpc_reply = IP_NULL;
+
+       thread_mtx_unlock(thread);
+
+       if (pport != kport && pport != IP_NULL) {
+               /* this thread has immovable control port */
+               ip_lock(kport);
+               kport->ip_alt_port = IP_NULL;
+               ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
+               ip_unlock(kport);
+               ipc_port_dealloc_kernel(pport);
+       }
+       if (kport != IP_NULL) {
+               ipc_port_dealloc_kernel(kport);
+       }
+       if (iport != IP_NULL) {
+               ipc_port_dealloc_kernel(iport);
+       }
+       if (rdport != IP_NULL) {
+               ipc_port_dealloc_kernel(rdport);
+       }
+       if (ith_rpc_reply != IP_NULL) {
+               ipc_port_dealloc_reply(ith_rpc_reply);
+       }
+}
+
+/*
+ *     Routine:        ipc_thread_reset
+ *     Purpose:
+ *             Reset the IPC state for a given Mach thread when
+ *             its task enters an elevated security context.
+ *             All flavors of thread port and its exception ports have
+ *             to be reset.  Its RPC reply port cannot have any
+ *             rights outstanding, so it should be fine. The thread
+ *             inspect and read port are set to NULL.
+ *     Conditions:
+ *             Nothing locked.
+ */
+
+void
+ipc_thread_reset(
+       thread_t        thread)
+{
+       ipc_port_t old_kport, new_kport, old_pport, new_pport;
+       ipc_port_t old_sself;
+       ipc_port_t old_rdport;
+       ipc_port_t old_iport;
+       ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
+       boolean_t  has_old_exc_actions = FALSE;
+       boolean_t thread_is_immovable, thread_is_pinned;
+       int i;
+
+#if CONFIG_MACF
+       struct label *new_label = mac_exc_create_label();
+#endif
+
+       thread_is_immovable = thread->ith_self->ip_immovable_send;
+       thread_is_pinned = thread->ith_self->ip_pinned;
+
+       if (thread_is_immovable) {
+               ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;
+
+               if (thread_is_pinned) {
+                       assert(pinned_control_port_enabled);
+                       alloc_options |= IPC_KOBJECT_ALLOC_PINNED;
+               }
+               if (thread_is_immovable) {
+                       alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
+               }
+               new_pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
+                   IKOT_THREAD_CONTROL, alloc_options);
+
+               new_kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
+                   IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD,
+                   IPC_KOBJECT_ALLOC_NONE);
+               new_kport->ip_alt_port = new_pport;
+       } else {
+               new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
+                   IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
+
+               new_pport = new_kport;
+       }
+
+       thread_mtx_lock(thread);
+
+       old_kport = thread->ith_thread_ports[THREAD_FLAVOR_CONTROL];
+       old_rdport = thread->ith_thread_ports[THREAD_FLAVOR_READ];
+       old_iport = thread->ith_thread_ports[THREAD_FLAVOR_INSPECT];
+
+       old_sself = thread->ith_settable_self;
+       old_pport = thread->ith_self;
+
+       if (old_kport == IP_NULL && thread->inspection == FALSE) {
+               /* thread is already terminated (can this happen?) */
+               thread_mtx_unlock(thread);
+               ipc_port_dealloc_kernel(new_kport);
+               if (thread_is_immovable) {
+                       ipc_port_dealloc_kernel(new_pport);
+               }
+#if CONFIG_MACF
+               mac_exc_free_label(new_label);
+#endif
+               return;
+       }
+
+       thread->ipc_active = true;
+       thread->ith_thread_ports[THREAD_FLAVOR_CONTROL] = new_kport;
+       thread->ith_self = new_pport;
+       thread->ith_settable_self = ipc_port_make_send(new_kport);
+       thread->ith_thread_ports[THREAD_FLAVOR_INSPECT] = IP_NULL;
+       thread->ith_thread_ports[THREAD_FLAVOR_READ] = IP_NULL;
+
+       if (old_kport != IP_NULL) {
+               ip_lock(old_kport);
+               old_kport->ip_alt_port = IP_NULL;
+               ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
+               ip_unlock(old_kport);
+       }
+       if (old_rdport != IP_NULL) {
+               ipc_kobject_set(old_rdport, IKO_NULL, IKOT_NONE);
+       }
+       if (old_iport != IP_NULL) {
+               ipc_kobject_set(old_iport, IKO_NULL, IKOT_NONE);
+       }
+       if (thread_is_immovable && old_pport != IP_NULL) {
+               ipc_kobject_set(old_pport, IKO_NULL, IKOT_NONE);
+       }
+
+       /*
+        * Only ports that were set by root-owned processes
+        * (privileged ports) should survive
+        */
+       if (thread->exc_actions != NULL) {
+               has_old_exc_actions = TRUE;
+               for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+                       if (thread->exc_actions[i].privileged) {
+                               old_exc_actions[i] = IP_NULL;
+                       } else {
+#if CONFIG_MACF
+                               mac_exc_update_action_label(thread->exc_actions + i, new_label);
+#endif
+                               old_exc_actions[i] = thread->exc_actions[i].port;
+                               thread->exc_actions[i].port = IP_NULL;
+                       }
+               }
+       }
+
+       thread_mtx_unlock(thread);
+
+#if CONFIG_MACF
+       mac_exc_free_label(new_label);
+#endif
+
+       /* release the naked send rights */
+
+       if (IP_VALID(old_sself)) {
+               ipc_port_release_send(old_sself);
+       }
+
+       if (has_old_exc_actions) {
+               for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+                       ipc_port_release_send(old_exc_actions[i]);
+               }
+       }
+
+       /* destroy the kernel port */
+       if (old_kport != IP_NULL) {
+               ipc_port_dealloc_kernel(old_kport);
+       }
+       if (old_rdport != IP_NULL) {
+               ipc_port_dealloc_kernel(old_rdport);
+       }
+       if (old_iport != IP_NULL) {
+               ipc_port_dealloc_kernel(old_iport);
+       }
+
+       if (thread_is_immovable && old_pport != IP_NULL) {
+               ipc_port_dealloc_kernel(old_pport);
+       }
+
+       /* unbind the thread special reply port */
+       if (IP_VALID(thread->ith_special_reply_port)) {
+               ipc_port_unbind_special_reply_port(thread, TRUE);
+       }
+}
+
+/*
+ *     Routine:        retrieve_task_self_fast
+ *     Purpose:
+ *             Optimized version of retrieve_task_self,
+ *             that only works for the current task.
+ *
+ *             Return a send right (possibly null/dead)
+ *             for the task's user-visible self port.
+ *     Conditions:
+ *             Nothing locked.
+ */
+
+ipc_port_t
+retrieve_task_self_fast(
+       task_t          task)
+{
+       ipc_port_t port = IP_NULL;
+
+       assert(task == current_task());
+
+       itk_lock(task);
+       assert(task->itk_self != IP_NULL);
+
+       if (task->itk_settable_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
+               /* no interposing, return the IMMOVABLE port */
+               port = ipc_port_make_send(task->itk_self);
+               if (immovable_control_port_enabled) {
+                       assert(port->ip_immovable_send == 1);
+                       if (pinned_control_port_enabled) {
+                               /* pinned port is also immovable */
+                               assert(port->ip_pinned == 1);
+                       }
+               }
+       } else {
+               port = ipc_port_copy_send(task->itk_settable_self);
+       }
+       itk_unlock(task);
+
+       return port;
+}
+
+/*
+ *     Routine:        mach_task_is_self
+ *     Purpose:
+ *      [MIG call] Checks if the task (control/read/inspect/name/movable)
+ *      port is pointing to current_task.
+ */
+kern_return_t
+mach_task_is_self(
+       task_t         task,
+       boolean_t     *is_self)
+{
+       if (task == TASK_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       *is_self = (task == current_task());
+
+       return KERN_SUCCESS;
+}
+
+/*
+ *     Routine:        retrieve_thread_self_fast
+ *     Purpose:
+ *             Return a send right (possibly null/dead)
+ *             for the thread's user-visible self port.
+ *
+ *             Only works for the current thread.
+ *
+ *     Conditions:
+ *             Nothing locked.
+ */
+
+ipc_port_t
+retrieve_thread_self_fast(
+       thread_t                thread)
+{
+       ipc_port_t port = IP_NULL;
+
+       assert(thread == current_thread());
+
+       thread_mtx_lock(thread);
+
+       assert(thread->ith_self != IP_NULL);
+
+       if (thread->ith_settable_self == thread->ith_thread_ports[THREAD_FLAVOR_CONTROL]) {
+               /* no interposing, return IMMOVABLE_PORT */
+               port = ipc_port_make_send(thread->ith_self);
+       } else {
+               port = ipc_port_copy_send(thread->ith_settable_self);
+       }
+
+       thread_mtx_unlock(thread);
+
+       return port;
+}
+
+/*
+ *     Routine:        task_self_trap [mach trap]
+ *     Purpose:
+ *             Give the caller send rights for his own task port.
+ *     Conditions:
+ *             Nothing locked.
+ *     Returns:
+ *             MACH_PORT_NULL if there are any resource failures
+ *             or other errors.
  */
 
 mach_port_name_t
-task_self_trap(void)
+task_self_trap(
+       __unused struct task_self_trap_args *args)
 {
        task_t task = current_task();
        ipc_port_t sright;
+       mach_port_name_t name;
 
        sright = retrieve_task_self_fast(task);
-       return ipc_port_copyout_send(sright, task->itk_space);
+       name = ipc_port_copyout_send(sright, task->itk_space);
+       return name;
 }
 
 /*
@@ -532,14 +1236,17 @@ task_self_trap(void)
  */
 
 mach_port_name_t
-thread_self_trap(void)
+thread_self_trap(
+       __unused struct thread_self_trap_args *args)
 {
-       thread_act_t  thr_act  = current_act();
-       task_t task = thr_act->task;
+       thread_t  thread = current_thread();
+       task_t task = thread->task;
        ipc_port_t sright;
+       mach_port_name_t name;
 
-       sright = retrieve_act_self_fast(thr_act);
-       return ipc_port_copyout_send(sright, task->itk_space);
+       sright = retrieve_thread_self_fast(thread);
+       name = ipc_port_copyout_send(sright, task->itk_space);
+       return name;
 }
 
 /*
@@ -554,746 +1261,2375 @@ thread_self_trap(void)
  */
 
 mach_port_name_t
-mach_reply_port(void)
+mach_reply_port(
+       __unused struct mach_reply_port_args *args)
 {
        ipc_port_t port;
        mach_port_name_t name;
        kern_return_t kr;
 
-       kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
-       if (kr == KERN_SUCCESS)
+       kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
+           &name, &port);
+       if (kr == KERN_SUCCESS) {
                ip_unlock(port);
-       else
+       } else {
                name = MACH_PORT_NULL;
-
+       }
        return name;
 }
 
 /*
- *     Routine:        task_get_special_port [kernel call]
+ *     Routine:        thread_get_special_reply_port [mach trap]
  *     Purpose:
- *             Clones a send right for one of the task's
- *             special ports.
+ *             Allocate a special reply port for the calling thread.
  *     Conditions:
  *             Nothing locked.
  *     Returns:
- *             KERN_SUCCESS            Extracted a send right.
- *             KERN_INVALID_ARGUMENT   The task is null.
- *             KERN_FAILURE            The task/space is dead.
- *             KERN_INVALID_ARGUMENT   Invalid special port.
+ *             mach_port_name_t: send right & receive right for special reply port.
+ *             MACH_PORT_NULL if there are any resource failures
+ *             or other errors.
  */
 
-kern_return_t
-task_get_special_port(
-       task_t          task,
-       int             which,
-       ipc_port_t      *portp)
+mach_port_name_t
+thread_get_special_reply_port(
+       __unused struct thread_get_special_reply_port_args *args)
 {
-       ipc_port_t *whichp;
        ipc_port_t port;
+       mach_port_name_t name;
+       kern_return_t kr;
+       thread_t thread = current_thread();
+       ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
+           IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;
+
+       /* unbind the thread special reply port */
+       if (IP_VALID(thread->ith_special_reply_port)) {
+               kr = ipc_port_unbind_special_reply_port(thread, TRUE);
+               if (kr != KERN_SUCCESS) {
+                       return MACH_PORT_NULL;
+               }
+       }
 
-       if (task == TASK_NULL)
-               return KERN_INVALID_ARGUMENT;
+       kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
+       if (kr == KERN_SUCCESS) {
+               ipc_port_bind_special_reply_port_locked(port);
+               ip_unlock(port);
+       } else {
+               name = MACH_PORT_NULL;
+       }
+       return name;
+}
 
-       switch (which) {
-           case TASK_KERNEL_PORT:
-               whichp = &task->itk_sself;
-               break;
+/*
+ *     Routine:        ipc_port_bind_special_reply_port_locked
+ *     Purpose:
+ *             Bind the given port to current thread as a special reply port.
+ *     Conditions:
+ *             Port locked.
+ *     Returns:
+ *             None.
+ */
 
-           case TASK_HOST_PORT:
-               whichp = &task->itk_host;
-               break;
+static void
+ipc_port_bind_special_reply_port_locked(
+       ipc_port_t port)
+{
+       thread_t thread = current_thread();
+       assert(thread->ith_special_reply_port == NULL);
+       assert(port->ip_specialreply);
+       assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
 
-           case TASK_BOOTSTRAP_PORT:
-               whichp = &task->itk_bootstrap;
-               break;
+       ip_reference(port);
+       thread->ith_special_reply_port = port;
+       port->ip_messages.imq_srp_owner_thread = thread;
 
-            case TASK_WIRED_LEDGER_PORT:
-                whichp = &task->wired_ledger_port;
-                break;
+       ipc_special_reply_port_bits_reset(port);
+}
 
-            case TASK_PAGED_LEDGER_PORT:
-                whichp = &task->paged_ledger_port;
-                break;
-                    
-           default:
-               return KERN_INVALID_ARGUMENT;
-       }
+/*
+ *     Routine:        ipc_port_unbind_special_reply_port
+ *     Purpose:
+ *             Unbind the thread's special reply port.
+ *             If the special port has threads waiting on turnstile,
+ *             update its inheritor.
+ *     Condition:
+ *             Nothing locked.
+ *     Returns:
+ *             None.
+ */
+static kern_return_t
+ipc_port_unbind_special_reply_port(
+       thread_t thread,
+       boolean_t unbind_active_port)
+{
+       ipc_port_t special_reply_port = thread->ith_special_reply_port;
 
-       itk_lock(task);
-       if (task->itk_self == IP_NULL) {
-               itk_unlock(task);
+       ip_lock(special_reply_port);
+
+       /* Return error if port active and unbind_active_port set to FALSE */
+       if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
+               ip_unlock(special_reply_port);
                return KERN_FAILURE;
        }
 
-       port = ipc_port_copy_send(*whichp);
-       itk_unlock(task);
+       thread->ith_special_reply_port = NULL;
+       ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
+           IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
+       /* port unlocked */
 
-       *portp = port;
+       ip_release(special_reply_port);
        return KERN_SUCCESS;
 }
 
 /*
- *     Routine:        task_set_special_port [kernel call]
+ *     Routine:        thread_get_special_port [kernel call]
  *     Purpose:
- *             Changes one of the task's special ports,
- *             setting it to the supplied send right.
+ *             Clones a send right for one of the thread's
+ *             special ports.
  *     Conditions:
- *             Nothing locked.  If successful, consumes
- *             the supplied send right.
+ *             Nothing locked.
  *     Returns:
- *             KERN_SUCCESS            Changed the special port.
- *             KERN_INVALID_ARGUMENT   The task is null.
- *             KERN_FAILURE            The task/space is dead.
+ *             KERN_SUCCESS            Extracted a send right.
+ *             KERN_INVALID_ARGUMENT   The thread is null.
+ *             KERN_FAILURE            The thread is dead.
  *             KERN_INVALID_ARGUMENT   Invalid special port.
  */
 
 kern_return_t
-task_set_special_port(
-       task_t          task,
-       int             which,
-       ipc_port_t      port)
+thread_get_special_port(
+       thread_inspect_t         thread,
+       int                      which,
+       ipc_port_t              *portp);
+
+static kern_return_t
+thread_get_special_port_internal(
+       thread_inspect_t         thread,
+       int                      which,
+       ipc_port_t              *portp,
+       mach_thread_flavor_t     flavor)
 {
-       ipc_port_t *whichp;
-       ipc_port_t old;
+       kern_return_t      kr;
+       ipc_port_t port;
 
-       if (task == TASK_NULL)
+       if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       switch (which) {
-           case TASK_KERNEL_PORT:
-               whichp = &task->itk_sself;
-               break;
+       if ((kr = port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
+               return kr;
+       }
 
-           case TASK_HOST_PORT:
-               whichp = &task->itk_host;
-               break;
+       thread_mtx_lock(thread);
+       if (!thread->active) {
+               thread_mtx_unlock(thread);
+               return KERN_FAILURE;
+       }
 
-           case TASK_BOOTSTRAP_PORT:
-               whichp = &task->itk_bootstrap;
+       switch (which) {
+       case THREAD_KERNEL_PORT:
+               port = ipc_port_copy_send(thread->ith_settable_self);
+               thread_mtx_unlock(thread);
                break;
 
-            case TASK_WIRED_LEDGER_PORT:
-                whichp = &task->wired_ledger_port;
-                break;
+       case THREAD_READ_PORT:
+       case THREAD_INSPECT_PORT:
+               thread_mtx_unlock(thread);
+               mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
+                   THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
+               /* convert_thread_to_port_with_flavor consumes a thread reference */
+               thread_reference(thread);
+               port = convert_thread_to_port_with_flavor(thread, current_flavor);
+               break;
 
-            case TASK_PAGED_LEDGER_PORT:
-                whichp = &task->paged_ledger_port;
-                break;
-                    
-           default:
+       default:
+               thread_mtx_unlock(thread);
                return KERN_INVALID_ARGUMENT;
-       }/* switch */
-
-       itk_lock(task);
-       if (task->itk_self == IP_NULL) {
-               itk_unlock(task);
-               return KERN_FAILURE;
        }
 
-       old = *whichp;
-       *whichp = port;
-       itk_unlock(task);
-
-       if (IP_VALID(old))
-               ipc_port_release_send(old);
+       *portp = port;
        return KERN_SUCCESS;
 }
 
+kern_return_t
+thread_get_special_port(
+       thread_inspect_t         thread,
+       int                      which,
+       ipc_port_t              *portp)
+{
+       return thread_get_special_port_internal(thread, which, portp, THREAD_FLAVOR_CONTROL);
+}
 
-/*
- *     Routine:        mach_ports_register [kernel call]
- *     Purpose:
- *             Stash a handful of port send rights in the task.
- *             Child tasks will inherit these rights, but they
- *             must use mach_ports_lookup to acquire them.
- *
- *             The rights are supplied in a (wired) kalloc'd segment.
- *             Rights which aren't supplied are assumed to be null.
+/*
+ *     Routine:        thread_get_non_substituted_self
+ *     Purpose:
+ *             Produce the thread's settable self port without applying
+ *             port substitution, wrapping the right in a once-only
+ *             substitution kobject so the raw port is not handed out
+ *             directly.  Returns IP_NULL if the port is not valid.
+ *     Conditions:
+ *             Nothing locked; takes and drops the thread mutex.
+ */
+static ipc_port_t
+thread_get_non_substituted_self(thread_t thread)
+{
+       ipc_port_t port = IP_NULL;
+
+       /* snapshot ith_settable_self and take a ref under the thread mutex */
+       thread_mtx_lock(thread);
+       port = thread->ith_settable_self;
+       if (IP_VALID(port)) {
+               ip_reference(port);
+       }
+       thread_mtx_unlock(thread);
+
+       if (IP_VALID(port)) {
+               /* consumes the port reference */
+               return ipc_kobject_alloc_subst_once(port);
+       }
+
+       return port;
+}
+
+/*
+ *     Routine:        thread_get_special_port_from_user
+ *     Purpose:
+ *             MIG entry point for thread_get_special_port: resolves the
+ *             incoming thread port, derives the caller's flavor from the
+ *             port's kobject type, and delegates to
+ *             thread_get_special_port_internal so a read/inspect port
+ *             cannot be used to obtain a more privileged special port.
+ *     Conditions:
+ *             Nothing locked.  Produces a thread reference internally and
+ *             releases it before returning.
+ */
+kern_return_t
+thread_get_special_port_from_user(
+       mach_port_t     port,
+       int             which,
+       ipc_port_t      *portp)
+{
+       ipc_kobject_type_t kotype;
+       mach_thread_flavor_t flavor;
+       kern_return_t kr = KERN_SUCCESS;
+
+       /* accept any flavor >= INSPECT; kotype records what we actually got */
+       thread_t thread = convert_port_to_thread_check_type(port, &kotype,
+           THREAD_FLAVOR_INSPECT, FALSE);
+
+       if (thread == THREAD_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       if (which == THREAD_KERNEL_PORT && thread->task == current_task()) {
+#if CONFIG_MACF
+               /*
+                * Only check for threads belonging to the current task,
+                * because foreign thread ports are always movable.
+                */
+               if (mac_task_check_get_movable_control_port()) {
+                       kr = KERN_DENIED;
+                       goto out;
+               }
+#endif
+               if (kotype == IKOT_THREAD_CONTROL) {
+                       *portp = thread_get_non_substituted_self(thread);
+                       goto out;
+               }
+       }
+
+       /* map the kobject type of the supplied port to a request flavor */
+       switch (kotype) {
+       case IKOT_THREAD_CONTROL:
+               flavor = THREAD_FLAVOR_CONTROL;
+               break;
+       case IKOT_THREAD_READ:
+               flavor = THREAD_FLAVOR_READ;
+               break;
+       case IKOT_THREAD_INSPECT:
+               flavor = THREAD_FLAVOR_INSPECT;
+               break;
+       default:
+               panic("strange kobject type");
+       }
+
+       kr = thread_get_special_port_internal(thread, which, portp, flavor);
+out:
+       thread_deallocate(thread);
+       return kr;
+}
+
+/*
+ *     Routine:        port_allowed_with_thread_flavor
+ *     Purpose:
+ *             Policy check: may a caller holding a thread port of the
+ *             given flavor request special port `which`?  CONTROL may
+ *             request anything; READ may request only the read and
+ *             inspect ports; INSPECT only the inspect port.
+ *     Returns:
+ *             KERN_SUCCESS or KERN_INVALID_CAPABILITY.
+ */
+static kern_return_t
+port_allowed_with_thread_flavor(
+       int                  which,
+       mach_thread_flavor_t flavor)
+{
+       switch (flavor) {
+       case THREAD_FLAVOR_CONTROL:
+               return KERN_SUCCESS;
+
+       case THREAD_FLAVOR_READ:
+
+               switch (which) {
+               case THREAD_READ_PORT:
+               case THREAD_INSPECT_PORT:
+                       return KERN_SUCCESS;
+               default:
+                       return KERN_INVALID_CAPABILITY;
+               }
+
+       case THREAD_FLAVOR_INSPECT:
+
+               switch (which) {
+               case THREAD_INSPECT_PORT:
+                       return KERN_SUCCESS;
+               default:
+                       return KERN_INVALID_CAPABILITY;
+               }
+
+       default:
+               return KERN_INVALID_CAPABILITY;
+       }
+}
+
+/*
+ *     Routine:        thread_set_special_port [kernel call]
+ *     Purpose:
+ *             Changes one of the thread's special ports,
+ *             setting it to the supplied send right.
  *     Conditions:
  *             Nothing locked.  If successful, consumes
- *             the supplied rights and memory.
+ *             the supplied send right.
  *     Returns:
- *             KERN_SUCCESS            Stashed the port rights.
- *             KERN_INVALID_ARGUMENT   The task is null.
- *             KERN_INVALID_ARGUMENT   The task is dead.
- *             KERN_INVALID_ARGUMENT   Too many port rights supplied.
+ *             KERN_SUCCESS            Changed the special port.
+ *             KERN_INVALID_ARGUMENT   The thread is null.
+ *             KERN_INVALID_RIGHT      Port is marked as immovable.
+ *             KERN_FAILURE            The thread is dead.
+ *             KERN_INVALID_ARGUMENT   Invalid special port.
+ *             KERN_NO_ACCESS          Restricted access to set port.
  */
 
 kern_return_t
-mach_ports_register(
-       task_t                  task,
-       mach_port_array_t       memory,
-       mach_msg_type_number_t  portsCnt)
+thread_set_special_port(
+       thread_t                thread,
+       int                     which,
+       ipc_port_t              port)
 {
-       ipc_port_t ports[TASK_PORT_REGISTER_MAX];
-       int i;
+       kern_return_t   result = KERN_SUCCESS;
+       ipc_port_t              *whichp, old = IP_NULL;
 
-       if ((task == TASK_NULL) ||
-           (portsCnt > TASK_PORT_REGISTER_MAX))
+       if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       /*
-        *      Pad the port rights with nulls.
-        */
+       if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
+               return KERN_INVALID_RIGHT;
+       }
 
-       for (i = 0; i < portsCnt; i++)
-               ports[i] = memory[i];
-       for (; i < TASK_PORT_REGISTER_MAX; i++)
-               ports[i] = IP_NULL;
+       switch (which) {
+       case THREAD_KERNEL_PORT:
+#if CONFIG_CSR
+               if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
+                       /*
+                        * Only allow setting of thread-self
+                        * special port from user-space when SIP is
+                        * disabled (for Mach-on-Mach emulation).
+                        */
+                       return KERN_NO_ACCESS;
+               }
+#endif
+               whichp = &thread->ith_settable_self;
+               break;
 
-       itk_lock(task);
-       if (task->itk_self == IP_NULL) {
-               itk_unlock(task);
+       default:
                return KERN_INVALID_ARGUMENT;
        }
 
-       /*
-        *      Replace the old send rights with the new.
-        *      Release the old rights after unlocking.
-        */
-
-       for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
-               ipc_port_t old;
+       thread_mtx_lock(thread);
 
-               old = task->itk_registered[i];
-               task->itk_registered[i] = ports[i];
-               ports[i] = old;
+       if (thread->active) {
+               old = *whichp;
+               *whichp = port;
+       } else {
+               result = KERN_FAILURE;
        }
 
-       itk_unlock(task);
-
-       for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
-               if (IP_VALID(ports[i]))
-                       ipc_port_release_send(ports[i]);
-
-       /*
-        *      Now that the operation is known to be successful,
-        *      we can free the memory.
-        */
+       thread_mtx_unlock(thread);
 
-       if (portsCnt != 0)
-               kfree((vm_offset_t) memory,
-                     (vm_size_t) (portsCnt * sizeof(mach_port_t)));
+       if (IP_VALID(old)) {
+               ipc_port_release_send(old);
+       }
 
-       return KERN_SUCCESS;
+       return result;
 }
 
 /*
- *     Routine:        mach_ports_lookup [kernel call]
+ *     Routine:        task_get_special_port [kernel call]
  *     Purpose:
- *             Retrieves (clones) the stashed port send rights.
+ *             Clones a send right for one of the task's
+ *             special ports.
  *     Conditions:
- *             Nothing locked.  If successful, the caller gets
- *             rights and memory.
+ *             Nothing locked.
  *     Returns:
- *             KERN_SUCCESS            Retrieved the send rights.
+ *             KERN_SUCCESS                Extracted a send right.
  *             KERN_INVALID_ARGUMENT   The task is null.
- *             KERN_INVALID_ARGUMENT   The task is dead.
- *             KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
+ *             KERN_FAILURE                The task/space is dead.
+ *             KERN_INVALID_ARGUMENT   Invalid special port.
  */
 
 kern_return_t
-mach_ports_lookup(
-       task_t                  task,
-       mach_port_array_t       *portsp,
-       mach_msg_type_number_t  *portsCnt)
+task_get_special_port(
+       task_t          task,
+       int             which,
+       ipc_port_t      *portp);
+
+static kern_return_t
+task_get_special_port_internal(
+       task_t          task,
+       int             which,
+       ipc_port_t      *portp,
+       mach_task_flavor_t        flavor)
 {
-       vm_offset_t memory;
-       vm_size_t size;
-       ipc_port_t *ports;
-       int i;
-
        kern_return_t kr;
-       
-       if (task == TASK_NULL)
-               return KERN_INVALID_ARGUMENT;
+       ipc_port_t port;
 
-       size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
+       if (task == TASK_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       memory = kalloc(size);
-       if (memory == 0)
-               return KERN_RESOURCE_SHORTAGE;
+       if ((kr = port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
+               return kr;
+       }
 
        itk_lock(task);
-       if (task->itk_self == IP_NULL) {
+       if (!task->ipc_active) {
                itk_unlock(task);
+               return KERN_FAILURE;
+       }
 
-               kfree(memory, size);
+       switch (which) {
+       case TASK_KERNEL_PORT:
+               port = ipc_port_copy_send(task->itk_settable_self);
+               itk_unlock(task);
+               break;
+
+       case TASK_READ_PORT:
+       case TASK_INSPECT_PORT:
+               itk_unlock(task);
+               mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
+                   TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
+               /* convert_task_to_port_with_flavor consumes a task reference */
+               task_reference(task);
+               port = convert_task_to_port_with_flavor(task, current_flavor);
+               break;
+
+       case TASK_NAME_PORT:
+               port = ipc_port_make_send(task->itk_task_ports[TASK_FLAVOR_NAME]);
+               itk_unlock(task);
+               break;
+
+       case TASK_HOST_PORT:
+               port = ipc_port_copy_send(task->itk_host);
+               itk_unlock(task);
+               break;
+
+       case TASK_BOOTSTRAP_PORT:
+               port = ipc_port_copy_send(task->itk_bootstrap);
+               itk_unlock(task);
+               break;
+
+       case TASK_SEATBELT_PORT:
+               port = ipc_port_copy_send(task->itk_seatbelt);
+               itk_unlock(task);
+               break;
+
+       case TASK_ACCESS_PORT:
+               port = ipc_port_copy_send(task->itk_task_access);
+               itk_unlock(task);
+               break;
+
+       case TASK_DEBUG_CONTROL_PORT:
+               port = ipc_port_copy_send(task->itk_debug_control);
+               itk_unlock(task);
+               break;
+
+       default:
+               itk_unlock(task);
                return KERN_INVALID_ARGUMENT;
        }
 
-       ports = (ipc_port_t *) memory;
+       *portp = port;
+       return KERN_SUCCESS;
+}
 
-       /*
-        *      Clone port rights.  Because kalloc'd memory
-        *      is wired, we won't fault while holding the task lock.
-        */
+/*
+ *     Routine:        task_get_special_port [kernel call]
+ *     Purpose:
+ *             In-kernel entry point: extract a send right for one of
+ *             the task's special ports.  Kernel callers are trusted,
+ *             so the request is made at CONTROL flavor.
+ */
+kern_return_t
+task_get_special_port(
+       task_t          task,
+       int             which,
+       ipc_port_t      *portp)
+{
+       return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
+}
 
-       for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
-               ports[i] = ipc_port_copy_send(task->itk_registered[i]);
+static ipc_port_t
+task_get_non_substituted_self(task_t task)
+{
+       ipc_port_t port = IP_NULL;
 
+       itk_lock(task);
+       port = task->itk_settable_self;
+       if (IP_VALID(port)) {
+               ip_reference(port);
+       }
        itk_unlock(task);
 
-       *portsp = (mach_port_array_t) ports;
-       *portsCnt = TASK_PORT_REGISTER_MAX;
-       return KERN_SUCCESS;
-}
+       if (IP_VALID(port)) {
+               /* consumes the port reference */
+               return ipc_kobject_alloc_subst_once(port);
+       }
 
-/*
- *     Routine: convert_port_to_locked_task
- *     Purpose:
- *             Internal helper routine to convert from a port to a locked
- *             task.  Used by several routines that try to convert from a
- *             task port to a reference on some task related object.
- *     Conditions:
- *             Nothing locked, blocking OK.
- */
-task_t
-convert_port_to_locked_task(ipc_port_t port)
+       return port;
+}
+kern_return_t
+task_get_special_port_from_user(
+       mach_port_t     port,
+       int             which,
+       ipc_port_t      *portp)
 {
-       while (IP_VALID(port)) {
-               task_t task;
+       ipc_kobject_type_t kotype;
+       mach_task_flavor_t flavor;
+       kern_return_t kr = KERN_SUCCESS;
 
-               ip_lock(port);
-               if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
-                       ip_unlock(port);
-                       return TASK_NULL;
-               }
-               task = (task_t) port->ip_kobject;
-               assert(task != TASK_NULL);
+       task_t task = convert_port_to_task_check_type(port, &kotype,
+           TASK_FLAVOR_INSPECT, FALSE);
+
+       if (task == TASK_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
+       if (which == TASK_KERNEL_PORT && task == current_task()) {
+#if CONFIG_MACF
                /*
-                * Normal lock ordering puts task_lock() before ip_lock().
-                * Attempt out-of-order locking here.
+                * only check for current_task,
+                * because foreign task ports are always movable
                 */
-               if (task_lock_try(task)) {
-                       ip_unlock(port);
-                       return(task);
+               if (mac_task_check_get_movable_control_port()) {
+                       kr = KERN_DENIED;
+                       goto out;
+               }
+#endif
+               if (kotype == IKOT_TASK_CONTROL) {
+                       *portp = task_get_non_substituted_self(task);
+                       goto out;
                }
+       }
 
-               ip_unlock(port);
-               mutex_pause();
+       switch (kotype) {
+       case IKOT_TASK_CONTROL:
+               flavor = TASK_FLAVOR_CONTROL;
+               break;
+       case IKOT_TASK_READ:
+               flavor = TASK_FLAVOR_READ;
+               break;
+       case IKOT_TASK_INSPECT:
+               flavor = TASK_FLAVOR_INSPECT;
+               break;
+       default:
+               panic("strange kobject type");
+       }
+
+       kr = task_get_special_port_internal(task, which, portp, flavor);
+out:
+       task_deallocate(task);
+       return kr;
+}
+
+static kern_return_t
+port_allowed_with_task_flavor(
+       int                which,
+       mach_task_flavor_t flavor)
+{
+       switch (flavor) {
+       case TASK_FLAVOR_CONTROL:
+               return KERN_SUCCESS;
+
+       case TASK_FLAVOR_READ:
+
+               switch (which) {
+               case TASK_READ_PORT:
+               case TASK_INSPECT_PORT:
+               case TASK_NAME_PORT:
+                       return KERN_SUCCESS;
+               default:
+                       return KERN_INVALID_CAPABILITY;
+               }
+
+       case TASK_FLAVOR_INSPECT:
+
+               switch (which) {
+               case TASK_INSPECT_PORT:
+               case TASK_NAME_PORT:
+                       return KERN_SUCCESS;
+               default:
+                       return KERN_INVALID_CAPABILITY;
+               }
+
+       default:
+               return KERN_INVALID_CAPABILITY;
        }
-       return TASK_NULL;
 }
 
 /*
- *     Routine:        convert_port_to_task
+ *     Routine:        task_set_special_port [kernel call]
  *     Purpose:
- *             Convert from a port to a task.
- *             Doesn't consume the port ref; produces a task ref,
- *             which may be null.
+ *             Changes one of the task's special ports,
+ *             setting it to the supplied send right.
  *     Conditions:
- *             Nothing locked.
+ *             Nothing locked.  If successful, consumes
+ *             the supplied send right.
+ *     Returns:
+ *             KERN_SUCCESS                Changed the special port.
+ *             KERN_INVALID_ARGUMENT   The task is null.
+ *             KERN_INVALID_RIGHT      Port is marked as immovable.
+ *             KERN_FAILURE            The task/space is dead.
+ *             KERN_INVALID_ARGUMENT   Invalid special port.
+ *             KERN_NO_ACCESS          Restricted access to set port.
  */
-task_t
-convert_port_to_task(
-       ipc_port_t      port)
+
+kern_return_t
+task_set_special_port(
+       task_t          task,
+       int             which,
+       ipc_port_t      port)
+{
+       if (task == TASK_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       /* driverkit processes may never rewire special ports */
+       if (task_is_driver(current_task())) {
+               return KERN_NO_ACCESS;
+       }
+
+       /* immovable rights must not be stashed where they could be extracted */
+       if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
+               return KERN_INVALID_RIGHT;
+       }
+
+       switch (which) {
+       case TASK_KERNEL_PORT:
+       case TASK_HOST_PORT:
+#if CONFIG_CSR
+               if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
+                       /*
+                        * Only allow setting of task-self / task-host
+                        * special ports from user-space when SIP is
+                        * disabled (for Mach-on-Mach emulation).
+                        */
+                       break;
+               }
+#endif
+               return KERN_NO_ACCESS;
+       default:
+               break;
+       }
+
+       return task_set_special_port_internal(task, which, port);
+}
+
+/*
+ *     Routine:        task_set_special_port_internal
+ *     Purpose:
+ *             Changes one of the task's special ports,
+ *             setting it to the supplied send right.
+ *     Conditions:
+ *             Nothing locked.  If successful, consumes
+ *             the supplied send right.
+ *     Returns:
+ *             KERN_SUCCESS            Changed the special port.
+ *             KERN_INVALID_ARGUMENT   The task is null.
+ *             KERN_FAILURE            The task/space is dead.
+ *             KERN_INVALID_ARGUMENT   Invalid special port.
+ *             KERN_NO_ACCESS          Restricted access to overwrite port.
+ */
+
+kern_return_t
+task_set_special_port_internal(
+       task_t          task,
+       int             which,
+       ipc_port_t      port)
+{
+       ipc_port_t old = IP_NULL;
+       /* default result: unknown `which` / null task */
+       kern_return_t rc = KERN_INVALID_ARGUMENT;
+
+       if (task == TASK_NULL) {
+               goto out;
+       }
+
+       itk_lock(task);
+       if (!task->ipc_active) {
+               rc = KERN_FAILURE;
+               goto out_unlock;
+       }
+
+       switch (which) {
+       case TASK_KERNEL_PORT:
+               old = task->itk_settable_self;
+               task->itk_settable_self = port;
+               break;
+
+       case TASK_HOST_PORT:
+               old = task->itk_host;
+               task->itk_host = port;
+               break;
+
+       case TASK_BOOTSTRAP_PORT:
+               old = task->itk_bootstrap;
+               task->itk_bootstrap = port;
+               break;
+
+       /* Never allow overwrite of seatbelt port */
+       case TASK_SEATBELT_PORT:
+               if (IP_VALID(task->itk_seatbelt)) {
+                       rc = KERN_NO_ACCESS;
+                       goto out_unlock;
+               }
+               task->itk_seatbelt = port;
+               break;
+
+       /* Never allow overwrite of the task access port */
+       case TASK_ACCESS_PORT:
+               if (IP_VALID(task->itk_task_access)) {
+                       rc = KERN_NO_ACCESS;
+                       goto out_unlock;
+               }
+               task->itk_task_access = port;
+               break;
+
+       case TASK_DEBUG_CONTROL_PORT:
+               old = task->itk_debug_control;
+               task->itk_debug_control = port;
+               break;
+
+       default:
+               rc = KERN_INVALID_ARGUMENT;
+               goto out_unlock;
+       }/* switch */
+
+       rc = KERN_SUCCESS;
+
+out_unlock:
+       itk_unlock(task);
+
+       /* release the displaced send right only after dropping the itk lock */
+       if (IP_VALID(old)) {
+               ipc_port_release_send(old);
+       }
+out:
+       return rc;
+}
+/*
+ *     Routine:        mach_ports_register [kernel call]
+ *     Purpose:
+ *             Stash a handful of port send rights in the task.
+ *             Child tasks will inherit these rights, but they
+ *             must use mach_ports_lookup to acquire them.
+ *
+ *             The rights are supplied in a (wired) kalloc'd segment.
+ *             Rights which aren't supplied are assumed to be null.
+ *     Conditions:
+ *             Nothing locked.  If successful, consumes
+ *             the supplied rights and memory.
+ *     Returns:
+ *             KERN_SUCCESS            Stashed the port rights.
+ *             KERN_INVALID_RIGHT      Port in array is marked immovable.
+ *             KERN_INVALID_ARGUMENT   The task is null.
+ *             KERN_INVALID_ARGUMENT   The task is dead.
+ *             KERN_INVALID_ARGUMENT   The memory param is null.
+ *             KERN_INVALID_ARGUMENT   Too many port rights supplied.
+ */
+
+kern_return_t
+mach_ports_register(
+       task_t                  task,
+       mach_port_array_t       memory,
+       mach_msg_type_number_t  portsCnt)
+{
+       ipc_port_t ports[TASK_PORT_REGISTER_MAX];
+       unsigned int i;
+
+       if ((task == TASK_NULL) ||
+           (portsCnt > TASK_PORT_REGISTER_MAX) ||
+           (portsCnt && memory == NULL)) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       /*
+        *      Pad the port rights with nulls.
+        */
+
+       for (i = 0; i < portsCnt; i++) {
+               ports[i] = memory[i];
+               /* reject immovable rights before touching task state */
+               if (IP_VALID(ports[i]) && (ports[i]->ip_immovable_receive || ports[i]->ip_immovable_send)) {
+                       return KERN_INVALID_RIGHT;
+               }
+       }
+       for (; i < TASK_PORT_REGISTER_MAX; i++) {
+               ports[i] = IP_NULL;
+       }
+
+       itk_lock(task);
+       if (!task->ipc_active) {
+               itk_unlock(task);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       /*
+        *      Replace the old send rights with the new.
+        *      Release the old rights after unlocking.
+        */
+
+       for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
+               ipc_port_t old;
+
+               old = task->itk_registered[i];
+               task->itk_registered[i] = ports[i];
+               ports[i] = old;
+       }
+
+       itk_unlock(task);
+
+       /* ports[] now holds the displaced rights; release them unlocked */
+       for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
+               if (IP_VALID(ports[i])) {
+                       ipc_port_release_send(ports[i]);
+               }
+       }
+
+       /*
+        *      Now that the operation is known to be successful,
+        *      we can free the memory.
+        */
+
+       if (portsCnt != 0) {
+               kfree(memory,
+                   (vm_size_t) (portsCnt * sizeof(mach_port_t)));
+       }
+
+       return KERN_SUCCESS;
+}
+
+/*
+ *     Routine:        mach_ports_lookup [kernel call]
+ *     Purpose:
+ *             Retrieves (clones) the stashed port send rights.
+ *     Conditions:
+ *             Nothing locked.  If successful, the caller gets
+ *             rights and memory.
+ *     Returns:
+ *             KERN_SUCCESS            Retrieved the send rights.
+ *             KERN_INVALID_ARGUMENT   The task is null.
+ *             KERN_INVALID_ARGUMENT   The task is dead.
+ *             KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_ports_lookup(
+       task_t                  task,
+       mach_port_array_t       *portsp,
+       mach_msg_type_number_t  *portsCnt)
+{
+       void  *memory;
+       vm_size_t size;
+       ipc_port_t *ports;
+       int i;
+
+       if (task == TASK_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
+
+       /* allocate before taking the lock; freed on the dead-task path */
+       memory = kalloc(size);
+       if (memory == 0) {
+               return KERN_RESOURCE_SHORTAGE;
+       }
+
+       itk_lock(task);
+       if (!task->ipc_active) {
+               itk_unlock(task);
+
+               kfree(memory, size);
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       ports = (ipc_port_t *) memory;
+
+       /*
+        *      Clone port rights.  Because kalloc'd memory
+        *      is wired, we won't fault while holding the task lock.
+        */
+
+       for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
+               ports[i] = ipc_port_copy_send(task->itk_registered[i]);
+       }
+
+       itk_unlock(task);
+
+       /* ownership of the wired array and the cloned rights passes to the caller */
+       *portsp = (mach_port_array_t) ports;
+       *portsCnt = TASK_PORT_REGISTER_MAX;
+       return KERN_SUCCESS;
+}
+
+/*
+ *     Routine:        task_conversion_eval
+ *     Purpose:
+ *             Security policy gate for turning a task port into a task
+ *             reference: decides whether `caller` may resolve `victim`'s
+ *             control port.
+ *     Returns:
+ *             KERN_SUCCESS or KERN_INVALID_SECURITY.
+ */
+kern_return_t
+task_conversion_eval(task_t caller, task_t victim)
+{
+       /*
+        * Tasks are allowed to resolve their own task ports, and the kernel is
+        * allowed to resolve anyone's task port.
+        */
+       if (caller == kernel_task) {
+               return KERN_SUCCESS;
+       }
+
+       if (caller == victim) {
+               return KERN_SUCCESS;
+       }
+
+       /*
+        * Only the kernel can resolve the kernel's task port. We've established
+        * by this point that the caller is not kernel_task.
+        */
+       if (victim == TASK_NULL || victim == kernel_task) {
+               return KERN_INVALID_SECURITY;
+       }
+
+       task_require(victim);
+
+#if !defined(XNU_TARGET_OS_OSX)
+       /*
+        * On platforms other than macOS, only a platform binary can resolve the task port
+        * of another platform binary.
+        */
+       if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
+#if SECURE_KERNEL
+               return KERN_INVALID_SECURITY;
+#else
+               /* boot-arg escape hatch on development kernels only */
+               if (cs_relax_platform_task_ports) {
+                       return KERN_SUCCESS;
+               } else {
+                       return KERN_INVALID_SECURITY;
+               }
+#endif /* SECURE_KERNEL */
+       }
+#endif /* !defined(XNU_TARGET_OS_OSX) */
+
+       return KERN_SUCCESS;
+}
+
+/*
+ *     Routine: convert_port_to_locked_task
+ *     Purpose:
+ *             Internal helper routine to convert from a port to a locked
+ *             task.  Used by several routines that try to convert from a
+ *             task port to a reference on some task related object.
+ *     Conditions:
+ *             Nothing locked, blocking OK.
+ */
+static task_t
+convert_port_to_locked_task(ipc_port_t port, boolean_t eval)
+{
+       int try_failed_count = 0;
+
+       /* loop: the out-of-order lock attempt below may need retries */
+       while (IP_VALID(port)) {
+               task_t ct = current_task();
+               task_t task;
+
+               ip_lock(port);
+               /* only full control ports convert to a locked task */
+               if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL)) {
+                       ip_unlock(port);
+                       return TASK_NULL;
+               }
+               task = (task_t) ip_get_kobject(port);
+               assert(task != TASK_NULL);
+
+               /* eval == FALSE skips the task_conversion_eval security policy */
+               if (eval && task_conversion_eval(ct, task)) {
+                       ip_unlock(port);
+                       return TASK_NULL;
+               }
+
+               /*
+                * Normal lock ordering puts task_lock() before ip_lock().
+                * Attempt out-of-order locking here.
+                */
+               if (task_lock_try(task)) {
+                       ip_unlock(port);
+                       return task;
+               }
+               try_failed_count++;
+
+               ip_unlock(port);
+               /* back off proportionally to contention before retrying */
+               mutex_pause(try_failed_count);
+       }
+       return TASK_NULL;
+}
+
+/*
+ *     Routine: convert_port_to_locked_task_inspect
+ *     Purpose:
+ *             Internal helper routine to convert from a port to a locked
+ *             task inspect right. Used by internal routines that try to convert from a
+ *             task inspect port to a reference on some task related object.
+ *     Conditions:
+ *             Nothing locked, blocking OK.
+ */
+static task_inspect_t
+convert_port_to_locked_task_inspect(ipc_port_t port)
+{
+       int try_failed_count = 0;
+
+       while (IP_VALID(port)) {
+               task_inspect_t task;
+
+               ip_lock(port);
+               /* inspect access is satisfied by control, read, or inspect ports */
+               if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL &&
+                   ip_kotype(port) != IKOT_TASK_READ &&
+                   ip_kotype(port) != IKOT_TASK_INSPECT)) {
+                       ip_unlock(port);
+                       return TASK_INSPECT_NULL;
+               }
+               task = (task_inspect_t) ip_get_kobject(port);
+               assert(task != TASK_INSPECT_NULL);
+               /*
+                * Normal lock ordering puts task_lock() before ip_lock().
+                * Attempt out-of-order locking here.
+                */
+               if (task_lock_try((task_t)task)) {
+                       ip_unlock(port);
+                       return task;
+               }
+               try_failed_count++;
+
+               ip_unlock(port);
+               /* back off proportionally to contention before retrying */
+               mutex_pause(try_failed_count);
+       }
+       return TASK_INSPECT_NULL;
+}
+
+/*
+ *     Routine: convert_port_to_locked_task_read
+ *     Purpose:
+ *             Internal helper routine to convert from a port to a locked
+ *             task read right. Used by internal routines that try to convert from a
+ *             task read port to a reference on some task related object.
+ *     Conditions:
+ *             Nothing locked, blocking OK.
+ */
+static task_read_t
+convert_port_to_locked_task_read(
+       ipc_port_t port,
+       boolean_t  eval)
+{
+       int try_failed_count = 0;
+
+       while (IP_VALID(port)) {
+               task_t ct = current_task();
+               task_read_t task;
+
+               ip_lock(port);
+               /* read access is satisfied by control or read ports */
+               if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL &&
+                   ip_kotype(port) != IKOT_TASK_READ)) {
+                       ip_unlock(port);
+                       return TASK_READ_NULL;
+               }
+               /* NOTE(review): uses ipc_kobject_get where siblings use ip_get_kobject — presumably equivalent accessors; confirm */
+               task = (task_read_t)ipc_kobject_get(port);
+               assert(task != TASK_READ_NULL);
+
+               /* eval == FALSE skips the task_conversion_eval security policy */
+               if (eval && task_conversion_eval(ct, task)) {
+                       ip_unlock(port);
+                       return TASK_READ_NULL;
+               }
+
+               /*
+                * Normal lock ordering puts task_lock() before ip_lock().
+                * Attempt out-of-order locking here.
+                */
+               if (task_lock_try((task_t)task)) {
+                       ip_unlock(port);
+                       return task;
+               }
+               try_failed_count++;
+
+               ip_unlock(port);
+               /* back off proportionally to contention before retrying */
+               mutex_pause(try_failed_count);
+       }
+       return TASK_READ_NULL;
+}
+
+/*
+ *     Routine:        convert_port_to_task_locked
+ *     Purpose:
+ *             Core conversion: port (locked, active) -> task reference.
+ *             Optionally reports the task's exec_token and optionally
+ *             applies the task_conversion_eval security policy.
+ *     Conditions:
+ *             Port is locked and active.  Produces a task reference
+ *             (or TASK_NULL); does not consume the port reference.
+ */
+static task_t
+convert_port_to_task_locked(
+       ipc_port_t              port,
+       uint32_t                *exec_token,
+       boolean_t               eval)
+{
+       task_t          task = TASK_NULL;
+
+       ip_lock_held(port);
+       require_ip_active(port);
+
+       if (ip_kotype(port) == IKOT_TASK_CONTROL) {
+               task = (task_t) ip_get_kobject(port);
+               assert(task != TASK_NULL);
+
+               if (eval && task_conversion_eval(current_task(), task)) {
+                       return TASK_NULL;
+               }
+
+               /* exec_token is optional output; NULL means not requested */
+               if (exec_token) {
+                       *exec_token = task->exec_token;
+               }
+
+               task_reference_internal(task);
+       }
+
+       return task;
+}
+
+/*
+ *     Routine:        convert_port_to_task_with_exec_token
+ *     Purpose:
+ *             Convert from a port to a task and return
+ *             the exec token stored in the task.
+ *             Doesn't consume the port ref; produces a task ref,
+ *             which may be null.
+ *     Conditions:
+ *             Nothing locked.
+ */
+task_t
+convert_port_to_task_with_exec_token(
+       ipc_port_t              port,
+       uint32_t                *exec_token,
+       boolean_t               eval)
+{
+       task_t          task = TASK_NULL;
+
+       if (IP_VALID(port)) {
+               ip_lock(port);
+               /* dead ports convert to TASK_NULL */
+               if (ip_active(port)) {
+                       task = convert_port_to_task_locked(port, exec_token, eval);
+               }
+               ip_unlock(port);
+       }
+
+       return task;
+}
+
+/*
+ *     Routine:        convert_port_to_task
+ *     Purpose:
+ *             Convert from a port to a task.
+ *             Doesn't consume the port ref; produces a task ref,
+ *             which may be null.
+ *     Conditions:
+ *             Nothing locked.
+ */
+task_t
+convert_port_to_task(
+       ipc_port_t              port)
+{
+       return convert_port_to_task_with_exec_token(port, NULL, TRUE);
+}
+
+/*
+ *     Routine:        convert_port_to_task_no_eval
+ *     Purpose:
+ *             Convert from a port to a task, skips task_conversion_eval.
+ *             Doesn't consume the port ref; produces a task ref,
+ *             which may be null.
+ *     Conditions:
+ *             Nothing locked.
+ */
+static task_t
+convert_port_to_task_no_eval(
+       ipc_port_t              port)
+{
+       return convert_port_to_task_with_exec_token(port, NULL, FALSE);
+}
+
+/*
+ *     Routine:        convert_port_to_task_name
+ *     Purpose:
+ *             Convert from a port to a task name.
+ *             Doesn't consume the port ref; produces a task name ref,
+ *             which may be null.
+ *     Conditions:
+ *             Nothing locked.
+ */
+
/*
 *	Internal helper: convert a locked, active task port of any flavor
 *	(control, read, inspect, or name) to a task-name reference.
 *	Produces a task name ref (or TASK_NAME_NULL); doesn't consume the
 *	port ref.
 */
static task_name_t
convert_port_to_task_name_locked(
	ipc_port_t		port)
{
	task_name_t task = TASK_NAME_NULL;

	/* Caller must hold the port lock on an active port. */
	ip_lock_held(port);
	require_ip_active(port);

	/* Every task-port flavor grants at least the name capability. */
	if (ip_kotype(port) == IKOT_TASK_CONTROL ||
	    ip_kotype(port) == IKOT_TASK_READ ||
	    ip_kotype(port) == IKOT_TASK_INSPECT ||
	    ip_kotype(port) == IKOT_TASK_NAME) {
		task = (task_name_t) ip_get_kobject(port);
		assert(task != TASK_NAME_NULL);

		/* Reference taken while the port lock pins the kobject. */
		task_reference_internal(task);
	}

	return task;
}
+
+task_name_t
+convert_port_to_task_name(
+       ipc_port_t              port)
+{
+       task_name_t             task = TASK_NULL;
+
+       if (IP_VALID(port)) {
+               ip_lock(port);
+               if (ip_active(port)) {
+                       task = convert_port_to_task_name_locked(port);
+               }
+               ip_unlock(port);
+       }
+
+       return task;
+}
+
+/*
+ *     Routine:        convert_port_to_task_policy
+ *     Purpose:
+ *             Convert from a port to a task.
+ *             Doesn't consume the port ref; produces a task ref,
+ *             which may be null.
+ *             If the port is being used with task_port_set(), any task port
+ *             type other than TASK_CONTROL requires an entitlement. If the
+ *             port is being used with task_port_get(), TASK_NAME requires an
+ *             entitlement.
+ *     Conditions:
+ *             Nothing locked.
+ */
static task_t
convert_port_to_task_policy(ipc_port_t port, boolean_t set)
{
	task_t task = TASK_NULL;
	task_t ctask = current_task();

	if (!IP_VALID(port)) {
		return TASK_NULL;
	}

	/* "set" requires a full control port; "get" accepts inspect. */
	task = set ?
	    convert_port_to_task(port) :
	    convert_port_to_task_inspect(port);

	/* Entitled callers may fall back to the name-port conversion. */
	if (task == TASK_NULL &&
	    IOTaskHasEntitlement(ctask, "com.apple.private.task_policy")) {
		task = convert_port_to_task_name(port);
	}

	/*
	 * NOTE(review): this path can reach here with task == TASK_NULL;
	 * assumes task_conversion_eval() and task_deallocate() both
	 * tolerate a null task — verify against their definitions.
	 */
	if (task_conversion_eval(ctask, task) != KERN_SUCCESS) {
		task_deallocate(task);
		return TASK_NULL;
	}

	return task;
}
+
+task_policy_set_t
+convert_port_to_task_policy_set(ipc_port_t port)
+{
+       return convert_port_to_task_policy(port, true);
+}
+
+task_policy_get_t
+convert_port_to_task_policy_get(ipc_port_t port)
+{
+       return convert_port_to_task_policy(port, false);
+}
+
/*
 *	Internal helper: convert a locked, active task port of control,
 *	read, or inspect flavor to a task-inspect reference.
 *	Produces a task inspect ref (or TASK_INSPECT_NULL); doesn't
 *	consume the port ref.
 */
static task_inspect_t
convert_port_to_task_inspect_locked(
	ipc_port_t		port)
{
	task_inspect_t task = TASK_INSPECT_NULL;

	/* Caller must hold the port lock on an active port. */
	ip_lock_held(port);
	require_ip_active(port);

	/* Control and read ports also grant the inspect capability. */
	if (ip_kotype(port) == IKOT_TASK_CONTROL ||
	    ip_kotype(port) == IKOT_TASK_READ ||
	    ip_kotype(port) == IKOT_TASK_INSPECT) {
		task = (task_inspect_t) ip_get_kobject(port);
		assert(task != TASK_INSPECT_NULL);

		/* Reference taken while the port lock pins the kobject. */
		task_reference_internal(task);
	}

	return task;
}
+
/*
 *	Internal helper: convert a locked, active task port of control or
 *	read flavor to a task-read reference, optionally applying the
 *	task_conversion_eval() policy check.
 *	Produces a task read ref (or TASK_READ_NULL); doesn't consume the
 *	port ref.
 */
static task_read_t
convert_port_to_task_read_locked(
	ipc_port_t port,
	boolean_t  eval)
{
	task_read_t task = TASK_READ_NULL;

	/* Caller must hold the port lock on an active port. */
	ip_lock_held(port);
	require_ip_active(port);

	/* Control ports also grant the weaker read capability. */
	if (ip_kotype(port) == IKOT_TASK_CONTROL ||
	    ip_kotype(port) == IKOT_TASK_READ) {
		task_t ct = current_task();
		/* NOTE(review): sibling helpers use ip_get_kobject() here;
		 * confirm ipc_kobject_get() is intentional for this flavor. */
		task = (task_read_t)ipc_kobject_get(port);

		assert(task != TASK_READ_NULL);

		/* Bail before taking a reference if policy forbids it. */
		if (eval && task_conversion_eval(ct, task)) {
			return TASK_READ_NULL;
		}

		task_reference_internal(task);
	}

	return task;
}
+
+/*
+ *     Routine:        convert_port_to_task_check_type
+ *     Purpose:
+ *             Convert from a port to a task based on port's type.
+ *             Doesn't consume the port ref; produces a task ref,
+ *             which may be null.
+ *  Arguments:
+ *             port:       The port that we do conversion on
+ *      kotype:     Returns the IKOT_TYPE of the port, if translation succeeded
+ *      at_most:    The lowest capability flavor allowed. In mach_task_flavor_t,
+ *                  the higher the flavor number, the lesser the capability, hence the name.
+ *      eval_check: Whether to run task_conversion_eval check during the conversion.
+ *                  For backward compatibility, some interfaces does not run conversion
+ *                  eval on IKOT_TASK_CONTROL.
+ *     Conditions:
+ *             Nothing locked.
+ *  Returns:
+ *      task_t and port's type, if translation succeeded;
+ *      TASK_NULL and IKOT_NONE, if translation failed
+ */
task_t
convert_port_to_task_check_type(
	ipc_port_t		port,
	ipc_kobject_type_t     *kotype,
	mach_task_flavor_t	at_most,
	boolean_t		eval_check)
{
	task_t task = TASK_NULL;
	ipc_kobject_type_t type = IKOT_NONE;

	if (!IP_VALID(port) || !ip_active(port)) {
		goto out;
	}

	/*
	 * Dispatch on the port's kobject type, refusing any flavor weaker
	 * than `at_most` (higher flavor number == lesser capability).
	 */
	switch (ip_kotype(port)) {
	case IKOT_TASK_CONTROL:
		/* eval_check only affects the control path (compatibility). */
		task = eval_check ? convert_port_to_task(port) : convert_port_to_task_no_eval(port);
		if (task != TASK_NULL) {
			type = IKOT_TASK_CONTROL;
		}
		break;
	case IKOT_TASK_READ:
		if (at_most >= TASK_FLAVOR_READ) {
			task = eval_check ? convert_port_to_task_read(port) : convert_port_to_task_read_no_eval(port);
			if (task != TASK_READ_NULL) {
				type = IKOT_TASK_READ;
			}
		}
		break;
	case IKOT_TASK_INSPECT:
		if (at_most >= TASK_FLAVOR_INSPECT) {
			task = convert_port_to_task_inspect(port);
			if (task != TASK_INSPECT_NULL) {
				type = IKOT_TASK_INSPECT;
			}
		}
		break;
	case IKOT_TASK_NAME:
		if (at_most >= TASK_FLAVOR_NAME) {
			task = convert_port_to_task_name(port);
			if (task != TASK_NAME_NULL) {
				type = IKOT_TASK_NAME;
			}
		}
		break;
	default:
		/* Not a task port of any flavor: fail the translation. */
		break;
	}

out:
	/* On failure, reports IKOT_NONE alongside TASK_NULL. */
	if (kotype) {
		*kotype = type;
	}
	return task;
}
+
+/*
+ *     Routine:        convert_port_to_thread_check_type
+ *     Purpose:
+ *             Convert from a port to a thread based on port's type.
+ *             Doesn't consume the port ref; produces a thread ref,
+ *             which may be null.
+ *      This conversion routine is _ONLY_ supposed to be used
+ *      by thread_get_special_port.
+ *  Arguments:
+ *             port:       The port that we do conversion on
+ *      kotype:     Returns the IKOT_TYPE of the port, if translation succeeded
+ *      at_most:    The lowest capability flavor allowed. In mach_thread_flavor_t,
+ *                  the higher the flavor number, the lesser the capability, hence the name.
+ *      eval_check: Whether to run task_conversion_eval check during the conversion.
+ *                  For backward compatibility, some interfaces do not run
+ *                  conversion eval on IKOT_THREAD_CONTROL.
+ *     Conditions:
+ *             Nothing locked.
+ *  Returns:
+ *      thread_t and port's type, if translation succeeded;
+ *      THREAD_NULL and IKOT_NONE, if translation failed
+ */
thread_t
convert_port_to_thread_check_type(
	ipc_port_t		port,
	ipc_kobject_type_t     *kotype,
	mach_thread_flavor_t    at_most,
	boolean_t		eval_check)
{
	thread_t thread = THREAD_NULL;
	ipc_kobject_type_t type = IKOT_NONE;

	if (!IP_VALID(port) || !ip_active(port)) {
		goto out;
	}

	/*
	 * Dispatch on the port's kobject type, refusing any flavor weaker
	 * than `at_most` (higher flavor number == lesser capability).
	 */
	switch (ip_kotype(port)) {
	case IKOT_THREAD_CONTROL:
		/* eval_check only affects the control path (compatibility). */
		thread = eval_check ? convert_port_to_thread(port) : convert_port_to_thread_no_eval(port);
		if (thread != THREAD_NULL) {
			type = IKOT_THREAD_CONTROL;
		}
		break;
	case IKOT_THREAD_READ:
		if (at_most >= THREAD_FLAVOR_READ) {
			thread = eval_check ? convert_port_to_thread_read(port) : convert_port_to_thread_read_no_eval(port);
			if (thread != THREAD_READ_NULL) {
				type = IKOT_THREAD_READ;
			}
		}
		break;
	case IKOT_THREAD_INSPECT:
		if (at_most >= THREAD_FLAVOR_INSPECT) {
			thread = convert_port_to_thread_inspect(port);
			if (thread != THREAD_INSPECT_NULL) {
				type = IKOT_THREAD_INSPECT;
			}
		}
		break;
	default:
		/* Not a thread port of any flavor: fail the translation. */
		break;
	}

out:
	/* On failure, reports IKOT_NONE alongside THREAD_NULL. */
	if (kotype) {
		*kotype = type;
	}
	return thread;
}
+
+/*
+ *     Routine:        convert_port_to_space_check_type
+ *     Purpose:
+ *             Convert from a port to a space based on port's type.
+ *             Doesn't consume the port ref; produces a space ref,
+ *             which may be null.
+ *  Arguments:
+ *      port:       The port that we do conversion on
+ *      kotype:     Returns the IKOT_TYPE of the port, if translation succeeded
+ *      at_most:    The lowest capability flavor allowed. In mach_task_flavor_t,
+ *                  the higher the flavor number, the lesser the capability, hence the name.
+ *      eval_check: Whether to run task_conversion_eval check during the conversion.
+ *                  For backward compatibility, some interfaces do not run
+ *                  conversion eval on IKOT_TASK_CONTROL.
+ *     Conditions:
+ *             Nothing locked.
+ *  Returns:
+ *      ipc_space_t and port's type, if translation succeeded;
+ *      IPC_SPACE_NULL and IKOT_NONE, if translation failed
+ */
ipc_space_t
convert_port_to_space_check_type(
	ipc_port_t		port,
	ipc_kobject_type_t     *kotype,
	mach_task_flavor_t	at_most,
	boolean_t		eval_check)
{
	ipc_space_t space = IPC_SPACE_NULL;
	ipc_kobject_type_t type = IKOT_NONE;

	if (!IP_VALID(port) || !ip_active(port)) {
		goto out;
	}

	/*
	 * Dispatch on the port's kobject type, refusing any flavor weaker
	 * than `at_most` (higher flavor number == lesser capability).
	 */
	switch (ip_kotype(port)) {
	case IKOT_TASK_CONTROL:
		/* eval_check only affects the control path (compatibility). */
		space = eval_check ? convert_port_to_space(port) : convert_port_to_space_no_eval(port);
		if (space != IPC_SPACE_NULL) {
			type = IKOT_TASK_CONTROL;
		}
		break;
	case IKOT_TASK_READ:
		if (at_most >= TASK_FLAVOR_READ) {
			space = eval_check ? convert_port_to_space_read(port) : convert_port_to_space_read_no_eval(port);
			if (space != IPC_SPACE_READ_NULL) {
				type = IKOT_TASK_READ;
			}
		}
		break;
	case IKOT_TASK_INSPECT:
		if (at_most >= TASK_FLAVOR_INSPECT) {
			space = convert_port_to_space_inspect(port);
			if (space != IPC_SPACE_INSPECT_NULL) {
				type = IKOT_TASK_INSPECT;
			}
		}
		break;
	default:
		/* Not a task port of any flavor: fail the translation. */
		break;
	}

out:
	/* On failure, reports IKOT_NONE alongside IPC_SPACE_NULL. */
	if (kotype) {
		*kotype = type;
	}
	return space;
}
+
+/*
+ *     Routine:        convert_port_to_task_inspect
+ *     Purpose:
+ *             Convert from a port to a task inspection right
+ *             Doesn't consume the port ref; produces a task ref,
+ *             which may be null.
+ *     Conditions:
+ *             Nothing locked.
+ */
+task_inspect_t
+convert_port_to_task_inspect(
+       ipc_port_t              port)
+{
+       task_inspect_t task = TASK_INSPECT_NULL;
+
+       if (IP_VALID(port)) {
+               ip_lock(port);
+               if (ip_active(port)) {
+                       task = convert_port_to_task_inspect_locked(port);
+               }
+               ip_unlock(port);
+       }
+
+       return task;
+}
+
+/*
+ *     Routine:        convert_port_to_task_read
+ *     Purpose:
+ *             Convert from a port to a task read right
+ *             Doesn't consume the port ref; produces a task ref,
+ *             which may be null.
+ *     Conditions:
+ *             Nothing locked.
+ */
+task_read_t
+convert_port_to_task_read(
+       ipc_port_t              port)
+{
+       task_read_t task = TASK_READ_NULL;
+
+       if (IP_VALID(port)) {
+               ip_lock(port);
+               if (ip_active(port)) {
+                       task = convert_port_to_task_read_locked(port, TRUE);
+               }
+               ip_unlock(port);
+       }
+
+       return task;
+}
+
+static task_read_t
+convert_port_to_task_read_no_eval(
+       ipc_port_t              port)
+{
+       task_read_t task = TASK_READ_NULL;
+
+       if (IP_VALID(port)) {
+               ip_lock(port);
+               if (ip_active(port)) {
+                       task = convert_port_to_task_read_locked(port, FALSE);
+               }
+               ip_unlock(port);
+       }
+
+       return task;
+}
+
+/*
+ *     Routine:        convert_port_to_task_suspension_token
+ *     Purpose:
+ *             Convert from a port to a task suspension token.
+ *             Doesn't consume the port ref; produces a suspension token ref,
+ *             which may be null.
+ *     Conditions:
+ *             Nothing locked.
+ */
+task_suspension_token_t
+convert_port_to_task_suspension_token(
+       ipc_port_t              port)
+{
+       task_suspension_token_t         task = TASK_NULL;
+
+       if (IP_VALID(port)) {
+               ip_lock(port);
+
+               if (ip_active(port) &&
+                   ip_kotype(port) == IKOT_TASK_RESUME) {
+                       task = (task_suspension_token_t) ip_get_kobject(port);
+                       assert(task != TASK_NULL);
+
+                       task_reference_internal(task);
+               }
+
+               ip_unlock(port);
+       }
+
+       return task;
+}
+
+/*
+ *     Routine:        convert_port_to_space_with_flavor
+ *     Purpose:
+ *             Convert from a port to a space.
+ *             Doesn't consume the port ref; produces a space ref,
+ *             which may be null.
+ *     Conditions:
+ *             Nothing locked.
+ */
static ipc_space_t
convert_port_to_space_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	boolean_t          eval)
{
	ipc_space_t space;
	task_t task;

	/* First get the task, returned locked, at the requested flavor. */
	switch (flavor) {
	case TASK_FLAVOR_CONTROL:
		task = convert_port_to_locked_task(port, eval);
		break;
	case TASK_FLAVOR_READ:
		task = convert_port_to_locked_task_read(port, eval);
		break;
	case TASK_FLAVOR_INSPECT:
		task = convert_port_to_locked_task_inspect(port);
		break;
	default:
		task = TASK_NULL;
		break;
	}

	if (task == TASK_NULL) {
		return IPC_SPACE_NULL;
	}

	/* A dying task no longer exposes its IPC space. */
	if (!task->active) {
		task_unlock(task);
		return IPC_SPACE_NULL;
	}

	/* Take a space reference while the task lock keeps it stable. */
	space = task->itk_space;
	is_reference(space);
	task_unlock(task);
	return space;
}
+
+ipc_space_t
+convert_port_to_space(
+       ipc_port_t      port)
+{
+       return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL, TRUE);
+}
+
+static ipc_space_t
+convert_port_to_space_no_eval(
+       ipc_port_t      port)
+{
+       return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL, FALSE);
+}
+
+ipc_space_read_t
+convert_port_to_space_read(
+       ipc_port_t      port)
+{
+       return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ, TRUE);
+}
+
+static ipc_space_read_t
+convert_port_to_space_read_no_eval(
+       ipc_port_t      port)
+{
+       return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ, FALSE);
+}
+
+ipc_space_inspect_t
+convert_port_to_space_inspect(
+       ipc_port_t      port)
+{
+       return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT, TRUE);
+}
+
+/*
+ *     Routine:        convert_port_to_map_with_flavor
+ *     Purpose:
+ *             Convert from a port to a map.
+ *             Doesn't consume the port ref; produces a map ref,
+ *             which may be null.
+ *     Conditions:
+ *             Nothing locked.
+ */
+
static vm_map_t
convert_port_to_map_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor)
{
	task_t task;
	vm_map_t map;

	/* First get the task, returned locked, at the requested flavor. */
	switch (flavor) {
	case TASK_FLAVOR_CONTROL:
		task = convert_port_to_locked_task(port, TRUE); /* always eval */
		break;
	case TASK_FLAVOR_READ:
		task = convert_port_to_locked_task_read(port, TRUE); /* always eval */
		break;
	case TASK_FLAVOR_INSPECT:
		task = convert_port_to_locked_task_inspect(port); /* always no eval */
		break;
	default:
		task = TASK_NULL;
		break;
	}

	if (task == TASK_NULL) {
		return VM_MAP_NULL;
	}

	/* A dying task no longer exposes its map. */
	if (!task->active) {
		task_unlock(task);
		return VM_MAP_NULL;
	}

	map = task->map;
	if (map->pmap == kernel_pmap) {
		/*
		 * Hardening: userspace must never obtain control access to a
		 * kernel-pmap map, and only the kernel_task itself may ever
		 * reference one at all.
		 */
		if (flavor == TASK_FLAVOR_CONTROL) {
			panic("userspace has control access to a "
			    "kernel map %p through task %p", map, task);
		}
		if (task != kernel_task) {
			panic("userspace has access to a "
			    "kernel map %p through task %p", map, task);
		}
	} else {
		/* Validate the pmap pointer before handing out the map. */
		pmap_require(map->pmap);
	}

	/* Take a map reference while the task lock keeps it stable. */
	vm_map_reference(map);
	task_unlock(task);
	return map;
}
+
+vm_map_read_t
+convert_port_to_map(
+       ipc_port_t              port)
+{
+       return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL);
+}
+
+vm_map_read_t
+convert_port_to_map_read(
+       ipc_port_t              port)
+{
+       return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ);
+}
+
+vm_map_inspect_t
+convert_port_to_map_inspect(
+       ipc_port_t              port)
+{
+       return convert_port_to_map_with_flavor(port, TASK_FLAVOR_INSPECT);
+}
+
+
+/*
+ *     Routine:        convert_port_to_thread
+ *     Purpose:
+ *             Convert from a port to a thread.
+ *             Doesn't consume the port ref; produces an thread ref,
+ *             which may be null.
+ *     Conditions:
+ *             Nothing locked.
+ */
+
/*
 *	Internal helper: convert a locked, active thread control port to a
 *	thread reference, subject to the caller-supplied options and the
 *	optional task_conversion_eval() policy check.
 *	Produces a thread ref (or THREAD_NULL); doesn't consume the port ref.
 */
static thread_t
convert_port_to_thread_locked(
	ipc_port_t               port,
	port_to_thread_options_t options,
	boolean_t                eval)
{
	thread_t	thread = THREAD_NULL;

	/* Caller must hold the port lock on an active port. */
	ip_lock_held(port);
	require_ip_active(port);

	if (ip_kotype(port) == IKOT_THREAD_CONTROL) {
		thread = (thread_t) ip_get_kobject(port);
		assert(thread != THREAD_NULL);

		/* Option: caller explicitly excludes its own thread. */
		if (options & PORT_TO_THREAD_NOT_CURRENT_THREAD) {
			if (thread == current_thread()) {
				return THREAD_NULL;
			}
		}

		/* Option: restrict conversion to threads of the caller's task. */
		if (options & PORT_TO_THREAD_IN_CURRENT_TASK) {
			if (thread->task != current_task()) {
				return THREAD_NULL;
			}
		} else {
			/* Use task conversion rules for thread control conversions */
			if (eval && task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
				return THREAD_NULL;
			}
		}

		/* Reference taken while the port lock pins the kobject. */
		thread_reference_internal(thread);
	}

	return thread;
}
+
+thread_t
+convert_port_to_thread(
+       ipc_port_t              port)
+{
+       thread_t        thread = THREAD_NULL;
+
+       if (IP_VALID(port)) {
+               ip_lock(port);
+               if (ip_active(port)) {
+                       thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE, TRUE);
+               }
+               ip_unlock(port);
+       }
+
+       return thread;
+}
+
+static thread_t
+convert_port_to_thread_no_eval(
+       ipc_port_t              port)
+{
+       thread_t        thread = THREAD_NULL;
+
+       if (IP_VALID(port)) {
+               ip_lock(port);
+               if (ip_active(port)) {
+                       thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE, FALSE);
+               }
+               ip_unlock(port);
+       }
+
+       return thread;
+}
+
+/*
+ *     Routine:        convert_port_to_thread_inspect
+ *     Purpose:
+ *             Convert from a port to a thread inspect right
+ *             Doesn't consume the port ref; produces a thread ref,
+ *             which may be null.
+ *     Conditions:
+ *             Nothing locked.
+ */
/*
 *	Internal helper: convert a locked, active thread port of control,
 *	read, or inspect flavor to a thread-inspect reference.
 *	Produces a thread inspect ref (or THREAD_INSPECT_NULL); doesn't
 *	consume the port ref.
 */
static thread_inspect_t
convert_port_to_thread_inspect_locked(
	ipc_port_t		port)
{
	thread_inspect_t thread = THREAD_INSPECT_NULL;

	/* Caller must hold the port lock on an active port. */
	ip_lock_held(port);
	require_ip_active(port);

	/* Control and read ports also grant the inspect capability. */
	if (ip_kotype(port) == IKOT_THREAD_CONTROL ||
	    ip_kotype(port) == IKOT_THREAD_READ ||
	    ip_kotype(port) == IKOT_THREAD_INSPECT) {
		/* NOTE(review): sibling helpers use ip_get_kobject() here;
		 * confirm ipc_kobject_get() is intentional for this flavor. */
		thread = (thread_inspect_t)ipc_kobject_get(port);
		assert(thread != THREAD_INSPECT_NULL);
		/* Reference taken while the port lock pins the kobject. */
		thread_reference_internal((thread_t)thread);
	}

	return thread;
}
+
+thread_inspect_t
+convert_port_to_thread_inspect(
+       ipc_port_t              port)
 {
-       task_t task;
+       thread_inspect_t thread = THREAD_INSPECT_NULL;
 
-       task = convert_port_to_locked_task(port);
-       if (task) {
-               task->ref_count++;
-               task_unlock(task);
+       if (IP_VALID(port)) {
+               ip_lock(port);
+               if (ip_active(port)) {
+                       thread = convert_port_to_thread_inspect_locked(port);
+               }
+               ip_unlock(port);
        }
-       return task;
+
+       return thread;
 }
 
 /*
- *     Routine:        convert_port_to_space
+ *     Routine:        convert_port_to_thread_read
  *     Purpose:
- *             Convert from a port to a space.
- *             Doesn't consume the port ref; produces a space ref,
+ *             Convert from a port to a thread read right
+ *             Doesn't consume the port ref; produces a thread ref,
  *             which may be null.
  *     Conditions:
  *             Nothing locked.
  */
-ipc_space_t
-convert_port_to_space(
-       ipc_port_t      port)
+static thread_read_t
+convert_port_to_thread_read_locked(
+       ipc_port_t port,
+       boolean_t  eval)
 {
-       ipc_space_t space;
-       task_t task;
+       thread_read_t thread = THREAD_READ_NULL;
 
-       task = convert_port_to_locked_task(port);
+       ip_lock_held(port);
+       require_ip_active(port);
 
-       if (task == TASK_NULL)
-               return IPC_SPACE_NULL;
+       if (ip_kotype(port) == IKOT_THREAD_CONTROL ||
+           ip_kotype(port) == IKOT_THREAD_READ) {
+               thread = (thread_read_t) ip_get_kobject(port);
+               assert(thread != THREAD_READ_NULL);
 
-       if (!task->active) {
-               task_unlock(task);
-               return IPC_SPACE_NULL;
+               /* Use task conversion rules for thread control conversions */
+               if (eval && task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
+                       return THREAD_READ_NULL;
+               }
+
+               thread_reference_internal((thread_t)thread);
        }
-               
-       space = task->itk_space;
-       is_reference(space);
-       task_unlock(task);
-       return (space);
+
+       return thread;
 }
 
-upl_t
-convert_port_to_upl(
-       ipc_port_t      port)
+thread_read_t
+convert_port_to_thread_read(
+       ipc_port_t              port)
 {
-       upl_t upl;
+       thread_read_t thread = THREAD_READ_NULL;
 
-       ip_lock(port);
-       if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
-                       ip_unlock(port);
-                       return (upl_t)NULL;
+       if (IP_VALID(port)) {
+               ip_lock(port);
+               if (ip_active(port)) {
+                       thread = convert_port_to_thread_read_locked(port, TRUE);
+               }
+               ip_unlock(port);
        }
-       upl = (upl_t) port->ip_kobject;
-       ip_unlock(port);
-       upl_lock(upl);
-       upl->ref_count+=1;
-       upl_unlock(upl);
-       return upl;
-}
 
-mach_port_t
-convert_upl_to_port(
-       upl_t           upl)
-{
-       return MACH_PORT_NULL;
+       return thread;
 }
 
-__private_extern__ void
-upl_no_senders(
-       upl_t                   upl,
-       mach_port_mscount_t     mscount)
+static thread_read_t
+convert_port_to_thread_read_no_eval(
+       ipc_port_t              port)
 {
-       return;
+       thread_read_t thread = THREAD_READ_NULL;
+
+       if (IP_VALID(port)) {
+               ip_lock(port);
+               if (ip_active(port)) {
+                       thread = convert_port_to_thread_read_locked(port, FALSE);
+               }
+               ip_unlock(port);
+       }
+
+       return thread;
 }
 
+
 /*
- *     Routine:        convert_port_entry_to_map
+ *     Routine:        convert_thread_to_port_with_flavor
  *     Purpose:
- *             Convert from a port specifying an entry or a task
- *             to a map. Doesn't consume the port ref; produces a map ref,
- *             which may be null.  Unlike convert_port_to_map, the
- *             port may be task or a named entry backed.
+ *             Convert from a thread to a port of given flavor.
+ *             Consumes a thread ref; produces a naked send right
+ *             which may be invalid.
  *     Conditions:
  *             Nothing locked.
  */
+static ipc_port_t
+convert_thread_to_port_with_flavor(
+       thread_t              thread,
+       mach_thread_flavor_t  flavor)
+{
+       ipc_port_t port = IP_NULL;
 
+       thread_mtx_lock(thread);
 
-vm_map_t
-convert_port_entry_to_map(
-       ipc_port_t      port)
-{
-       task_t task;
-       vm_map_t map;
-       vm_named_entry_t        named_entry;
-
-       if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
-               while(TRUE) {
-                       ip_lock(port);
-                       if(ip_active(port) && (ip_kotype(port) 
-                                               == IKOT_NAMED_ENTRY)) {
-                               named_entry =
-                                        (vm_named_entry_t)port->ip_kobject;
-                               if (!(mutex_try(&(named_entry)->Lock))) {
-                                               ip_unlock(port);
-                                               mutex_pause();
-                                               continue;
-                               }
-                               named_entry->ref_count++;
-                               mutex_unlock(&(named_entry)->Lock);
-                               ip_unlock(port);
-                               if ((named_entry->is_sub_map) &&
-                                       (named_entry->protection 
-                                       & VM_PROT_WRITE)) {
-                                       map = named_entry->backing.map;
-                               } else {
-                                       mach_destroy_memory_entry(port);
-                                       return VM_MAP_NULL;
-                               }
-                               vm_map_reference_swap(map);
-                               mach_destroy_memory_entry(port);
-                               break;
-                       }
-                       else 
-                               return VM_MAP_NULL;
-               }
+       if (!thread->ipc_active) {
+               goto exit;
+       }
+
+       if (flavor == THREAD_FLAVOR_CONTROL) {
+               port = ipc_port_make_send(thread->ith_thread_ports[flavor]);
        } else {
-               task_t task;
+               ipc_kobject_type_t kotype = (flavor == THREAD_FLAVOR_READ) ? IKOT_THREAD_READ : IKOT_THREAD_INSPECT;
+               /*
+                * Claim a send right on the thread read/inspect port, and request a no-senders
+                * notification on that port (if none outstanding). A thread reference is not
+                * donated here even though the ports are created lazily because it doesn't own the
+                * kobject that it points to. Threads manage their lifetime explicitly and
+                * have to synchronize with each other, between the task/thread terminating and the
+                * send-once notification firing, and this is done under the thread mutex
+                * rather than with atomics.
+                */
+               (void)ipc_kobject_make_send_lazy_alloc_port(&thread->ith_thread_ports[flavor], (ipc_kobject_t)thread,
+                   kotype, IPC_KOBJECT_ALLOC_IMMOVABLE_SEND, false, 0);
+               port = thread->ith_thread_ports[flavor];
+       }
 
-               task = convert_port_to_locked_task(port);
-               
-               if (task == TASK_NULL)
-                       return VM_MAP_NULL;
+exit:
+       thread_mtx_unlock(thread);
+       thread_deallocate(thread);
+       return port;
+}
 
-               if (!task->active) {
-                       task_unlock(task);
-                       return VM_MAP_NULL;
-               }
-               
-               map = task->map;
-               vm_map_reference_swap(map);
-               task_unlock(task);
-       }
+ipc_port_t
+convert_thread_to_port(
+       thread_t                thread)
+{
+       return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_CONTROL);
+}
 
-       return map;
+ipc_port_t
+convert_thread_read_to_port(thread_read_t thread)
+{
+       return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_READ);
+}
+
+ipc_port_t
+convert_thread_inspect_to_port(thread_inspect_t thread)
+{
+       return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_INSPECT);
 }
 
+
 /*
- *     Routine:        convert_port_entry_to_object
+ *     Routine:        port_name_to_thread
  *     Purpose:
- *             Convert from a port specifying a named entry to an
- *             object. Doesn't consume the port ref; produces a map ref,
- *             which may be null. 
+ *             Convert from a port name to a thread reference
+ *             A name of MACH_PORT_NULL is valid for the null thread.
  *     Conditions:
  *             Nothing locked.
  */
+thread_t
+port_name_to_thread(
+       mach_port_name_t         name,
+       port_to_thread_options_t options)
+{
+       thread_t        thread = THREAD_NULL;
+       ipc_port_t      kport;
+       kern_return_t kr;
 
-
-vm_object_t
-convert_port_entry_to_object(
-       ipc_port_t      port)
-{
-       vm_object_t object;
-       vm_named_entry_t        named_entry;
-
-       if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
-               while(TRUE) {
-                       ip_lock(port);
-                       if(ip_active(port) && (ip_kotype(port) 
-                                               == IKOT_NAMED_ENTRY)) {
-                               named_entry =
-                                        (vm_named_entry_t)port->ip_kobject;
-                               if (!(mutex_try(&(named_entry)->Lock))) {
-                                               ip_unlock(port);
-                                               mutex_pause();
-                                               continue;
-                               }
-                               named_entry->ref_count++;
-                               mutex_unlock(&(named_entry)->Lock);
-                               ip_unlock(port);
-                               if ((!named_entry->is_sub_map) &&
-                                       (named_entry->protection 
-                                       & VM_PROT_WRITE)) {
-                                       object = named_entry->object;
-                               } else {
-                                       mach_destroy_memory_entry(port);
-                                       return (vm_object_t)NULL;
-                               }
-                               vm_object_reference(named_entry->object);
-                               mach_destroy_memory_entry(port);
-                               break;
-                       }
-                       else 
-                               return (vm_object_t)NULL;
+       if (MACH_PORT_VALID(name)) {
+               kr = ipc_port_translate_send(current_space(), name, &kport);
+               if (kr == KERN_SUCCESS) {
+                       thread = convert_port_to_thread_locked(kport, options, TRUE);
+                       ip_unlock(kport);
                }
-       } else {
-               return (vm_object_t)NULL;
        }
 
-       return object;
+       return thread;
 }
 
 /*
- *     Routine:        convert_port_to_map
+ *     Routine:        port_name_to_task
  *     Purpose:
- *             Convert from a port to a map.
- *             Doesn't consume the port ref; produces a map ref,
- *             which may be null.
+ *             Convert from a port name to a task reference
+ *             A name of MACH_PORT_NULL is valid for the null task.
  *     Conditions:
  *             Nothing locked.
  */
-
-vm_map_t
-convert_port_to_map(
-       ipc_port_t      port)
+task_t
+port_name_to_task(
+       mach_port_name_t name)
 {
-       task_t task;
-       vm_map_t map;
-
-       task = convert_port_to_locked_task(port);
-               
-       if (task == TASK_NULL)
-               return VM_MAP_NULL;
+       ipc_port_t kport;
+       kern_return_t kr;
+       task_t task = TASK_NULL;
 
-       if (!task->active) {
-               task_unlock(task);
-               return VM_MAP_NULL;
+       if (MACH_PORT_VALID(name)) {
+               kr = ipc_port_translate_send(current_space(), name, &kport);
+               if (kr == KERN_SUCCESS) {
+                       task = convert_port_to_task_locked(kport, NULL, TRUE);
+                       ip_unlock(kport);
+               }
        }
-               
-       map = task->map;
-       vm_map_reference_swap(map);
-       task_unlock(task);
-       return map;
+       return task;
 }
 
-
 /*
- *     Routine:        convert_port_to_act
+ *     Routine:        port_name_to_task_read
  *     Purpose:
- *             Convert from a port to a thr_act.
- *             Doesn't consume the port ref; produces an thr_act ref,
- *             which may be null.
+ *             Convert from a port name to a task reference
+ *             A name of MACH_PORT_NULL is valid for the null task.
  *     Conditions:
  *             Nothing locked.
  */
-
-thread_act_t
-convert_port_to_act( ipc_port_t port )
+task_read_t
+port_name_to_task_read(
+       mach_port_name_t name)
 {
-       boolean_t r;
-       thread_act_t thr_act = 0;
+       ipc_port_t kport;
+       kern_return_t kr;
+       task_read_t tr = TASK_READ_NULL;
 
-       r = FALSE;
-       while (!r && IP_VALID(port)) {
-               ip_lock(port);
-               r = ref_act_port_locked(port, &thr_act);
-               /* port unlocked */
+       if (MACH_PORT_VALID(name)) {
+               kr = ipc_port_translate_send(current_space(), name, &kport);
+               if (kr == KERN_SUCCESS) {
+                       tr = convert_port_to_task_read_locked(kport, TRUE);
+                       ip_unlock(kport);
+               }
        }
-       return (thr_act);
+       return tr;
 }
 
-boolean_t
-ref_act_port_locked( ipc_port_t port, thread_act_t *pthr_act )
+/*
+ *     Routine:        port_name_to_task_read_no_eval
+ *     Purpose:
+ *             Convert from a port name to a task reference
+ *             A name of MACH_PORT_NULL is valid for the null task.
+ *             Skips task_conversion_eval() during conversion.
+ *     Conditions:
+ *             Nothing locked.
+ */
+task_read_t
+port_name_to_task_read_no_eval(
+       mach_port_name_t name)
 {
-       thread_act_t thr_act;
-
-       thr_act = 0;
-       if (ip_active(port) &&
-               (ip_kotype(port) == IKOT_ACT)) {
-               thr_act = (thread_act_t) port->ip_kobject;
-               assert(thr_act != THR_ACT_NULL);
+       ipc_port_t kport;
+       kern_return_t kr;
+       task_read_t tr = TASK_READ_NULL;
 
-               /*
-                * Out of order locking here, normal
-                * ordering is act_lock(), then ip_lock().
-                */
-               if (!act_lock_try(thr_act)) {
-                       ip_unlock(port);
-                       mutex_pause();
-                       return (FALSE);
+       if (MACH_PORT_VALID(name)) {
+               kr = ipc_port_translate_send(current_space(), name, &kport);
+               if (kr == KERN_SUCCESS) {
+                       tr = convert_port_to_task_read_locked(kport, FALSE);
+                       ip_unlock(kport);
                }
-               act_reference_locked(thr_act);
-               act_unlock(thr_act);
        }
-       *pthr_act = thr_act;
-       ip_unlock(port);
-       return (TRUE);
+       return tr;
 }
 
 /*
- *     Routine:        port_name_to_act
+ *     Routine:        port_name_to_task_name
  *     Purpose:
- *             Convert from a port name to an act reference
- *             A name of MACH_PORT_NULL is valid for the null act
+ *             Convert from a port name to a task reference
+ *             A name of MACH_PORT_NULL is valid for the null task.
  *     Conditions:
  *             Nothing locked.
  */
-thread_act_t
-port_name_to_act(
-       mach_port_name_t        name)
+task_name_t
+port_name_to_task_name(
+       mach_port_name_t name)
 {
-       thread_act_t thr_act = THR_ACT_NULL;
-       ipc_port_t kern_port;
+       ipc_port_t kport;
        kern_return_t kr;
+       task_name_t tn = TASK_NAME_NULL;
 
        if (MACH_PORT_VALID(name)) {
-               kr = ipc_object_copyin(current_space(), name,
-                                      MACH_MSG_TYPE_COPY_SEND,
-                                      (ipc_object_t *) &kern_port);
-               if (kr != KERN_SUCCESS)
-                       return THR_ACT_NULL;
-
-               thr_act = convert_port_to_act(kern_port);
-               
-               if (IP_VALID(kern_port))
-                       ipc_port_release_send(kern_port);
+               kr = ipc_port_translate_send(current_space(), name, &kport);
+               if (kr == KERN_SUCCESS) {
+                       tn = convert_port_to_task_name_locked(kport);
+                       ip_unlock(kport);
+               }
        }
-       return thr_act;
+       return tn;
 }
 
-task_t
-port_name_to_task(
+/*
+ *     Routine:        port_name_to_host
+ *     Purpose:
+ *             Convert from a port name to a host pointer.
+ *             NOTE: This does _not_ return a +1 reference to the host_t
+ *     Conditions:
+ *             Nothing locked.
+ */
+host_t
+port_name_to_host(
        mach_port_name_t name)
 {
-       ipc_port_t kern_port;
+       host_t host = HOST_NULL;
        kern_return_t kr;
-       task_t task = TASK_NULL;
+       ipc_port_t port;
 
        if (MACH_PORT_VALID(name)) {
-               kr = ipc_object_copyin(current_space(), name,
-                                      MACH_MSG_TYPE_COPY_SEND,
-                                      (ipc_object_t *) &kern_port);
-               if (kr != KERN_SUCCESS)
-                       return TASK_NULL;
-
-               task = convert_port_to_task(kern_port);
-
-               if (IP_VALID(kern_port))
-                       ipc_port_release_send(kern_port);
+               kr = ipc_port_translate_send(current_space(), name, &port);
+               if (kr == KERN_SUCCESS) {
+                       host = convert_port_to_host(port);
+                       ip_unlock(port);
+               }
        }
-       return task;
+       return host;
 }
 
 /*
- *     Routine:        convert_task_to_port
+ *     Routine:        convert_task_to_port_with_flavor
  *     Purpose:
- *             Convert from a task to a port.
+ *             Convert from a task to a port of given flavor.
  *             Consumes a task ref; produces a naked send right
- *             which may be invalid.  
+ *             which may be invalid.
  *     Conditions:
  *             Nothing locked.
  */
+static ipc_port_t
+convert_task_to_port_with_flavor(
+       task_t              task,
+       mach_task_flavor_t  flavor)
+{
+       ipc_port_t port = IP_NULL;
+       ipc_kobject_type_t kotype = IKOT_NONE;
+
+       itk_lock(task);
+
+       if (!task->ipc_active) {
+               goto exit;
+       }
+
+       switch (flavor) {
+       case TASK_FLAVOR_CONTROL:
+       case TASK_FLAVOR_NAME:
+               port = ipc_port_make_send(task->itk_task_ports[flavor]);
+               break;
+       /*
+        * Claim a send right on the task read/inspect port, and request a no-senders
+        * notification on that port (if none outstanding). A task reference is
+        * deliberately not donated here because ipc_kobject_make_send_lazy_alloc_port
+        * is used only for convenience and these ports don't control the lifecycle of
+        * the task kobject. Instead, the task's itk_lock is used to synchronize the
+        * handling of the no-senders notification with the task termination.
+        */
+       case TASK_FLAVOR_READ:
+       case TASK_FLAVOR_INSPECT:
+               kotype = (flavor == TASK_FLAVOR_READ) ? IKOT_TASK_READ : IKOT_TASK_INSPECT;
+               (void)ipc_kobject_make_send_lazy_alloc_port((ipc_port_t *) &task->itk_task_ports[flavor],
+                   (ipc_kobject_t)task, kotype, IPC_KOBJECT_ALLOC_IMMOVABLE_SEND, true,
+                   OS_PTRAUTH_DISCRIMINATOR("task.itk_task_ports"));
+               port = task->itk_task_ports[flavor];
+
+               break;
+       }
+
+exit:
+       itk_unlock(task);
+       task_deallocate(task);
+       return port;
+}
 
 ipc_port_t
 convert_task_to_port(
-       task_t          task)
+       task_t          task)
 {
-       ipc_port_t port;
+       return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL);
+}
+
+ipc_port_t
+convert_task_read_to_port(
+       task_read_t          task)
+{
+       return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ);
+}
+
+ipc_port_t
+convert_task_inspect_to_port(
+       task_inspect_t          task)
+{
+       return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT);
+}
+
+ipc_port_t
+convert_task_name_to_port(
+       task_name_t             task)
+{
+       return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME);
+}
+
+ipc_port_t
+convert_task_to_port_pinned(
+       task_t          task)
+{
+       ipc_port_t port = IP_NULL;
 
        itk_lock(task);
-       if (task->itk_self != IP_NULL)
-#if    NORMA_TASK
-               if (task->map == VM_MAP_NULL)
-                       /* norma placeholder task */
-                       port = ipc_port_copy_send(task->itk_self);
-               else
-#endif /* NORMA_TASK */
+
+       if (task->ipc_active && task->itk_self != IP_NULL) {
                port = ipc_port_make_send(task->itk_self);
-       else
-               port = IP_NULL;
-       itk_unlock(task);
+       }
 
+       itk_unlock(task);
        task_deallocate(task);
        return port;
 }
-
 /*
- *     Routine:        convert_act_to_port
+ *     Routine:        convert_task_suspend_token_to_port
  *     Purpose:
- *             Convert from a thr_act to a port.
- *             Consumes an thr_act ref; produces a naked send right
+ *             Convert from a task suspension token to a port.
+ *             Consumes a task suspension token ref; produces a naked send-once right
  *             which may be invalid.
  *     Conditions:
  *             Nothing locked.
  */
-
 ipc_port_t
-convert_act_to_port(thr_act)
-       thread_act_t thr_act;
+convert_task_suspension_token_to_port(
+       task_suspension_token_t         task)
 {
        ipc_port_t port;
 
-       act_lock(thr_act);
-       if (thr_act->ith_self != IP_NULL)
-               port = ipc_port_make_send(thr_act->ith_self);
-       else
+       task_lock(task);
+       if (task->active) {
+               if (task->itk_resume == IP_NULL) {
+                       task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
+                           IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
+               }
+
+               /*
+                * Create a send-once right for each instance of a direct user-called
+                * task_suspend2 call. Each time one of these send-once rights is abandoned,
+                * the notification handler will resume the target task.
+                */
+               port = ipc_port_make_sonce(task->itk_resume);
+               assert(IP_VALID(port));
+       } else {
                port = IP_NULL;
-       act_unlock(thr_act);
+       }
+
+       task_unlock(task);
+       task_suspension_token_deallocate(task);
 
-       act_deallocate(thr_act);
        return port;
 }
 
+ipc_port_t
+convert_thread_to_port_pinned(
+       thread_t                thread)
+{
+       ipc_port_t              port = IP_NULL;
+
+       thread_mtx_lock(thread);
+
+       if (thread->ipc_active && thread->ith_self != IP_NULL) {
+               port = ipc_port_make_send(thread->ith_self);
+       }
+
+       thread_mtx_unlock(thread);
+       thread_deallocate(thread);
+       return port;
+}
 /*
  *     Routine:        space_deallocate
  *     Purpose:
@@ -1304,12 +3640,48 @@ convert_act_to_port(thr_act)
 
 void
 space_deallocate(
-       ipc_space_t     space)
+       ipc_space_t     space)
 {
-       if (space != IS_NULL)
+       if (space != IS_NULL) {
                is_release(space);
+       }
+}
+
+/*
+ *     Routine:        space_read_deallocate
+ *     Purpose:
+ *             Deallocate a space read ref produced by convert_port_to_space_read.
+ *     Conditions:
+ *             Nothing locked.
+ */
+
+void
+space_read_deallocate(
+       ipc_space_read_t     space)
+{
+       if (space != IS_INSPECT_NULL) {
+               is_release((ipc_space_t)space);
+       }
+}
+
+/*
+ *     Routine:        space_inspect_deallocate
+ *     Purpose:
+ *             Deallocate a space inspect ref produced by convert_port_to_space_inspect.
+ *     Conditions:
+ *             Nothing locked.
+ */
+
+void
+space_inspect_deallocate(
+       ipc_space_inspect_t     space)
+{
+       if (space != IS_INSPECT_NULL) {
+               is_release((ipc_space_t)space);
+       }
 }
 
+
 /*
  *     Routine:        thread/task_set_exception_ports [kernel call]
  *     Purpose:
@@ -1330,132 +3702,192 @@ space_deallocate(
 
 kern_return_t
 thread_set_exception_ports(
-       thread_act_t                    thr_act,
-       exception_mask_t                exception_mask,
-       ipc_port_t                      new_port,
-       exception_behavior_t            new_behavior,
-       thread_state_flavor_t           new_flavor)
+       thread_t                                thread,
+       exception_mask_t                exception_mask,
+       ipc_port_t                              new_port,
+       exception_behavior_t    new_behavior,
+       thread_state_flavor_t   new_flavor)
 {
-       register int    i;
-       ipc_port_t      old_port[EXC_TYPES_COUNT];
+       ipc_port_t              old_port[EXC_TYPES_COUNT];
+       boolean_t privileged = current_task()->sec_token.val[0] == 0;
+       register int    i;
+
+#if CONFIG_MACF
+       struct label *new_label;
+#endif
 
-       if (!thr_act)
+       if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       if (exception_mask & ~EXC_MASK_ALL)
+       if (exception_mask & ~EXC_MASK_VALID) {
                return KERN_INVALID_ARGUMENT;
+       }
 
        if (IP_VALID(new_port)) {
-               switch (new_behavior) {
+               switch (new_behavior & ~MACH_EXCEPTION_MASK) {
                case EXCEPTION_DEFAULT:
                case EXCEPTION_STATE:
                case EXCEPTION_STATE_IDENTITY:
                        break;
+
                default:
                        return KERN_INVALID_ARGUMENT;
                }
        }
 
-       /* 
+
+       /*
         * Check the validity of the thread_state_flavor by calling the
         * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
         * osfmk/mach/ARCHITECTURE/thread_status.h
         */
-       if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) {
+       if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
                return KERN_INVALID_ARGUMENT;
        }
 
-       act_lock(thr_act);
-       if (!thr_act->active) {
-               act_unlock(thr_act);
+#if CONFIG_MACF
+       new_label = mac_exc_create_label_for_current_proc();
+#endif
+
+       thread_mtx_lock(thread);
+
+       if (!thread->active) {
+               thread_mtx_unlock(thread);
+
                return KERN_FAILURE;
        }
 
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
-               if (exception_mask & (1 << i)) {
-                       old_port[i] = thr_act->exc_actions[i].port;
-                       thr_act->exc_actions[i].port =
-                               ipc_port_copy_send(new_port);
-                       thr_act->exc_actions[i].behavior = new_behavior;
-                       thr_act->exc_actions[i].flavor = new_flavor;
-               } else
+       if (thread->exc_actions == NULL) {
+               ipc_thread_init_exc_actions(thread);
+       }
+       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
+               if ((exception_mask & (1 << i))
+#if CONFIG_MACF
+                   && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
+#endif
+                   ) {
+                       old_port[i] = thread->exc_actions[i].port;
+                       thread->exc_actions[i].port = ipc_port_copy_send(new_port);
+                       thread->exc_actions[i].behavior = new_behavior;
+                       thread->exc_actions[i].flavor = new_flavor;
+                       thread->exc_actions[i].privileged = privileged;
+               } else {
                        old_port[i] = IP_NULL;
-       }/* for */
-       /*
-        * Consume send rights without any lock held.
-        */
-       act_unlock(thr_act);
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
-               if (IP_VALID(old_port[i]))
+               }
+       }
+
+       thread_mtx_unlock(thread);
+
+#if CONFIG_MACF
+       mac_exc_free_label(new_label);
+#endif
+
+       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
+               if (IP_VALID(old_port[i])) {
                        ipc_port_release_send(old_port[i]);
-       if (IP_VALID(new_port))          /* consume send right */
+               }
+       }
+
+       if (IP_VALID(new_port)) {         /* consume send right */
                ipc_port_release_send(new_port);
+       }
 
-        return KERN_SUCCESS;
-}/* thread_set_exception_port */
+       return KERN_SUCCESS;
+}
 
 kern_return_t
 task_set_exception_ports(
-       task_t                          task,
-       exception_mask_t                exception_mask,
-       ipc_port_t                      new_port,
-       exception_behavior_t            new_behavior,
-       thread_state_flavor_t           new_flavor)
+       task_t                                  task,
+       exception_mask_t                exception_mask,
+       ipc_port_t                              new_port,
+       exception_behavior_t    new_behavior,
+       thread_state_flavor_t   new_flavor)
 {
-       register int    i;
-       ipc_port_t      old_port[EXC_TYPES_COUNT];
+       ipc_port_t              old_port[EXC_TYPES_COUNT];
+       boolean_t privileged = current_task()->sec_token.val[0] == 0;
+       register int    i;
+
+#if CONFIG_MACF
+       struct label *new_label;
+#endif
 
        if (task == TASK_NULL) {
                return KERN_INVALID_ARGUMENT;
        }
 
-       if (exception_mask & ~EXC_MASK_ALL) {
+       if (exception_mask & ~EXC_MASK_VALID) {
                return KERN_INVALID_ARGUMENT;
        }
 
        if (IP_VALID(new_port)) {
-               switch (new_behavior) {
+               switch (new_behavior & ~MACH_EXCEPTION_MASK) {
                case EXCEPTION_DEFAULT:
                case EXCEPTION_STATE:
                case EXCEPTION_STATE_IDENTITY:
                        break;
+
                default:
                        return KERN_INVALID_ARGUMENT;
                }
        }
-       /* Cannot easily check "new_flavor", but that just means that
-        * the flavor in the generated exception message might be garbage:
-        * GIGO */
 
-        itk_lock(task);
-        if (task->itk_self == IP_NULL) {
-                itk_unlock(task);
-                return KERN_FAILURE;
-        }
 
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
-               if (exception_mask & (1 << i)) {
+       /*
+        * Check the validity of the thread_state_flavor by calling the
+        * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
+        * osfmk/mach/ARCHITECTURE/thread_status.h
+        */
+       if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+#if CONFIG_MACF
+       new_label = mac_exc_create_label_for_current_proc();
+#endif
+
+       itk_lock(task);
+
+       if (!task->ipc_active) {
+               itk_unlock(task);
+               return KERN_FAILURE;
+       }
+
+       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
+               if ((exception_mask & (1 << i))
+#if CONFIG_MACF
+                   && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
+#endif
+                   ) {
                        old_port[i] = task->exc_actions[i].port;
                        task->exc_actions[i].port =
-                               ipc_port_copy_send(new_port);
+                           ipc_port_copy_send(new_port);
                        task->exc_actions[i].behavior = new_behavior;
                        task->exc_actions[i].flavor = new_flavor;
-               } else
+                       task->exc_actions[i].privileged = privileged;
+               } else {
                        old_port[i] = IP_NULL;
-       }/* for */
+               }
+       }
 
-       /*
-        * Consume send rights without any lock held.
-        */
-        itk_unlock(task);
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
-               if (IP_VALID(old_port[i]))
+       itk_unlock(task);
+
+#if CONFIG_MACF
+       mac_exc_free_label(new_label);
+#endif
+
+       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
+               if (IP_VALID(old_port[i])) {
                        ipc_port_release_send(old_port[i]);
-       if (IP_VALID(new_port))          /* consume send right */
+               }
+       }
+
+       if (IP_VALID(new_port)) {         /* consume send right */
                ipc_port_release_send(new_port);
+       }
 
-        return KERN_SUCCESS;
-}/* task_set_exception_port */
+       return KERN_SUCCESS;
+}
 
 /*
  *     Routine:        thread/task_swap_exception_ports [kernel call]
@@ -1486,197 +3918,247 @@ task_set_exception_ports(
 
 kern_return_t
 thread_swap_exception_ports(
-       thread_act_t                    thr_act,
-       exception_mask_t                exception_mask,
-       ipc_port_t                      new_port,
-       exception_behavior_t            new_behavior,
-       thread_state_flavor_t           new_flavor,
-       exception_mask_array_t          masks,
-       mach_msg_type_number_t          * CountCnt,
-       exception_port_array_t          ports,
+       thread_t                                        thread,
+       exception_mask_t                        exception_mask,
+       ipc_port_t                                      new_port,
+       exception_behavior_t            new_behavior,
+       thread_state_flavor_t           new_flavor,
+       exception_mask_array_t          masks,
+       mach_msg_type_number_t          *CountCnt,
+       exception_port_array_t          ports,
        exception_behavior_array_t      behaviors,
-       thread_state_flavor_array_t     flavors )
+       thread_state_flavor_array_t     flavors)
 {
-       register int    i,
-                       j,
-                       count;
-       ipc_port_t      old_port[EXC_TYPES_COUNT];
+       ipc_port_t              old_port[EXC_TYPES_COUNT];
+       boolean_t privileged = current_task()->sec_token.val[0] == 0;
+       unsigned int    i, j, count;
+
+#if CONFIG_MACF
+       struct label *new_label;
+#endif
 
-       if (!thr_act)
+       if (thread == THREAD_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       if (exception_mask & ~EXC_MASK_ALL) {
+       if (exception_mask & ~EXC_MASK_VALID) {
                return KERN_INVALID_ARGUMENT;
        }
 
        if (IP_VALID(new_port)) {
-               switch (new_behavior) {
+               switch (new_behavior & ~MACH_EXCEPTION_MASK) {
                case EXCEPTION_DEFAULT:
                case EXCEPTION_STATE:
                case EXCEPTION_STATE_IDENTITY:
                        break;
+
                default:
                        return KERN_INVALID_ARGUMENT;
                }
        }
-       /* Cannot easily check "new_flavor", but that just means that
-        * the flavor in the generated exception message might be garbage:
-        * GIGO */
 
-       act_lock(thr_act);
-       if (!thr_act->active) {
-               act_unlock(thr_act);
+
+       if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+#if CONFIG_MACF
+       new_label = mac_exc_create_label_for_current_proc();
+#endif
+
+       thread_mtx_lock(thread);
+
+       if (!thread->active) {
+               thread_mtx_unlock(thread);
+#if CONFIG_MACF
+               mac_exc_free_label(new_label);
+#endif
                return KERN_FAILURE;
        }
 
-       count = 0;
+       if (thread->exc_actions == NULL) {
+               ipc_thread_init_exc_actions(thread);
+       }
 
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
-               if (exception_mask & (1 << i)) {
-                       for (j = 0; j < count; j++) {
-/*
- *                             search for an identical entry, if found
- *                             set corresponding mask for this exception.
- */
-                               if (thr_act->exc_actions[i].port == ports[j] &&
-                                 thr_act->exc_actions[i].behavior ==behaviors[j]
-                                 && thr_act->exc_actions[i].flavor ==flavors[j])
-                               {
+       assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
+       for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
+               if ((exception_mask & (1 << i))
+#if CONFIG_MACF
+                   && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
+#endif
+                   ) {
+                       for (j = 0; j < count; ++j) {
+                               /*
+                                * search for an identical entry, if found
+                                * set corresponding mask for this exception.
+                                */
+                               if (thread->exc_actions[i].port == ports[j] &&
+                                   thread->exc_actions[i].behavior == behaviors[j] &&
+                                   thread->exc_actions[i].flavor == flavors[j]) {
                                        masks[j] |= (1 << i);
                                        break;
                                }
-                       }/* for */
+                       }
+
                        if (j == count) {
                                masks[j] = (1 << i);
-                               ports[j] =
-                               ipc_port_copy_send(thr_act->exc_actions[i].port);
+                               ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
 
-                               behaviors[j] = thr_act->exc_actions[i].behavior;
-                               flavors[j] = thr_act->exc_actions[i].flavor;
-                               count++;
+                               behaviors[j] = thread->exc_actions[i].behavior;
+                               flavors[j] = thread->exc_actions[i].flavor;
+                               ++count;
                        }
 
-                       old_port[i] = thr_act->exc_actions[i].port;
-                       thr_act->exc_actions[i].port =
-                               ipc_port_copy_send(new_port);
-                       thr_act->exc_actions[i].behavior = new_behavior;
-                       thr_act->exc_actions[i].flavor = new_flavor;
-                       if (count > *CountCnt) {
-                               break;
-                       }
-               } else
+                       old_port[i] = thread->exc_actions[i].port;
+                       thread->exc_actions[i].port = ipc_port_copy_send(new_port);
+                       thread->exc_actions[i].behavior = new_behavior;
+                       thread->exc_actions[i].flavor = new_flavor;
+                       thread->exc_actions[i].privileged = privileged;
+               } else {
                        old_port[i] = IP_NULL;
-       }/* for */
+               }
+       }
 
-       /*
-        * Consume send rights without any lock held.
-        */
-       act_unlock(thr_act);
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
-               if (IP_VALID(old_port[i]))
+       thread_mtx_unlock(thread);
+
+#if CONFIG_MACF
+       mac_exc_free_label(new_label);
+#endif
+
+       while (--i >= FIRST_EXCEPTION) {
+               if (IP_VALID(old_port[i])) {
                        ipc_port_release_send(old_port[i]);
-       if (IP_VALID(new_port))          /* consume send right */
+               }
+       }
+
+       if (IP_VALID(new_port)) {         /* consume send right */
                ipc_port_release_send(new_port);
+       }
+
        *CountCnt = count;
+
        return KERN_SUCCESS;
-}/* thread_swap_exception_ports */
+}
 
 kern_return_t
 task_swap_exception_ports(
-       task_t                          task,
-       exception_mask_t                exception_mask,
-       ipc_port_t                      new_port,
-       exception_behavior_t            new_behavior,
-       thread_state_flavor_t           new_flavor,
-       exception_mask_array_t          masks,
-       mach_msg_type_number_t          * CountCnt,
-       exception_port_array_t          ports,
+       task_t                                          task,
+       exception_mask_t                        exception_mask,
+       ipc_port_t                                      new_port,
+       exception_behavior_t            new_behavior,
+       thread_state_flavor_t           new_flavor,
+       exception_mask_array_t          masks,
+       mach_msg_type_number_t          *CountCnt,
+       exception_port_array_t          ports,
        exception_behavior_array_t      behaviors,
-       thread_state_flavor_array_t     flavors         )
+       thread_state_flavor_array_t     flavors)
 {
-       register int    i,
-                       j,
-                       count;
-       ipc_port_t      old_port[EXC_TYPES_COUNT];
+       ipc_port_t              old_port[EXC_TYPES_COUNT];
+       boolean_t privileged = current_task()->sec_token.val[0] == 0;
+       unsigned int    i, j, count;
+
+#if CONFIG_MACF
+       struct label *new_label;
+#endif
 
-       if (task == TASK_NULL)
+       if (task == TASK_NULL) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       if (exception_mask & ~EXC_MASK_ALL) {
+       if (exception_mask & ~EXC_MASK_VALID) {
                return KERN_INVALID_ARGUMENT;
        }
 
        if (IP_VALID(new_port)) {
-               switch (new_behavior) {
+               switch (new_behavior & ~MACH_EXCEPTION_MASK) {
                case EXCEPTION_DEFAULT:
                case EXCEPTION_STATE:
                case EXCEPTION_STATE_IDENTITY:
                        break;
+
                default:
                        return KERN_INVALID_ARGUMENT;
                }
        }
-       /* Cannot easily check "new_flavor", but that just means that
-        * the flavor in the generated exception message might be garbage:
-        * GIGO */
+
+
+       if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+#if CONFIG_MACF
+       new_label = mac_exc_create_label_for_current_proc();
+#endif
 
        itk_lock(task);
-       if (task->itk_self == IP_NULL) {
+
+       if (!task->ipc_active) {
                itk_unlock(task);
+#if CONFIG_MACF
+               mac_exc_free_label(new_label);
+#endif
                return KERN_FAILURE;
        }
 
-       count = 0;
-
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
-               if (exception_mask & (1 << i)) {
+       assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
+       for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
+               if ((exception_mask & (1 << i))
+#if CONFIG_MACF
+                   && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
+#endif
+                   ) {
                        for (j = 0; j < count; j++) {
-/*
*                             search for an identical entry, if found
*                             set corresponding mask for this exception.
- */
+                               /*
                               * search for an identical entry, if found
                               * set corresponding mask for this exception.
                               */
                                if (task->exc_actions[i].port == ports[j] &&
-                                 task->exc_actions[i].behavior == behaviors[j]
-                                 && task->exc_actions[i].flavor == flavors[j])
-                               {
+                                   task->exc_actions[i].behavior == behaviors[j] &&
+                                   task->exc_actions[i].flavor == flavors[j]) {
                                        masks[j] |= (1 << i);
                                        break;
                                }
-                       }/* for */
+                       }
+
                        if (j == count) {
                                masks[j] = (1 << i);
-                               ports[j] =
-                               ipc_port_copy_send(task->exc_actions[i].port);
+                               ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
                                behaviors[j] = task->exc_actions[i].behavior;
                                flavors[j] = task->exc_actions[i].flavor;
-                               count++;
+                               ++count;
                        }
+
                        old_port[i] = task->exc_actions[i].port;
-                       task->exc_actions[i].port =
-                               ipc_port_copy_send(new_port);
+
+                       task->exc_actions[i].port =     ipc_port_copy_send(new_port);
                        task->exc_actions[i].behavior = new_behavior;
                        task->exc_actions[i].flavor = new_flavor;
-                       if (count > *CountCnt) {
-                               break;
-                       }
-               } else
+                       task->exc_actions[i].privileged = privileged;
+               } else {
                        old_port[i] = IP_NULL;
-       }/* for */
-
+               }
+       }
 
-       /*
-        * Consume send rights without any lock held.
-        */
        itk_unlock(task);
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
-               if (IP_VALID(old_port[i]))
+
+#if CONFIG_MACF
+       mac_exc_free_label(new_label);
+#endif
+
+       while (--i >= FIRST_EXCEPTION) {
+               if (IP_VALID(old_port[i])) {
                        ipc_port_release_send(old_port[i]);
-       if (IP_VALID(new_port))          /* consume send right */
+               }
+       }
+
+       if (IP_VALID(new_port)) {         /* consume send right */
                ipc_port_release_send(new_port);
+       }
+
        *CountCnt = count;
 
        return KERN_SUCCESS;
-}/* task_swap_exception_ports */
+}
 
 /*
  *     Routine:        thread/task_get_exception_ports [kernel call]
@@ -1696,131 +4178,356 @@ task_swap_exception_ports(
  *                                     Illegal mask bit set.
  *             KERN_FAILURE            The thread is dead.
  */
-
-kern_return_t
-thread_get_exception_ports(
-       thread_act_t                    thr_act,
+static kern_return_t
+thread_get_exception_ports_internal(
+       thread_t                        thread,
        exception_mask_t                exception_mask,
-       exception_mask_array_t          masks,
-       mach_msg_type_number_t          * CountCnt,
-       exception_port_array_t          ports,
+       exception_mask_array_t          masks,
+       mach_msg_type_number_t          *CountCnt,
+       exception_port_info_array_t     ports_info,
+       exception_port_array_t          ports,
        exception_behavior_array_t      behaviors,
-       thread_state_flavor_array_t     flavors         )
+       thread_state_flavor_array_t     flavors)
 {
-       register int    i,
-                       j,
-                       count;
+       unsigned int count;
+       boolean_t info_only = (ports_info != NULL);
+       boolean_t dbg_ok = TRUE;
+       ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */
+
+       if (thread == THREAD_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (!thr_act)
+       if (exception_mask & ~EXC_MASK_VALID) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       if (exception_mask & ~EXC_MASK_ALL) {
+       if (!info_only && !ports) {
                return KERN_INVALID_ARGUMENT;
        }
 
-       act_lock(thr_act);
-       if (!thr_act->active) {
-               act_unlock(thr_act);
+#if !(DEVELOPMENT || DEBUG) && CONFIG_MACF
+       if (info_only && mac_task_check_expose_task(kernel_task, TASK_FLAVOR_CONTROL) == 0) {
+               dbg_ok = TRUE;
+       } else {
+               dbg_ok = FALSE;
+       }
+#endif
+
+       thread_mtx_lock(thread);
+
+       if (!thread->active) {
+               thread_mtx_unlock(thread);
+
                return KERN_FAILURE;
        }
 
        count = 0;
 
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+       if (thread->exc_actions == NULL) {
+               goto done;
+       }
+
+       for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
                if (exception_mask & (1 << i)) {
-                       for (j = 0; j < count; j++) {
-/*
- *                             search for an identical entry, if found
- *                             set corresponding mask for this exception.
- */
-                               if (thr_act->exc_actions[i].port == ports[j] &&
-                                 thr_act->exc_actions[i].behavior ==behaviors[j]
-                                 && thr_act->exc_actions[i].flavor == flavors[j])
-                               {
+                       ipc_port_t exc_port = thread->exc_actions[i].port;
+                       exception_behavior_t exc_behavior = thread->exc_actions[i].behavior;
+                       thread_state_flavor_t exc_flavor = thread->exc_actions[i].flavor;
+
+                       for (j = 0; j < count; ++j) {
+                               /*
+                                * search for an identical entry, if found
+                                * set corresponding mask for this exception.
+                                */
+                               if (exc_port == port_ptrs[j] &&
+                                   exc_behavior == behaviors[j] &&
+                                   exc_flavor == flavors[j]) {
                                        masks[j] |= (1 << i);
                                        break;
                                }
-                       }/* for */
-                       if (j == count) {
+                       }
+
+                       if (j == count && count < *CountCnt) {
                                masks[j] = (1 << i);
-                               ports[j] =
-                               ipc_port_copy_send(thr_act->exc_actions[i].port);
-                               behaviors[j] = thr_act->exc_actions[i].behavior;
-                               flavors[j] = thr_act->exc_actions[i].flavor;
-                               count++;
-                               if (count >= *CountCnt) {
-                                       break;
+                               port_ptrs[j] = exc_port;
+
+                               if (info_only) {
+                                       if (!dbg_ok || !IP_VALID(exc_port)) {
+                                               /* avoid taking port lock if !dbg_ok */
+                                               ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
+                                       } else {
+                                               uintptr_t receiver;
+                                               (void)ipc_port_get_receiver_task(exc_port, &receiver);
+                                               ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
+                                               ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
+                                       }
+                               } else {
+                                       ports[j] = ipc_port_copy_send(exc_port);
                                }
+                               behaviors[j] = exc_behavior;
+                               flavors[j] = exc_flavor;
+                               ++count;
                        }
                }
-       }/* for */
+       }
 
-       act_unlock(thr_act);
+done:
+       thread_mtx_unlock(thread);
 
        *CountCnt = count;
+
        return KERN_SUCCESS;
-}/* thread_get_exception_ports */
+}
+
+static kern_return_t
+thread_get_exception_ports(
+       thread_t                        thread,
+       exception_mask_t                exception_mask,
+       exception_mask_array_t          masks,
+       mach_msg_type_number_t          *CountCnt,
+       exception_port_array_t          ports,
+       exception_behavior_array_t      behaviors,
+       thread_state_flavor_array_t     flavors)
+{
+       return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
+                  NULL, ports, behaviors, flavors);
+}
 
 kern_return_t
-task_get_exception_ports(
-       task_t                          task,
+thread_get_exception_ports_info(
+       mach_port_t                     port,
+       exception_mask_t                exception_mask,
+       exception_mask_array_t          masks,
+       mach_msg_type_number_t          *CountCnt,
+       exception_port_info_array_t     ports_info,
+       exception_behavior_array_t      behaviors,
+       thread_state_flavor_array_t     flavors)
+{
+       kern_return_t kr;
+
+       thread_t thread = convert_port_to_thread_read_no_eval(port);
+
+       if (thread == THREAD_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
+           ports_info, NULL, behaviors, flavors);
+
+       thread_deallocate(thread);
+       return kr;
+}
+
+kern_return_t
+thread_get_exception_ports_from_user(
+       mach_port_t                     port,
+       exception_mask_t                exception_mask,
+       exception_mask_array_t          masks,
+       mach_msg_type_number_t         *CountCnt,
+       exception_port_array_t          ports,
+       exception_behavior_array_t      behaviors,
+       thread_state_flavor_array_t     flavors)
+{
+       kern_return_t kr;
+
+       thread_t thread = convert_port_to_thread_no_eval(port);
+
+       if (thread == THREAD_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
+
+       thread_deallocate(thread);
+       return kr;
+}
+
+static kern_return_t
+task_get_exception_ports_internal(
+       task_t                          task,
        exception_mask_t                exception_mask,
-       exception_mask_array_t          masks,
-       mach_msg_type_number_t          * CountCnt,
-       exception_port_array_t          ports,
+       exception_mask_array_t          masks,
+       mach_msg_type_number_t          *CountCnt,
+       exception_port_info_array_t     ports_info,
+       exception_port_array_t          ports,
        exception_behavior_array_t      behaviors,
-       thread_state_flavor_array_t     flavors         )
+       thread_state_flavor_array_t     flavors)
 {
-       register int    i,
-                       j,
-                       count;
+       unsigned int count;
+       boolean_t info_only = (ports_info != NULL);
+       boolean_t dbg_ok = TRUE;
+       ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */
+
+       if (task == TASK_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
 
-       if (task == TASK_NULL)
+       if (exception_mask & ~EXC_MASK_VALID) {
                return KERN_INVALID_ARGUMENT;
+       }
 
-       if (exception_mask & ~EXC_MASK_ALL) {
+       if (!info_only && !ports) {
                return KERN_INVALID_ARGUMENT;
        }
 
+#if !(DEVELOPMENT || DEBUG) && CONFIG_MACF
+       if (info_only && mac_task_check_expose_task(kernel_task, TASK_FLAVOR_CONTROL) == 0) {
+               dbg_ok = TRUE;
+       } else {
+               dbg_ok = FALSE;
+       }
+#endif
+
        itk_lock(task);
-       if (task->itk_self == IP_NULL) {
+
+       if (!task->ipc_active) {
                itk_unlock(task);
                return KERN_FAILURE;
        }
 
        count = 0;
 
-       for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+       for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
                if (exception_mask & (1 << i)) {
-                       for (j = 0; j < count; j++) {
-/*
- *                             search for an identical entry, if found
- *                             set corresponding mask for this exception.
- */
-                               if (task->exc_actions[i].port == ports[j] &&
-                                 task->exc_actions[i].behavior == behaviors[j]
-                                 && task->exc_actions[i].flavor == flavors[j])
-                               {
+                       ipc_port_t exc_port = task->exc_actions[i].port;
+                       exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
+                       thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;
+
+                       for (j = 0; j < count; ++j) {
+                               /*
+                                * search for an identical entry, if found
+                                * set corresponding mask for this exception.
+                                */
+                               if (exc_port == port_ptrs[j] &&
+                                   exc_behavior == behaviors[j] &&
+                                   exc_flavor == flavors[j]) {
                                        masks[j] |= (1 << i);
                                        break;
                                }
-                       }/* for */
-                       if (j == count) {
+                       }
+
+                       if (j == count && count < *CountCnt) {
                                masks[j] = (1 << i);
-                               ports[j] =
-                                 ipc_port_copy_send(task->exc_actions[i].port);
-                               behaviors[j] = task->exc_actions[i].behavior;
-                               flavors[j] = task->exc_actions[i].flavor;
-                               count++;
-                               if (count > *CountCnt) {
-                                       break;
+                               port_ptrs[j] = exc_port;
+
+                               if (info_only) {
+                                       if (!dbg_ok || !IP_VALID(exc_port)) {
+                                               /* avoid taking port lock if !dbg_ok */
+                                               ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
+                                       } else {
+                                               uintptr_t receiver;
+                                               (void)ipc_port_get_receiver_task(exc_port, &receiver);
+                                               ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
+                                               ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
+                                       }
+                               } else {
+                                       ports[j] = ipc_port_copy_send(exc_port);
                                }
+                               behaviors[j] = exc_behavior;
+                               flavors[j] = exc_flavor;
+                               ++count;
                        }
                }
-       }/* for */
+       }
 
        itk_unlock(task);
 
        *CountCnt = count;
+
        return KERN_SUCCESS;
-}/* task_get_exception_ports */
+}
+
+static kern_return_t
+task_get_exception_ports(
+       task_t                          task,
+       exception_mask_t                exception_mask,
+       exception_mask_array_t          masks,
+       mach_msg_type_number_t          *CountCnt,
+       exception_port_array_t          ports,
+       exception_behavior_array_t      behaviors,
+       thread_state_flavor_array_t     flavors)
+{
+       return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
+                  NULL, ports, behaviors, flavors);
+}
+
+kern_return_t
+task_get_exception_ports_info(
+       mach_port_t                     port,
+       exception_mask_t                exception_mask,
+       exception_mask_array_t          masks,
+       mach_msg_type_number_t          *CountCnt,
+       exception_port_info_array_t     ports_info,
+       exception_behavior_array_t      behaviors,
+       thread_state_flavor_array_t     flavors)
+{
+       kern_return_t kr;
+
+       task_t task = convert_port_to_task_read_no_eval(port);
+
+       if (task == TASK_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
+           ports_info, NULL, behaviors, flavors);
+
+       task_deallocate(task);
+       return kr;
+}
+
+kern_return_t
+task_get_exception_ports_from_user(
+       mach_port_t                     port,
+       exception_mask_t                exception_mask,
+       exception_mask_array_t          masks,
+       mach_msg_type_number_t         *CountCnt,
+       exception_port_array_t          ports,
+       exception_behavior_array_t      behaviors,
+       thread_state_flavor_array_t     flavors)
+{
+       kern_return_t kr;
+
+       task_t task = convert_port_to_task_no_eval(port);
+
+       if (task == TASK_NULL) {
+               return KERN_INVALID_ARGUMENT;
+       }
+
+       kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
+
+       task_deallocate(task);
+       return kr;
+}
+
+/*
+ *     Routine:        ipc_thread_port_unpin
+ *     Purpose:
+ *             Called on the thread port when the thread is
+ *             terminating so that the last ref can be deallocated
+ *             without a guard exception.
+ *     Conditions:
+ *             Thread mutex lock is held.
+ *             check_bit should be set to true only when port is expected
+ *             to have ip_pinned bit set.
+ */
+void
+ipc_thread_port_unpin(
+       ipc_port_t port,
+       __unused bool check_bit)
+{
+       if (port == IP_NULL) {
+               return;
+       }
+       ip_lock(port);
+       imq_lock(&port->ip_messages);
+#if DEVELOPMENT || DEBUG
+       if (pinned_control_port_enabled && check_bit) {
+               assert(ip_is_control(port)); /*remove once we get rid of boot-arg */
+               assert(port->ip_pinned == 1);
+       }
+#endif
+       port->ip_pinned = 0;
+       imq_unlock(&port->ip_messages);
+       ip_unlock(port);
+}