#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
+#include <kdp/kdp_dyld.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <sys/csr.h>
#endif
-#if CONFIG_EMBEDDED && !SECURE_KERNEL
+#if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
extern int cs_relax_platform_task_ports;
#endif
+extern boolean_t IOTaskHasEntitlement(task_t, const char *);
+
/* forward declarations */
-task_t convert_port_to_locked_task(ipc_port_t port);
-task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
+static kern_return_t port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
+static kern_return_t port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
kern_return_t task_conversion_eval(task_t caller, task_t victim);
+static ipc_space_t convert_port_to_space_no_eval(ipc_port_t port);
+static thread_t convert_port_to_thread_no_eval(ipc_port_t port);
+static ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor);
+static ipc_port_t convert_thread_to_port_with_flavor(thread_t thread, mach_thread_flavor_t flavor);
+static task_read_t convert_port_to_task_read_no_eval(ipc_port_t port);
+static thread_read_t convert_port_to_thread_read_no_eval(ipc_port_t port);
+static ipc_space_read_t convert_port_to_space_read_no_eval(ipc_port_t port);
/*
* Routine: ipc_task_init
ipc_space_t space;
ipc_port_t kport;
ipc_port_t nport;
+ ipc_port_t pport;
kern_return_t kr;
int i;
space->is_task = task;
- kport = ipc_port_alloc_kernel();
- if (kport == IP_NULL) {
- panic("ipc_task_init");
+ if (immovable_control_port_enabled) {
+ ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
+ if (pinned_control_port_enabled) {
+ options |= IPC_KOBJECT_ALLOC_PINNED;
+ }
+ pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_NONE, options);
+
+ kport = ipc_kobject_alloc_labeled_port(IKO_NULL, IKOT_TASK_CONTROL,
+ IPC_LABEL_SUBST_TASK, IPC_KOBJECT_ALLOC_NONE);
+ kport->ip_alt_port = pport;
+ } else {
+ kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
+ IPC_KOBJECT_ALLOC_NONE);
+
+ pport = kport;
}
nport = ipc_port_alloc_kernel();
panic("ipc_task_init");
}
+ if (pport == IP_NULL) {
+ panic("ipc_task_init");
+ }
+
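+	/*
+	 * Hedged note on the split above: when control ports are
+	 * immovable, "pport" is the immovable (and possibly pinned)
+	 * port stored in itk_self, while "kport" is the movable,
+	 * labeled port exposed via itk_task_ports[TASK_FLAVOR_CONTROL];
+	 * kport->ip_alt_port ties the two together for substitution.
+	 */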
itk_lock_init(task);
- task->itk_self = kport;
- task->itk_nself = nport;
+ task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
+ task->itk_task_ports[TASK_FLAVOR_NAME] = nport;
+
+ /* Lazily allocated on-demand */
+ task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
+ task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
+ task->itk_dyld_notify = NULL;
+
+ task->itk_self = pport;
task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
if (task_is_a_corpse_fork(task)) {
/*
 * A no-senders notification for a corpse would not
 * work with a naked send right in the kernel.
*/
- task->itk_sself = IP_NULL;
+ task->itk_settable_self = IP_NULL;
} else {
- task->itk_sself = ipc_port_make_send(kport);
+ task->itk_settable_self = ipc_port_make_send(kport);
}
task->itk_debug_control = IP_NULL;
task->itk_space = space;
}
} else {
itk_lock(parent);
- assert(parent->itk_self != IP_NULL);
+ assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);
/* inherit registered ports */
{
ipc_port_t kport;
ipc_port_t nport;
+ ipc_port_t iport;
+ ipc_port_t rdport;
+ ipc_port_t pport;
itk_lock(task);
- kport = task->itk_self;
+
+ assert(!task->ipc_active || task_is_a_corpse(task));
+ task->ipc_active = true;
+
+ kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
if (kport != IP_NULL) {
- ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
+ ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK_CONTROL);
}
- nport = task->itk_nself;
+ nport = task->itk_task_ports[TASK_FLAVOR_NAME];
if (nport != IP_NULL) {
ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
}
+ iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
+ if (iport != IP_NULL) {
+ ipc_kobject_set(iport, (ipc_kobject_t) task, IKOT_TASK_INSPECT);
+ }
+ rdport = task->itk_task_ports[TASK_FLAVOR_READ];
+ if (rdport != IP_NULL) {
+ ipc_kobject_set(rdport, (ipc_kobject_t) task, IKOT_TASK_READ);
+ }
+ pport = task->itk_self;
+ if (immovable_control_port_enabled && pport != IP_NULL) {
+ ipc_kobject_set(pport, (ipc_kobject_t) task, IKOT_TASK_CONTROL);
+ }
+
itk_unlock(task);
}
{
ipc_port_t kport;
ipc_port_t nport;
+ ipc_port_t iport;
+ ipc_port_t rdport;
ipc_port_t rport;
+ ipc_port_t pport;
itk_lock(task);
- kport = task->itk_self;
+
+ /*
+	 * This innocuous-looking line is load-bearing.
+	 *
+	 * It disables the creation of lazily-made ports.
+	 * We must do so before we drop the last reference on the task,
+	 * as task ports do not own a reference on the task, and
+	 * convert_port_to_task* would crash trying to resurrect a task.
+ */
+ task->ipc_active = false;
+
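+	/*
+	 * Hedged sketch of the guard this enables (cf.
+	 * task_get_special_port_internal() later in this file):
+	 * conversion and lazy-port paths bail out instead of
+	 * resurrecting the task:
+	 *
+	 *	itk_lock(task);
+	 *	if (!task->ipc_active) {
+	 *		itk_unlock(task);
+	 *		return KERN_FAILURE;
+	 *	}
+	 */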
+ kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
if (kport != IP_NULL) {
- ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
+ ip_lock(kport);
+ kport->ip_alt_port = IP_NULL;
+ ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
+ ip_unlock(kport);
}
- nport = task->itk_nself;
+ nport = task->itk_task_ports[TASK_FLAVOR_NAME];
if (nport != IP_NULL) {
ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
}
+ iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
+ if (iport != IP_NULL) {
+ ipc_kobject_set(iport, IKO_NULL, IKOT_NONE);
+ }
+ rdport = task->itk_task_ports[TASK_FLAVOR_READ];
+ if (rdport != IP_NULL) {
+ ipc_kobject_set(rdport, IKO_NULL, IKOT_NONE);
+ }
+ pport = task->itk_self;
+ if (pport != kport && pport != IP_NULL) {
+ assert(immovable_control_port_enabled);
+ assert(pport->ip_immovable_send);
+ ipc_kobject_set(pport, IKO_NULL, IKOT_NONE);
+ }
rport = task->itk_resume;
if (rport != IP_NULL) {
{
ipc_port_t kport;
ipc_port_t nport;
+ ipc_port_t iport;
+ ipc_port_t rdport;
ipc_port_t rport;
- int i;
+ ipc_port_t pport;
+ ipc_port_t sself;
+ ipc_port_t *notifiers_ptr = NULL;
itk_lock(task);
- kport = task->itk_self;
+
+ /*
+ * If we ever failed to clear ipc_active before the last reference
+	 * was dropped, lazy ports might be created and used after the last
+	 * reference is dropped, causing a use-after-free (see the comment in
+ * ipc_task_disable()).
+ */
+ assert(!task->ipc_active);
+
+ kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
+ sself = task->itk_settable_self;
if (kport == IP_NULL) {
/* the task is already terminated (can this happen?) */
itk_unlock(task);
return;
}
- task->itk_self = IP_NULL;
+ task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;
+
+ rdport = task->itk_task_ports[TASK_FLAVOR_READ];
+ task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
- nport = task->itk_nself;
+ iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
+ task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
+
+ nport = task->itk_task_ports[TASK_FLAVOR_NAME];
assert(nport != IP_NULL);
- task->itk_nself = IP_NULL;
+ task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;
+
+ if (task->itk_dyld_notify) {
+ notifiers_ptr = task->itk_dyld_notify;
+ task->itk_dyld_notify = NULL;
+ }
+
+ if (immovable_control_port_enabled) {
+ pport = task->itk_self;
+ assert(pport != IP_NULL);
+ }
+
+ task->itk_self = IP_NULL;
rport = task->itk_resume;
task->itk_resume = IP_NULL;
itk_unlock(task);
/* release the naked send rights */
+ if (IP_VALID(sself)) {
+ ipc_port_release_send(sself);
+ }
- if (IP_VALID(task->itk_sself)) {
- ipc_port_release_send(task->itk_sself);
+ if (notifiers_ptr) {
+ for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
+ if (IP_VALID(notifiers_ptr[i])) {
+ ipc_port_release_send(notifiers_ptr[i]);
+ }
+ }
+ kfree(notifiers_ptr, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT * sizeof(ipc_port_t));
}
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+ for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
if (IP_VALID(task->exc_actions[i].port)) {
ipc_port_release_send(task->exc_actions[i].port);
}
ipc_port_release_send(task->itk_debug_control);
}
- for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
+ for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
if (IP_VALID(task->itk_registered[i])) {
ipc_port_release_send(task->itk_registered[i]);
}
}
/* destroy the kernel ports */
+ if (immovable_control_port_enabled) {
+ ip_lock(kport);
+ kport->ip_alt_port = IP_NULL;
+ ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
+ ip_unlock(kport);
+
+		/* when immovability is off, pport == kport and is destroyed below */
+ ipc_port_dealloc_kernel(pport);
+ }
ipc_port_dealloc_kernel(kport);
ipc_port_dealloc_kernel(nport);
+ if (iport != IP_NULL) {
+ ipc_port_dealloc_kernel(iport);
+ }
+ if (rdport != IP_NULL) {
+ ipc_port_dealloc_kernel(rdport);
+ }
if (rport != IP_NULL) {
ipc_port_dealloc_kernel(rport);
}
* Purpose:
* Reset a task's IPC state to protect it when
* it enters an elevated security context. The
- * task name port can remain the same - since
- * it represents no specific privilege.
+ * task name port can remain the same - since it
+ * represents no specific privilege.
* Conditions:
* Nothing locked. The task must be suspended.
* (Or the current thread must be in the task.)
ipc_task_reset(
task_t task)
{
- ipc_port_t old_kport, new_kport;
+ ipc_port_t old_kport, old_pport, new_kport, new_pport;
ipc_port_t old_sself;
+ ipc_port_t old_rdport;
+ ipc_port_t old_iport;
ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
- int i;
+ ipc_port_t *notifiers_ptr = NULL;
#if CONFIG_MACF
/* Fresh label to unset credentials in existing labels. */
struct label *unset_label = mac_exc_create_label();
#endif
- new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task, IKOT_TASK,
- IPC_KOBJECT_ALLOC_MAKE_SEND);
+ if (immovable_control_port_enabled) {
+ ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
+ if (pinned_control_port_enabled) {
+ options |= IPC_KOBJECT_ALLOC_PINNED;
+ }
+
+ new_pport = ipc_kobject_alloc_port((ipc_kobject_t)task,
+ IKOT_TASK_CONTROL, options);
+
+ new_kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)task,
+ IKOT_TASK_CONTROL, IPC_LABEL_SUBST_TASK,
+ IPC_KOBJECT_ALLOC_NONE);
+ new_kport->ip_alt_port = new_pport;
+ } else {
+ new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
+ IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
+
+ new_pport = new_kport;
+ }
itk_lock(task);
- old_kport = task->itk_self;
+ old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
+ old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
+ old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
+
+ old_pport = task->itk_self;
- if (old_kport == IP_NULL) {
+ if (old_pport == IP_NULL) {
/* the task is already terminated (can this happen?) */
itk_unlock(task);
- ipc_port_release_send(new_kport);
ipc_port_dealloc_kernel(new_kport);
+ if (immovable_control_port_enabled) {
+ ipc_port_dealloc_kernel(new_pport);
+ }
#if CONFIG_MACF
mac_exc_free_label(unset_label);
#endif
return;
}
- old_sself = task->itk_sself;
- task->itk_sself = task->itk_self = new_kport;
+ old_sself = task->itk_settable_self;
+ task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
+ task->itk_self = new_pport;
+
+ task->itk_settable_self = ipc_port_make_send(new_kport);
/* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
ip_lock(old_kport);
+ old_kport->ip_alt_port = IP_NULL;
ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
task->exec_token += 1;
ip_unlock(old_kport);
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+ /* Reset the read and inspect flavors of task port */
+ task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
+ task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
+
+ if (immovable_control_port_enabled) {
+ ip_lock(old_pport);
+ ipc_kobject_set_atomically(old_pport, IKO_NULL, IKOT_NONE);
+ task->exec_token += 1;
+ ip_unlock(old_pport);
+ }
+
+ for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
old_exc_actions[i] = IP_NULL;
if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
}
task->itk_debug_control = IP_NULL;
+ if (task->itk_dyld_notify) {
+ notifiers_ptr = task->itk_dyld_notify;
+ task->itk_dyld_notify = NULL;
+ }
+
itk_unlock(task);
#if CONFIG_MACF
ipc_port_release_send(old_sself);
}
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+ if (notifiers_ptr) {
+ for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
+ if (IP_VALID(notifiers_ptr[i])) {
+ ipc_port_release_send(notifiers_ptr[i]);
+ }
+ }
+ kfree(notifiers_ptr, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT * sizeof(ipc_port_t));
+ }
+
+ for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
if (IP_VALID(old_exc_actions[i])) {
ipc_port_release_send(old_exc_actions[i]);
}
}/* for */
- /* destroy the kernel port */
+ /* destroy all task port flavors */
ipc_port_dealloc_kernel(old_kport);
+ if (immovable_control_port_enabled) {
+ ipc_port_dealloc_kernel(old_pport);
+ }
+ if (old_rdport != IP_NULL) {
+ ipc_port_dealloc_kernel(old_rdport);
+ }
+ if (old_iport != IP_NULL) {
+ ipc_port_dealloc_kernel(old_iport);
+ }
}
/*
void
ipc_thread_init(
- thread_t thread)
+ thread_t thread,
+ ipc_thread_init_options_t options)
{
ipc_port_t kport;
+ ipc_port_t pport;
+ ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;
+
+ /*
+	 * Having the immovable_control_port_enabled boot-arg set does not by
+	 * itself make the thread control port immovable/pinned; the options
+	 * passed in must request it as well.
+	 *
+	 * Raw mach threads created via thread_create() have neither
+	 * IPC_THREAD_INIT_PINNED nor IPC_THREAD_INIT_IMMOVABLE set.
+ */
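+	/*
+	 * Illustrative (hedged) call sites: a task's main thread would
+	 * be initialized with something like
+	 *	ipc_thread_init(thread,
+	 *	    IPC_THREAD_INIT_IMMOVABLE | IPC_THREAD_INIT_PINNED);
+	 * while thread_create() passes no options, leaving its control
+	 * port movable.
+	 */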
+ if (immovable_control_port_enabled && (options & IPC_THREAD_INIT_IMMOVABLE)) {
+ alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
+
+ if (pinned_control_port_enabled && (options & IPC_THREAD_INIT_PINNED)) {
+ alloc_options |= IPC_KOBJECT_ALLOC_PINNED;
+ }
+
+ pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
+ IKOT_THREAD_CONTROL, alloc_options);
- kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD,
- IPC_KOBJECT_ALLOC_MAKE_SEND);
+ kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
+ IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, IPC_KOBJECT_ALLOC_NONE);
+ kport->ip_alt_port = pport;
+ } else {
+ kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
+ IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
+
+ pport = kport;
+ }
+
+ thread->ith_thread_ports[THREAD_FLAVOR_CONTROL] = kport;
+
+ thread->ith_settable_self = ipc_port_make_send(kport);
+
+ thread->ith_self = pport;
- thread->ith_sself = thread->ith_self = kport;
thread->ith_special_reply_port = NULL;
thread->exc_actions = NULL;
thread->ith_assertions = 0;
#endif
+ thread->ipc_active = true;
ipc_kmsg_queue_init(&thread->ith_messages);
thread->ith_rpc_reply = IP_NULL;
}
}
+/*
+ * Routine: ipc_thread_disable
+ * Purpose:
+ *	Disable a thread's IPC state: dissociate its kobject
+ *	ports so they can no longer be converted back to the thread.
+ * Conditions:
+ * Thread locked.
+ */
void
ipc_thread_disable(
thread_t thread)
{
- ipc_port_t kport = thread->ith_self;
+ ipc_port_t kport = thread->ith_thread_ports[THREAD_FLAVOR_CONTROL];
+ ipc_port_t iport = thread->ith_thread_ports[THREAD_FLAVOR_INSPECT];
+ ipc_port_t rdport = thread->ith_thread_ports[THREAD_FLAVOR_READ];
+ ipc_port_t pport = thread->ith_self;
+
+ /*
+	 * This innocuous-looking line is load-bearing.
+	 *
+	 * It disables the creation of lazily-made ports.
+	 * We must do so before we drop the last reference on the thread,
+	 * as thread ports do not own a reference on the thread, and
+	 * convert_port_to_thread* would crash trying to resurrect a thread.
+ */
+ thread->ipc_active = false;
if (kport != IP_NULL) {
- ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
+ ip_lock(kport);
+ kport->ip_alt_port = IP_NULL;
+ ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
+ ip_unlock(kport);
+ }
+
+ if (iport != IP_NULL) {
+ ipc_kobject_set(iport, IKO_NULL, IKOT_NONE);
+ }
+
+ if (rdport != IP_NULL) {
+ ipc_kobject_set(rdport, IKO_NULL, IKOT_NONE);
+ }
+
+ if (pport != kport && pport != IP_NULL) {
+ assert(immovable_control_port_enabled);
+ assert(pport->ip_immovable_send);
+ ipc_kobject_set(pport, IKO_NULL, IKOT_NONE);
}
/* unbind the thread special reply port */
ipc_thread_terminate(
thread_t thread)
{
- ipc_port_t kport = thread->ith_self;
+ ipc_port_t kport = IP_NULL;
+ ipc_port_t iport = IP_NULL;
+ ipc_port_t rdport = IP_NULL;
+ ipc_port_t ith_rpc_reply = IP_NULL;
+ ipc_port_t pport = IP_NULL;
- if (kport != IP_NULL) {
- int i;
+ thread_mtx_lock(thread);
+
+ /*
+ * If we ever failed to clear ipc_active before the last reference
+	 * was dropped, lazy ports might be created and used after the last
+	 * reference is dropped, causing a use-after-free (see the comment in
+ * ipc_thread_disable()).
+ */
+ assert(!thread->ipc_active);
+
+ kport = thread->ith_thread_ports[THREAD_FLAVOR_CONTROL];
+ iport = thread->ith_thread_ports[THREAD_FLAVOR_INSPECT];
+ rdport = thread->ith_thread_ports[THREAD_FLAVOR_READ];
+ pport = thread->ith_self;
- if (IP_VALID(thread->ith_sself)) {
- ipc_port_release_send(thread->ith_sself);
+ if (kport != IP_NULL) {
+ if (IP_VALID(thread->ith_settable_self)) {
+ ipc_port_release_send(thread->ith_settable_self);
}
- thread->ith_sself = thread->ith_self = IP_NULL;
+ thread->ith_thread_ports[THREAD_FLAVOR_CONTROL] = IP_NULL;
+ thread->ith_thread_ports[THREAD_FLAVOR_READ] = IP_NULL;
+ thread->ith_thread_ports[THREAD_FLAVOR_INSPECT] = IP_NULL;
+ thread->ith_settable_self = IP_NULL;
+ thread->ith_self = IP_NULL;
if (thread->exc_actions != NULL) {
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
+ for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
if (IP_VALID(thread->exc_actions[i].port)) {
ipc_port_release_send(thread->exc_actions[i].port);
}
}
ipc_thread_destroy_exc_actions(thread);
}
-
- ipc_port_dealloc_kernel(kport);
}
#if IMPORTANCE_INHERITANCE
#endif
assert(ipc_kmsg_queue_empty(&thread->ith_messages));
+ ith_rpc_reply = thread->ith_rpc_reply;
+ thread->ith_rpc_reply = IP_NULL;
- if (thread->ith_rpc_reply != IP_NULL) {
- ipc_port_dealloc_reply(thread->ith_rpc_reply);
- }
+ thread_mtx_unlock(thread);
- thread->ith_rpc_reply = IP_NULL;
+ if (pport != kport && pport != IP_NULL) {
+		/* this thread has an immovable control port */
+ ip_lock(kport);
+ kport->ip_alt_port = IP_NULL;
+ ipc_kobject_set_atomically(kport, IKO_NULL, IKOT_NONE);
+ ip_unlock(kport);
+ ipc_port_dealloc_kernel(pport);
+ }
+ if (kport != IP_NULL) {
+ ipc_port_dealloc_kernel(kport);
+ }
+ if (iport != IP_NULL) {
+ ipc_port_dealloc_kernel(iport);
+ }
+ if (rdport != IP_NULL) {
+ ipc_port_dealloc_kernel(rdport);
+ }
+ if (ith_rpc_reply != IP_NULL) {
+ ipc_port_dealloc_reply(ith_rpc_reply);
+ }
}
/*
* Purpose:
* Reset the IPC state for a given Mach thread when
* its task enters an elevated security context.
- * Both the thread port and its exception ports have
+ * All flavors of thread port and its exception ports have
* to be reset. Its RPC reply port cannot have any
- * rights outstanding, so it should be fine.
+ * rights outstanding, so it should be fine. The thread
+ * inspect and read ports are set to IP_NULL.
* Conditions:
* Nothing locked.
*/
ipc_thread_reset(
thread_t thread)
{
- ipc_port_t old_kport, new_kport;
+ ipc_port_t old_kport, new_kport, old_pport, new_pport;
ipc_port_t old_sself;
+ ipc_port_t old_rdport;
+ ipc_port_t old_iport;
ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
boolean_t has_old_exc_actions = FALSE;
- int i;
+ boolean_t thread_is_immovable, thread_is_pinned;
+ int i;
#if CONFIG_MACF
struct label *new_label = mac_exc_create_label();
#endif
- new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD,
- IPC_KOBJECT_ALLOC_MAKE_SEND);
+ thread_is_immovable = thread->ith_self->ip_immovable_send;
+ thread_is_pinned = thread->ith_self->ip_pinned;
+
+ if (thread_is_immovable) {
+ ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;
+
+ if (thread_is_pinned) {
+ assert(pinned_control_port_enabled);
+ alloc_options |= IPC_KOBJECT_ALLOC_PINNED;
+ }
+ if (thread_is_immovable) {
+ alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
+ }
+ new_pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
+ IKOT_THREAD_CONTROL, alloc_options);
+
+ new_kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
+ IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD,
+ IPC_KOBJECT_ALLOC_NONE);
+ new_kport->ip_alt_port = new_pport;
+ } else {
+ new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
+ IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
+
+ new_pport = new_kport;
+ }
thread_mtx_lock(thread);
- old_kport = thread->ith_self;
- old_sself = thread->ith_sself;
+ old_kport = thread->ith_thread_ports[THREAD_FLAVOR_CONTROL];
+ old_rdport = thread->ith_thread_ports[THREAD_FLAVOR_READ];
+ old_iport = thread->ith_thread_ports[THREAD_FLAVOR_INSPECT];
+
+ old_sself = thread->ith_settable_self;
+ old_pport = thread->ith_self;
if (old_kport == IP_NULL && thread->inspection == FALSE) {
- /* the is already terminated (can this happen?) */
+ /* thread is already terminated (can this happen?) */
thread_mtx_unlock(thread);
- ipc_port_release_send(new_kport);
ipc_port_dealloc_kernel(new_kport);
+ if (thread_is_immovable) {
+ ipc_port_dealloc_kernel(new_pport);
+ }
#if CONFIG_MACF
mac_exc_free_label(new_label);
#endif
return;
}
- thread->ith_sself = thread->ith_self = new_kport;
+ thread->ipc_active = true;
+ thread->ith_thread_ports[THREAD_FLAVOR_CONTROL] = new_kport;
+ thread->ith_self = new_pport;
+ thread->ith_settable_self = ipc_port_make_send(new_kport);
+ thread->ith_thread_ports[THREAD_FLAVOR_INSPECT] = IP_NULL;
+ thread->ith_thread_ports[THREAD_FLAVOR_READ] = IP_NULL;
+
if (old_kport != IP_NULL) {
- ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
+ ip_lock(old_kport);
+ old_kport->ip_alt_port = IP_NULL;
+ ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
+ ip_unlock(old_kport);
+ }
+ if (old_rdport != IP_NULL) {
+ ipc_kobject_set(old_rdport, IKO_NULL, IKOT_NONE);
+ }
+ if (old_iport != IP_NULL) {
+ ipc_kobject_set(old_iport, IKO_NULL, IKOT_NONE);
+ }
+ if (thread_is_immovable && old_pport != IP_NULL) {
+ ipc_kobject_set(old_pport, IKO_NULL, IKOT_NONE);
}
/*
if (old_kport != IP_NULL) {
ipc_port_dealloc_kernel(old_kport);
}
+ if (old_rdport != IP_NULL) {
+ ipc_port_dealloc_kernel(old_rdport);
+ }
+ if (old_iport != IP_NULL) {
+ ipc_port_dealloc_kernel(old_iport);
+ }
+
+ if (thread_is_immovable && old_pport != IP_NULL) {
+ ipc_port_dealloc_kernel(old_pport);
+ }
/* unbind the thread special reply port */
if (IP_VALID(thread->ith_special_reply_port)) {
retrieve_task_self_fast(
task_t task)
{
- __assert_only ipc_port_t sright;
- ipc_port_t port;
+ ipc_port_t port = IP_NULL;
assert(task == current_task());
itk_lock(task);
assert(task->itk_self != IP_NULL);
- if ((port = task->itk_sself) == task->itk_self) {
- /* no interposing */
- sright = ipc_port_copy_send(port);
- assert(sright == port);
+ if (task->itk_settable_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
+ /* no interposing, return the IMMOVABLE port */
+ port = ipc_port_make_send(task->itk_self);
+ if (immovable_control_port_enabled) {
+ assert(port->ip_immovable_send == 1);
+ if (pinned_control_port_enabled) {
+ /* pinned port is also immovable */
+ assert(port->ip_pinned == 1);
+ }
+ }
} else {
- port = ipc_port_copy_send(port);
+ port = ipc_port_copy_send(task->itk_settable_self);
}
itk_unlock(task);
return port;
}
+/*
+ * Routine: mach_task_is_self
+ * Purpose:
+ * [MIG call] Checks if the task (control/read/inspect/name/movable)
+ *	port refers to current_task.
+ */
+kern_return_t
+mach_task_is_self(
+ task_t task,
+ boolean_t *is_self)
+{
+ if (task == TASK_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ *is_self = (task == current_task());
+
+ return KERN_SUCCESS;
+}
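+/*
+ * Hedged userspace sketch (illustration only; the MIG-generated
+ * prototype may differ):
+ *
+ *	boolean_t is_self = FALSE;
+ *	kern_return_t kr = mach_task_is_self(port, &is_self);
+ *	if (kr == KERN_SUCCESS && is_self) {
+ *		... port names the calling task ...
+ *	}
+ */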
+
/*
* Routine: retrieve_thread_self_fast
* Purpose:
retrieve_thread_self_fast(
thread_t thread)
{
- __assert_only ipc_port_t sright;
- ipc_port_t port;
+ ipc_port_t port = IP_NULL;
assert(thread == current_thread());
assert(thread->ith_self != IP_NULL);
- if ((port = thread->ith_sself) == thread->ith_self) {
- /* no interposing */
- sright = ipc_port_copy_send(port);
- assert(sright == port);
+ if (thread->ith_settable_self == thread->ith_thread_ports[THREAD_FLAVOR_CONTROL]) {
+ /* no interposing, return IMMOVABLE_PORT */
+ port = ipc_port_make_send(thread->ith_self);
} else {
- port = ipc_port_copy_send(port);
+ port = ipc_port_copy_send(thread->ith_settable_self);
}
thread_mtx_unlock(thread);
kern_return_t
thread_get_special_port(
- thread_t thread,
- int which,
- ipc_port_t *portp)
+ thread_inspect_t thread,
+ int which,
+ ipc_port_t *portp);
+
+static kern_return_t
+thread_get_special_port_internal(
+ thread_inspect_t thread,
+ int which,
+ ipc_port_t *portp,
+ mach_thread_flavor_t flavor)
{
- kern_return_t result = KERN_SUCCESS;
- ipc_port_t *whichp;
+ kern_return_t kr;
+ ipc_port_t port;
if (thread == THREAD_NULL) {
return KERN_INVALID_ARGUMENT;
}
+ if ((kr = port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
+ return kr;
+ }
+
+ thread_mtx_lock(thread);
+ if (!thread->active) {
+ thread_mtx_unlock(thread);
+ return KERN_FAILURE;
+ }
+
switch (which) {
case THREAD_KERNEL_PORT:
- whichp = &thread->ith_sself;
+ port = ipc_port_copy_send(thread->ith_settable_self);
+ thread_mtx_unlock(thread);
+ break;
+
+ case THREAD_READ_PORT:
+ case THREAD_INSPECT_PORT:
+ thread_mtx_unlock(thread);
+ mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
+ THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
+ /* convert_thread_to_port_with_flavor consumes a thread reference */
+ thread_reference(thread);
+ port = convert_thread_to_port_with_flavor(thread, current_flavor);
break;
default:
+ thread_mtx_unlock(thread);
return KERN_INVALID_ARGUMENT;
}
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
+kern_return_t
+thread_get_special_port(
+ thread_inspect_t thread,
+ int which,
+ ipc_port_t *portp)
+{
+ return thread_get_special_port_internal(thread, which, portp, THREAD_FLAVOR_CONTROL);
+}
+
+static ipc_port_t
+thread_get_non_substituted_self(thread_t thread)
+{
+ ipc_port_t port = IP_NULL;
+
thread_mtx_lock(thread);
+ port = thread->ith_settable_self;
+ if (IP_VALID(port)) {
+ ip_reference(port);
+ }
+ thread_mtx_unlock(thread);
- if (thread->active) {
- *portp = ipc_port_copy_send(*whichp);
- } else {
- result = KERN_FAILURE;
+ if (IP_VALID(port)) {
+ /* consumes the port reference */
+ return ipc_kobject_alloc_subst_once(port);
}
- thread_mtx_unlock(thread);
+ return port;
+}
- return result;
+kern_return_t
+thread_get_special_port_from_user(
+ mach_port_t port,
+ int which,
+ ipc_port_t *portp)
+{
+ ipc_kobject_type_t kotype;
+ mach_thread_flavor_t flavor;
+ kern_return_t kr = KERN_SUCCESS;
+
+ thread_t thread = convert_port_to_thread_check_type(port, &kotype,
+ THREAD_FLAVOR_INSPECT, FALSE);
+
+ if (thread == THREAD_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (which == THREAD_KERNEL_PORT && thread->task == current_task()) {
+#if CONFIG_MACF
+ /*
+	 * Only check threads belonging to current_task,
+	 * because foreign thread ports are always movable.
+ */
+ if (mac_task_check_get_movable_control_port()) {
+ kr = KERN_DENIED;
+ goto out;
+ }
+#endif
+ if (kotype == IKOT_THREAD_CONTROL) {
+ *portp = thread_get_non_substituted_self(thread);
+ goto out;
+ }
+ }
+
+ switch (kotype) {
+ case IKOT_THREAD_CONTROL:
+ flavor = THREAD_FLAVOR_CONTROL;
+ break;
+ case IKOT_THREAD_READ:
+ flavor = THREAD_FLAVOR_READ;
+ break;
+ case IKOT_THREAD_INSPECT:
+ flavor = THREAD_FLAVOR_INSPECT;
+ break;
+ default:
+ panic("strange kobject type");
+ }
+
+ kr = thread_get_special_port_internal(thread, which, portp, flavor);
+out:
+ thread_deallocate(thread);
+ return kr;
+}
+
+static kern_return_t
+port_allowed_with_thread_flavor(
+ int which,
+ mach_thread_flavor_t flavor)
+{
+ switch (flavor) {
+ case THREAD_FLAVOR_CONTROL:
+ return KERN_SUCCESS;
+
+ case THREAD_FLAVOR_READ:
+
+ switch (which) {
+ case THREAD_READ_PORT:
+ case THREAD_INSPECT_PORT:
+ return KERN_SUCCESS;
+ default:
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ case THREAD_FLAVOR_INSPECT:
+
+ switch (which) {
+ case THREAD_INSPECT_PORT:
+ return KERN_SUCCESS;
+ default:
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ default:
+ return KERN_INVALID_CAPABILITY;
+ }
}
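+/*
+ * Summary of the matrix above (hedged, for reference):
+ *	CONTROL ports may fetch any thread special port;
+ *	READ ports only THREAD_READ_PORT / THREAD_INSPECT_PORT;
+ *	INSPECT ports only THREAD_INSPECT_PORT.
+ */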
/*
* Nothing locked. If successful, consumes
* the supplied send right.
* Returns:
- * KERN_SUCCESS Changed the special port.
- * KERN_INVALID_ARGUMENT The thread is null.
- * KERN_FAILURE The thread is dead.
- * KERN_INVALID_ARGUMENT Invalid special port.
+ * KERN_SUCCESS Changed the special port.
+ * KERN_INVALID_ARGUMENT The thread is null.
+ * KERN_INVALID_RIGHT Port is marked as immovable.
+ * KERN_FAILURE The thread is dead.
+ * KERN_INVALID_ARGUMENT Invalid special port.
+ * KERN_NO_ACCESS Restricted access to set port.
*/
kern_return_t
thread_set_special_port(
thread_t thread,
int which,
- ipc_port_t port)
+ ipc_port_t port)
{
kern_return_t result = KERN_SUCCESS;
ipc_port_t *whichp, old = IP_NULL;
return KERN_INVALID_ARGUMENT;
}
- switch (which) {
- case THREAD_KERNEL_PORT:
- whichp = &thread->ith_sself;
- break;
-
- default:
- return KERN_INVALID_ARGUMENT;
+ if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
+ return KERN_INVALID_RIGHT;
}
- thread_mtx_lock(thread);
+ switch (which) {
+ case THREAD_KERNEL_PORT:
+#if CONFIG_CSR
+ if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
+ /*
+ * Only allow setting of thread-self
+ * special port from user-space when SIP is
+ * disabled (for Mach-on-Mach emulation).
+ */
+ return KERN_NO_ACCESS;
+ }
+#endif
+ whichp = &thread->ith_settable_self;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ thread_mtx_lock(thread);
if (thread->active) {
old = *whichp;
* Conditions:
* Nothing locked.
* Returns:
- * KERN_SUCCESS Extracted a send right.
+ * KERN_SUCCESS Extracted a send right.
* KERN_INVALID_ARGUMENT The task is null.
- * KERN_FAILURE The task/space is dead.
+ * KERN_FAILURE The task/space is dead.
* KERN_INVALID_ARGUMENT Invalid special port.
*/
task_get_special_port(
task_t task,
int which,
- ipc_port_t *portp)
+ ipc_port_t *portp);
+
+static kern_return_t
+task_get_special_port_internal(
+ task_t task,
+ int which,
+ ipc_port_t *portp,
+ mach_task_flavor_t flavor)
{
+ kern_return_t kr;
ipc_port_t port;
if (task == TASK_NULL) {
return KERN_INVALID_ARGUMENT;
}
+ if ((kr = port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
+ return kr;
+ }
+
itk_lock(task);
- if (task->itk_self == IP_NULL) {
+ if (!task->ipc_active) {
itk_unlock(task);
return KERN_FAILURE;
}
switch (which) {
case TASK_KERNEL_PORT:
- port = ipc_port_copy_send(task->itk_sself);
+ port = ipc_port_copy_send(task->itk_settable_self);
+ itk_unlock(task);
+ break;
+
+ case TASK_READ_PORT:
+ case TASK_INSPECT_PORT:
+ itk_unlock(task);
+ mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
+ TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
+ /* convert_task_to_port_with_flavor consumes a task reference */
+ task_reference(task);
+ port = convert_task_to_port_with_flavor(task, current_flavor);
break;
case TASK_NAME_PORT:
- port = ipc_port_make_send(task->itk_nself);
+ port = ipc_port_make_send(task->itk_task_ports[TASK_FLAVOR_NAME]);
+ itk_unlock(task);
break;
case TASK_HOST_PORT:
port = ipc_port_copy_send(task->itk_host);
+ itk_unlock(task);
break;
case TASK_BOOTSTRAP_PORT:
port = ipc_port_copy_send(task->itk_bootstrap);
+ itk_unlock(task);
break;
case TASK_SEATBELT_PORT:
port = ipc_port_copy_send(task->itk_seatbelt);
+ itk_unlock(task);
break;
case TASK_ACCESS_PORT:
port = ipc_port_copy_send(task->itk_task_access);
+ itk_unlock(task);
break;
case TASK_DEBUG_CONTROL_PORT:
port = ipc_port_copy_send(task->itk_debug_control);
+ itk_unlock(task);
break;
default:
itk_unlock(task);
return KERN_INVALID_ARGUMENT;
}
- itk_unlock(task);
*portp = port;
return KERN_SUCCESS;
}
+kern_return_t
+task_get_special_port(
+ task_t task,
+ int which,
+ ipc_port_t *portp)
+{
+ return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
+}
+
+static ipc_port_t
+task_get_non_substituted_self(task_t task)
+{
+ ipc_port_t port = IP_NULL;
+
+ itk_lock(task);
+ port = task->itk_settable_self;
+ if (IP_VALID(port)) {
+ ip_reference(port);
+ }
+ itk_unlock(task);
+
+ if (IP_VALID(port)) {
+ /* consumes the port reference */
+ return ipc_kobject_alloc_subst_once(port);
+ }
+
+ return port;
+}
+kern_return_t
+task_get_special_port_from_user(
+ mach_port_t port,
+ int which,
+ ipc_port_t *portp)
+{
+ ipc_kobject_type_t kotype;
+ mach_task_flavor_t flavor;
+ kern_return_t kr = KERN_SUCCESS;
+
+ task_t task = convert_port_to_task_check_type(port, &kotype,
+ TASK_FLAVOR_INSPECT, FALSE);
+
+ if (task == TASK_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (which == TASK_KERNEL_PORT && task == current_task()) {
+#if CONFIG_MACF
+ /*
+	 * Only check current_task, because foreign
+	 * task ports are always movable.
+ */
+ if (mac_task_check_get_movable_control_port()) {
+ kr = KERN_DENIED;
+ goto out;
+ }
+#endif
+ if (kotype == IKOT_TASK_CONTROL) {
+ *portp = task_get_non_substituted_self(task);
+ goto out;
+ }
+ }
+
+ switch (kotype) {
+ case IKOT_TASK_CONTROL:
+ flavor = TASK_FLAVOR_CONTROL;
+ break;
+ case IKOT_TASK_READ:
+ flavor = TASK_FLAVOR_READ;
+ break;
+ case IKOT_TASK_INSPECT:
+ flavor = TASK_FLAVOR_INSPECT;
+ break;
+ default:
+ panic("strange kobject type");
+ }
+
+ kr = task_get_special_port_internal(task, which, portp, flavor);
+out:
+ task_deallocate(task);
+ return kr;
+}
+
+static kern_return_t
+port_allowed_with_task_flavor(
+ int which,
+ mach_task_flavor_t flavor)
+{
+ switch (flavor) {
+ case TASK_FLAVOR_CONTROL:
+ return KERN_SUCCESS;
+
+ case TASK_FLAVOR_READ:
+
+ switch (which) {
+ case TASK_READ_PORT:
+ case TASK_INSPECT_PORT:
+ case TASK_NAME_PORT:
+ return KERN_SUCCESS;
+ default:
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ case TASK_FLAVOR_INSPECT:
+
+ switch (which) {
+ case TASK_INSPECT_PORT:
+ case TASK_NAME_PORT:
+ return KERN_SUCCESS;
+ default:
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ default:
+ return KERN_INVALID_CAPABILITY;
+ }
+}
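+/*
+ * Summary of the matrix above (hedged, for reference):
+ *	CONTROL ports may fetch any task special port;
+ *	READ ports only TASK_READ_PORT / TASK_INSPECT_PORT / TASK_NAME_PORT;
+ *	INSPECT ports only TASK_INSPECT_PORT / TASK_NAME_PORT.
+ */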
+
/*
* Routine: task_set_special_port [kernel call]
* Purpose:
* Nothing locked. If successful, consumes
* the supplied send right.
* Returns:
- * KERN_SUCCESS Changed the special port.
+ * KERN_SUCCESS Changed the special port.
* KERN_INVALID_ARGUMENT The task is null.
- * KERN_FAILURE The task/space is dead.
+ * KERN_INVALID_RIGHT Port is marked as immovable.
+ * KERN_FAILURE The task/space is dead.
* KERN_INVALID_ARGUMENT Invalid special port.
- * KERN_NO_ACCESS Restricted access to set port.
+ * KERN_NO_ACCESS Restricted access to set port.
*/
kern_return_t
return KERN_NO_ACCESS;
}
+ if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
+ return KERN_INVALID_RIGHT;
+ }
+
switch (which) {
case TASK_KERNEL_PORT:
case TASK_HOST_PORT:
int which,
ipc_port_t port)
{
- ipc_port_t *whichp;
- ipc_port_t old;
+ ipc_port_t old = IP_NULL;
+ kern_return_t rc = KERN_INVALID_ARGUMENT;
if (task == TASK_NULL) {
- return KERN_INVALID_ARGUMENT;
+ goto out;
+ }
+
+ itk_lock(task);
+ if (!task->ipc_active) {
+ rc = KERN_FAILURE;
+ goto out_unlock;
}
switch (which) {
case TASK_KERNEL_PORT:
- whichp = &task->itk_sself;
+ old = task->itk_settable_self;
+ task->itk_settable_self = port;
break;
case TASK_HOST_PORT:
- whichp = &task->itk_host;
+ old = task->itk_host;
+ task->itk_host = port;
break;
case TASK_BOOTSTRAP_PORT:
- whichp = &task->itk_bootstrap;
+ old = task->itk_bootstrap;
+ task->itk_bootstrap = port;
break;
+ /* Never allow overwrite of seatbelt port */
case TASK_SEATBELT_PORT:
- whichp = &task->itk_seatbelt;
+ if (IP_VALID(task->itk_seatbelt)) {
+ rc = KERN_NO_ACCESS;
+ goto out_unlock;
+ }
+ task->itk_seatbelt = port;
break;
+ /* Never allow overwrite of the task access port */
case TASK_ACCESS_PORT:
- whichp = &task->itk_task_access;
+ if (IP_VALID(task->itk_task_access)) {
+ rc = KERN_NO_ACCESS;
+ goto out_unlock;
+ }
+ task->itk_task_access = port;
break;
case TASK_DEBUG_CONTROL_PORT:
- whichp = &task->itk_debug_control;
+ old = task->itk_debug_control;
+ task->itk_debug_control = port;
break;
default:
- return KERN_INVALID_ARGUMENT;
+ rc = KERN_INVALID_ARGUMENT;
+ goto out_unlock;
}/* switch */
- itk_lock(task);
- if (task->itk_self == IP_NULL) {
- itk_unlock(task);
- return KERN_FAILURE;
- }
-
- /* Never allow overwrite of seatbelt, or task access ports */
- switch (which) {
- case TASK_SEATBELT_PORT:
- case TASK_ACCESS_PORT:
- if (IP_VALID(*whichp)) {
- itk_unlock(task);
- return KERN_NO_ACCESS;
- }
- break;
- default:
- break;
- }
+ rc = KERN_SUCCESS;
- old = *whichp;
- *whichp = port;
+out_unlock:
itk_unlock(task);
if (IP_VALID(old)) {
ipc_port_release_send(old);
}
- return KERN_SUCCESS;
+out:
+ return rc;
}
-
/*
* Routine: mach_ports_register [kernel call]
* Purpose:
* Nothing locked. If successful, consumes
* the supplied rights and memory.
* Returns:
- * KERN_SUCCESS Stashed the port rights.
+ * KERN_SUCCESS Stashed the port rights.
+ * KERN_INVALID_RIGHT Port in array is marked immovable.
* KERN_INVALID_ARGUMENT The task is null.
* KERN_INVALID_ARGUMENT The task is dead.
* KERN_INVALID_ARGUMENT The memory param is null.
for (i = 0; i < portsCnt; i++) {
ports[i] = memory[i];
+ if (IP_VALID(ports[i]) && (ports[i]->ip_immovable_receive || ports[i]->ip_immovable_send)) {
+ return KERN_INVALID_RIGHT;
+ }
}
for (; i < TASK_PORT_REGISTER_MAX; i++) {
ports[i] = IP_NULL;
}
itk_lock(task);
- if (task->itk_self == IP_NULL) {
+ if (!task->ipc_active) {
itk_unlock(task);
return KERN_INVALID_ARGUMENT;
}
}
itk_lock(task);
- if (task->itk_self == IP_NULL) {
+ if (!task->ipc_active) {
itk_unlock(task);
kfree(memory, size);
return KERN_SUCCESS;
}
-extern zone_t task_zone;
-
kern_return_t
task_conversion_eval(task_t caller, task_t victim)
{
return KERN_INVALID_SECURITY;
}
- zone_require(victim, task_zone);
+ task_require(victim);
-#if CONFIG_EMBEDDED
+#if !defined(XNU_TARGET_OS_OSX)
/*
- * On embedded platforms, only a platform binary can resolve the task port
+ * On platforms other than macOS, only a platform binary can resolve the task port
* of another platform binary.
*/
if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
}
#endif /* SECURE_KERNEL */
}
-#endif /* CONFIG_EMBEDDED */
+#endif /* !defined(XNU_TARGET_OS_OSX) */
return KERN_SUCCESS;
}
* Conditions:
* Nothing locked, blocking OK.
*/
-task_t
-convert_port_to_locked_task(ipc_port_t port)
+static task_t
+convert_port_to_locked_task(ipc_port_t port, boolean_t eval)
{
int try_failed_count = 0;
task_t task;
ip_lock(port);
- if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
+ if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL)) {
ip_unlock(port);
return TASK_NULL;
}
task = (task_t) ip_get_kobject(port);
assert(task != TASK_NULL);
- if (task_conversion_eval(ct, task)) {
+ if (eval && task_conversion_eval(ct, task)) {
ip_unlock(port);
return TASK_NULL;
}
* Conditions:
* Nothing locked, blocking OK.
*/
-task_inspect_t
+static task_inspect_t
convert_port_to_locked_task_inspect(ipc_port_t port)
{
int try_failed_count = 0;
task_inspect_t task;
ip_lock(port);
- if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
+ if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL &&
+ ip_kotype(port) != IKOT_TASK_READ &&
+ ip_kotype(port) != IKOT_TASK_INSPECT)) {
ip_unlock(port);
return TASK_INSPECT_NULL;
}
return TASK_INSPECT_NULL;
}
+/*
+ * Routine: convert_port_to_locked_task_read
+ * Purpose:
+ * Internal helper routine to convert from a port to a locked
+ * task read right. Used by internal routines that try to convert from a
+ * task read port to a reference on some task related object.
+ * Conditions:
+ * Nothing locked, blocking OK.
+ */
+static task_read_t
+convert_port_to_locked_task_read(
+ ipc_port_t port,
+ boolean_t eval)
+{
+ int try_failed_count = 0;
+
+ while (IP_VALID(port)) {
+ task_t ct = current_task();
+ task_read_t task;
+
+ ip_lock(port);
+ if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK_CONTROL &&
+ ip_kotype(port) != IKOT_TASK_READ)) {
+ ip_unlock(port);
+ return TASK_READ_NULL;
+ }
+ task = (task_read_t)ipc_kobject_get(port);
+ assert(task != TASK_READ_NULL);
+
+ if (eval && task_conversion_eval(ct, task)) {
+ ip_unlock(port);
+ return TASK_READ_NULL;
+ }
+
+ /*
+ * Normal lock ordering puts task_lock() before ip_lock().
+ * Attempt out-of-order locking here.
+ */
+ if (task_lock_try((task_t)task)) {
+ ip_unlock(port);
+ return task;
+ }
+ try_failed_count++;
+
+ ip_unlock(port);
+ mutex_pause(try_failed_count);
+ }
+ return TASK_READ_NULL;
+}
+
static task_t
convert_port_to_task_locked(
ipc_port_t port,
- uint32_t *exec_token)
+ uint32_t *exec_token,
+ boolean_t eval)
{
task_t task = TASK_NULL;
ip_lock_held(port);
require_ip_active(port);
- if (ip_kotype(port) == IKOT_TASK) {
- task_t ct = current_task();
+ if (ip_kotype(port) == IKOT_TASK_CONTROL) {
task = (task_t) ip_get_kobject(port);
assert(task != TASK_NULL);
- if (task_conversion_eval(ct, task)) {
+ if (eval && task_conversion_eval(current_task(), task)) {
return TASK_NULL;
}
if (exec_token) {
*exec_token = task->exec_token;
}
+
task_reference_internal(task);
}
task_t
convert_port_to_task_with_exec_token(
ipc_port_t port,
- uint32_t *exec_token)
+ uint32_t *exec_token,
+ boolean_t eval)
{
task_t task = TASK_NULL;
if (IP_VALID(port)) {
ip_lock(port);
if (ip_active(port)) {
- task = convert_port_to_task_locked(port, exec_token);
+ task = convert_port_to_task_locked(port, exec_token, eval);
}
ip_unlock(port);
}
convert_port_to_task(
ipc_port_t port)
{
- return convert_port_to_task_with_exec_token(port, NULL);
+ return convert_port_to_task_with_exec_token(port, NULL, TRUE);
}
+/*
+ * Routine: convert_port_to_task_no_eval
+ * Purpose:
+ * Convert from a port to a task, skips task_conversion_eval.
+ * Doesn't consume the port ref; produces a task ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+static task_t
+convert_port_to_task_no_eval(
+ ipc_port_t port)
+{
+ return convert_port_to_task_with_exec_token(port, NULL, FALSE);
+}
/*
* Routine: convert_port_to_task_name
* Conditions:
* Nothing locked.
*/
-task_name_t
-convert_port_to_task_name(
- ipc_port_t port)
-{
- task_name_t task = TASK_NULL;
-
- if (IP_VALID(port)) {
- ip_lock(port);
-
- if (ip_active(port) &&
- (ip_kotype(port) == IKOT_TASK ||
- ip_kotype(port) == IKOT_TASK_NAME)) {
- task = (task_name_t) ip_get_kobject(port);
- assert(task != TASK_NAME_NULL);
-
- task_reference_internal(task);
- }
-
- ip_unlock(port);
- }
-
- return task;
-}
-static task_inspect_t
-convert_port_to_task_inspect_locked(
+static task_name_t
+convert_port_to_task_name_locked(
ipc_port_t port)
{
- task_inspect_t task = TASK_INSPECT_NULL;
+ task_name_t task = TASK_NAME_NULL;
ip_lock_held(port);
require_ip_active(port);
- if (ip_kotype(port) == IKOT_TASK) {
- task = (task_inspect_t) ip_get_kobject(port);
- assert(task != TASK_INSPECT_NULL);
+ if (ip_kotype(port) == IKOT_TASK_CONTROL ||
+ ip_kotype(port) == IKOT_TASK_READ ||
+ ip_kotype(port) == IKOT_TASK_INSPECT ||
+ ip_kotype(port) == IKOT_TASK_NAME) {
+ task = (task_name_t) ip_get_kobject(port);
+ assert(task != TASK_NAME_NULL);
task_reference_internal(task);
}
return task;
}
-/*
- * Routine: convert_port_to_task_inspect
- * Purpose:
- * Convert from a port to a task inspection right
- * Doesn't consume the port ref; produces a task ref,
- * which may be null.
- * Conditions:
- * Nothing locked.
- */
-task_inspect_t
-convert_port_to_task_inspect(
+task_name_t
+convert_port_to_task_name(
ipc_port_t port)
{
- task_inspect_t task = TASK_INSPECT_NULL;
+ task_name_t task = TASK_NULL;
if (IP_VALID(port)) {
ip_lock(port);
if (ip_active(port)) {
- task = convert_port_to_task_inspect_locked(port);
+ task = convert_port_to_task_name_locked(port);
}
ip_unlock(port);
}
}
/*
- * Routine: convert_port_to_task_suspension_token
+ * Routine: convert_port_to_task_policy
* Purpose:
- * Convert from a port to a task suspension token.
- * Doesn't consume the port ref; produces a suspension token ref,
+ * Convert from a port to a task.
+ * Doesn't consume the port ref; produces a task ref,
* which may be null.
+ * If the port is being used with task_policy_set(), any task port
+ * type other than TASK_CONTROL requires an entitlement. If the
+ * port is being used with task_policy_get(), TASK_NAME requires an
+ * entitlement.
* Conditions:
* Nothing locked.
*/
-task_suspension_token_t
-convert_port_to_task_suspension_token(
- ipc_port_t port)
+static task_t
+convert_port_to_task_policy(ipc_port_t port, boolean_t set)
{
- task_suspension_token_t task = TASK_NULL;
+ task_t task = TASK_NULL;
+ task_t ctask = current_task();
- if (IP_VALID(port)) {
- ip_lock(port);
+ if (!IP_VALID(port)) {
+ return TASK_NULL;
+ }
- if (ip_active(port) &&
- ip_kotype(port) == IKOT_TASK_RESUME) {
- task = (task_suspension_token_t) ip_get_kobject(port);
- assert(task != TASK_NULL);
+ task = set ?
+ convert_port_to_task(port) :
+ convert_port_to_task_inspect(port);
- task_reference_internal(task);
- }
+ if (task == TASK_NULL &&
+ IOTaskHasEntitlement(ctask, "com.apple.private.task_policy")) {
+ task = convert_port_to_task_name(port);
+ }
- ip_unlock(port);
+ if (task_conversion_eval(ctask, task) != KERN_SUCCESS) {
+ task_deallocate(task);
+ return TASK_NULL;
}
return task;
}
-/*
- * Routine: convert_port_to_space
- * Purpose:
- * Convert from a port to a space.
- * Doesn't consume the port ref; produces a space ref,
- * which may be null.
- * Conditions:
- * Nothing locked.
- */
-ipc_space_t
-convert_port_to_space(
- ipc_port_t port)
+task_policy_set_t
+convert_port_to_task_policy_set(ipc_port_t port)
{
- ipc_space_t space;
- task_t task;
+ return convert_port_to_task_policy(port, true);
+}
- task = convert_port_to_locked_task(port);
+task_policy_get_t
+convert_port_to_task_policy_get(ipc_port_t port)
+{
+ return convert_port_to_task_policy(port, false);
+}
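+/*
+ * Hedged note: the _set/_get wrappers above are intended as the
+ * translation helpers behind task_policy_set()/task_policy_get();
+ * _set demands a control port, _get also accepts read/inspect
+ * ports, and either accepts a name port only with the
+ * com.apple.private.task_policy entitlement.
+ */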
- if (task == TASK_NULL) {
- return IPC_SPACE_NULL;
+static task_inspect_t
+convert_port_to_task_inspect_locked(
+ ipc_port_t port)
+{
+ task_inspect_t task = TASK_INSPECT_NULL;
+
+ ip_lock_held(port);
+ require_ip_active(port);
+
+ if (ip_kotype(port) == IKOT_TASK_CONTROL ||
+ ip_kotype(port) == IKOT_TASK_READ ||
+ ip_kotype(port) == IKOT_TASK_INSPECT) {
+ task = (task_inspect_t) ip_get_kobject(port);
+ assert(task != TASK_INSPECT_NULL);
+
+ task_reference_internal(task);
}
- if (!task->active) {
- task_unlock(task);
- return IPC_SPACE_NULL;
+ return task;
+}
+
+static task_read_t
+convert_port_to_task_read_locked(
+ ipc_port_t port,
+ boolean_t eval)
+{
+ task_read_t task = TASK_READ_NULL;
+
+ ip_lock_held(port);
+ require_ip_active(port);
+
+ if (ip_kotype(port) == IKOT_TASK_CONTROL ||
+ ip_kotype(port) == IKOT_TASK_READ) {
+ task_t ct = current_task();
+ task = (task_read_t)ipc_kobject_get(port);
+
+ assert(task != TASK_READ_NULL);
+
+ if (eval && task_conversion_eval(ct, task)) {
+ return TASK_READ_NULL;
+ }
+
+ task_reference_internal(task);
}
- space = task->itk_space;
- is_reference(space);
- task_unlock(task);
+ return task;
+}
+
+/*
+ * Routine: convert_port_to_task_check_type
+ * Purpose:
+ * Convert from a port to a task based on port's type.
+ * Doesn't consume the port ref; produces a task ref,
+ * which may be null.
+ * Arguments:
+ * port: The port that we do conversion on
+ * kotype: Returns the IKOT_TYPE of the port, if translation succeeded
+ * at_most: The lowest capability flavor allowed. In mach_task_flavor_t,
+ * the higher the flavor number, the lesser the capability, hence the name.
+ * eval_check: Whether to run task_conversion_eval check during the conversion.
+ *              For backward compatibility, some interfaces do not run conversion
+ * eval on IKOT_TASK_CONTROL.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * task_t and port's type, if translation succeeded;
+ * TASK_NULL and IKOT_NONE, if translation failed
+ */
+task_t
+convert_port_to_task_check_type(
+ ipc_port_t port,
+ ipc_kobject_type_t *kotype,
+ mach_task_flavor_t at_most,
+ boolean_t eval_check)
+{
+ task_t task = TASK_NULL;
+ ipc_kobject_type_t type = IKOT_NONE;
+
+ if (!IP_VALID(port) || !ip_active(port)) {
+ goto out;
+ }
+
+ switch (ip_kotype(port)) {
+ case IKOT_TASK_CONTROL:
+ task = eval_check ? convert_port_to_task(port) : convert_port_to_task_no_eval(port);
+ if (task != TASK_NULL) {
+ type = IKOT_TASK_CONTROL;
+ }
+ break;
+ case IKOT_TASK_READ:
+ if (at_most >= TASK_FLAVOR_READ) {
+ task = eval_check ? convert_port_to_task_read(port) : convert_port_to_task_read_no_eval(port);
+ if (task != TASK_READ_NULL) {
+ type = IKOT_TASK_READ;
+ }
+ }
+ break;
+ case IKOT_TASK_INSPECT:
+ if (at_most >= TASK_FLAVOR_INSPECT) {
+ task = convert_port_to_task_inspect(port);
+ if (task != TASK_INSPECT_NULL) {
+ type = IKOT_TASK_INSPECT;
+ }
+ }
+ break;
+ case IKOT_TASK_NAME:
+ if (at_most >= TASK_FLAVOR_NAME) {
+ task = convert_port_to_task_name(port);
+ if (task != TASK_NAME_NULL) {
+ type = IKOT_TASK_NAME;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+out:
+ if (kotype) {
+ *kotype = type;
+ }
+ return task;
+}
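+/*
+ * e.g. (hedged): task_get_special_port_from_user() above calls
+ *	convert_port_to_task_check_type(port, &kotype,
+ *	    TASK_FLAVOR_INSPECT, FALSE);
+ * accepting control/read/inspect ports (but not name ports) and
+ * skipping the conversion eval for backward compatibility.
+ */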
+
+/*
+ * Routine: convert_port_to_thread_check_type
+ * Purpose:
+ * Convert from a port to a thread based on port's type.
+ * Doesn't consume the port ref; produces a thread ref,
+ * which may be null.
+ * This conversion routine is _ONLY_ supposed to be used
+ * by thread_get_special_port.
+ * Arguments:
+ * port: The port that we do conversion on
+ * kotype: Returns the IKOT_TYPE of the port, if translation succeeded
+ * at_most: The lowest capability flavor allowed. In mach_thread_flavor_t,
+ * the higher the flavor number, the lesser the capability, hence the name.
+ * eval_check: Whether to run task_conversion_eval check during the conversion.
+ * For backward compatibility, some interfaces do not run
+ * conversion eval on IKOT_THREAD_CONTROL.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * thread_t and port's type, if translation succeeded;
+ * THREAD_NULL and IKOT_NONE, if translation failed
+ */
+thread_t
+convert_port_to_thread_check_type(
+ ipc_port_t port,
+ ipc_kobject_type_t *kotype,
+ mach_thread_flavor_t at_most,
+ boolean_t eval_check)
+{
+ thread_t thread = THREAD_NULL;
+ ipc_kobject_type_t type = IKOT_NONE;
+
+ if (!IP_VALID(port) || !ip_active(port)) {
+ goto out;
+ }
+
+ switch (ip_kotype(port)) {
+ case IKOT_THREAD_CONTROL:
+ thread = eval_check ? convert_port_to_thread(port) : convert_port_to_thread_no_eval(port);
+ if (thread != THREAD_NULL) {
+ type = IKOT_THREAD_CONTROL;
+ }
+ break;
+ case IKOT_THREAD_READ:
+ if (at_most >= THREAD_FLAVOR_READ) {
+ thread = eval_check ? convert_port_to_thread_read(port) : convert_port_to_thread_read_no_eval(port);
+ if (thread != THREAD_READ_NULL) {
+ type = IKOT_THREAD_READ;
+ }
+ }
+ break;
+ case IKOT_THREAD_INSPECT:
+ if (at_most >= THREAD_FLAVOR_INSPECT) {
+ thread = convert_port_to_thread_inspect(port);
+ if (thread != THREAD_INSPECT_NULL) {
+ type = IKOT_THREAD_INSPECT;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+out:
+ if (kotype) {
+ *kotype = type;
+ }
+ return thread;
+}
+
+/*
+ * Routine: convert_port_to_space_check_type
+ * Purpose:
+ * Convert from a port to a space based on port's type.
+ * Doesn't consume the port ref; produces a space ref,
+ * which may be null.
+ * Arguments:
+ * port: The port that we do conversion on
+ * kotype: Returns the IKOT_TYPE of the port, if translation succeeded
+ * at_most: The lowest capability flavor allowed. In mach_task_flavor_t,
+ * the higher the flavor number, the lesser the capability, hence the name.
+ * eval_check: Whether to run task_conversion_eval check during the conversion.
+ * For backward compatibility, some interfaces do not run
+ * conversion eval on IKOT_TASK_CONTROL.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * ipc_space_t and port's type, if translation succeeded;
+ * IPC_SPACE_NULL and IKOT_NONE, if translation failed
+ */
+ipc_space_t
+convert_port_to_space_check_type(
+ ipc_port_t port,
+ ipc_kobject_type_t *kotype,
+ mach_task_flavor_t at_most,
+ boolean_t eval_check)
+{
+ ipc_space_t space = IPC_SPACE_NULL;
+ ipc_kobject_type_t type = IKOT_NONE;
+
+ if (!IP_VALID(port) || !ip_active(port)) {
+ goto out;
+ }
+
+ switch (ip_kotype(port)) {
+ case IKOT_TASK_CONTROL:
+ space = eval_check ? convert_port_to_space(port) : convert_port_to_space_no_eval(port);
+ if (space != IPC_SPACE_NULL) {
+ type = IKOT_TASK_CONTROL;
+ }
+ break;
+ case IKOT_TASK_READ:
+ if (at_most >= TASK_FLAVOR_READ) {
+ space = eval_check ? convert_port_to_space_read(port) : convert_port_to_space_read_no_eval(port);
+ if (space != IPC_SPACE_READ_NULL) {
+ type = IKOT_TASK_READ;
+ }
+ }
+ break;
+ case IKOT_TASK_INSPECT:
+ if (at_most >= TASK_FLAVOR_INSPECT) {
+ space = convert_port_to_space_inspect(port);
+ if (space != IPC_SPACE_INSPECT_NULL) {
+ type = IKOT_TASK_INSPECT;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+out:
+ if (kotype) {
+ *kotype = type;
+ }
return space;
}
/*
- * Routine: convert_port_to_space_inspect
+ * Routine: convert_port_to_task_inspect
* Purpose:
- * Convert from a port to a space inspect right.
- * Doesn't consume the port ref; produces a space inspect ref,
+ * Convert from a port to a task inspection right
+ * Doesn't consume the port ref; produces a task ref,
* which may be null.
* Conditions:
* Nothing locked.
*/
-ipc_space_inspect_t
-convert_port_to_space_inspect(
- ipc_port_t port)
+task_inspect_t
+convert_port_to_task_inspect(
+ ipc_port_t port)
+{
+ task_inspect_t task = TASK_INSPECT_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port)) {
+ task = convert_port_to_task_inspect_locked(port);
+ }
+ ip_unlock(port);
+ }
+
+ return task;
+}
+
+/*
+ * Routine: convert_port_to_task_read
+ * Purpose:
+ * Convert from a port to a task read right
+ * Doesn't consume the port ref; produces a task ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+task_read_t
+convert_port_to_task_read(
+ ipc_port_t port)
{
- ipc_space_inspect_t space;
- task_inspect_t task;
+ task_read_t task = TASK_READ_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port)) {
+ task = convert_port_to_task_read_locked(port, TRUE);
+ }
+ ip_unlock(port);
+ }
- task = convert_port_to_locked_task_inspect(port);
+ return task;
+}
- if (task == TASK_INSPECT_NULL) {
- return IPC_SPACE_INSPECT_NULL;
+static task_read_t
+convert_port_to_task_read_no_eval(
+ ipc_port_t port)
+{
+ task_read_t task = TASK_READ_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port)) {
+ task = convert_port_to_task_read_locked(port, FALSE);
+ }
+ ip_unlock(port);
+ }
+
+ return task;
+}
+
+/*
+ * Routine: convert_port_to_task_suspension_token
+ * Purpose:
+ * Convert from a port to a task suspension token.
+ * Doesn't consume the port ref; produces a suspension token ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+task_suspension_token_t
+convert_port_to_task_suspension_token(
+ ipc_port_t port)
+{
+ task_suspension_token_t task = TASK_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+
+ if (ip_active(port) &&
+ ip_kotype(port) == IKOT_TASK_RESUME) {
+ task = (task_suspension_token_t) ip_get_kobject(port);
+ assert(task != TASK_NULL);
+
+ task_reference_internal(task);
+ }
+
+ ip_unlock(port);
+ }
+
+ return task;
+}
+
+/*
+ * Routine: convert_port_to_space_with_flavor
+ * Purpose:
+ * Convert from a port to a space.
+ * Doesn't consume the port ref; produces a space ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+static ipc_space_t
+convert_port_to_space_with_flavor(
+ ipc_port_t port,
+ mach_task_flavor_t flavor,
+ boolean_t eval)
+{
+ ipc_space_t space;
+ task_t task;
+
+ switch (flavor) {
+ case TASK_FLAVOR_CONTROL:
+ task = convert_port_to_locked_task(port, eval);
+ break;
+ case TASK_FLAVOR_READ:
+ task = convert_port_to_locked_task_read(port, eval);
+ break;
+ case TASK_FLAVOR_INSPECT:
+ task = convert_port_to_locked_task_inspect(port);
+ break;
+ default:
+ task = TASK_NULL;
+ break;
+ }
+
+ if (task == TASK_NULL) {
+ return IPC_SPACE_NULL;
}
if (!task->active) {
task_unlock(task);
- return IPC_SPACE_INSPECT_NULL;
+ return IPC_SPACE_NULL;
}
- space = (ipc_space_inspect_t)task->itk_space;
- is_reference((ipc_space_t)space);
- task_unlock((task_t)task);
+ space = task->itk_space;
+ is_reference(space);
+ task_unlock(task);
return space;
}
+ipc_space_t
+convert_port_to_space(
+ ipc_port_t port)
+{
+ return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL, TRUE);
+}
+
+static ipc_space_t
+convert_port_to_space_no_eval(
+ ipc_port_t port)
+{
+ return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL, FALSE);
+}
+
+ipc_space_read_t
+convert_port_to_space_read(
+ ipc_port_t port)
+{
+ return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ, TRUE);
+}
+
+static ipc_space_read_t
+convert_port_to_space_read_no_eval(
+ ipc_port_t port)
+{
+ return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ, FALSE);
+}
+
+ipc_space_inspect_t
+convert_port_to_space_inspect(
+ ipc_port_t port)
+{
+ return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT, TRUE);
+}
+
/*
- * Routine: convert_port_to_map
+ * Routine: convert_port_to_map_with_flavor
* Purpose:
* Convert from a port to a map.
* Doesn't consume the port ref; produces a map ref,
* Nothing locked.
*/
-vm_map_t
-convert_port_to_map(
- ipc_port_t port)
+static vm_map_t
+convert_port_to_map_with_flavor(
+ ipc_port_t port,
+ mach_task_flavor_t flavor)
{
task_t task;
vm_map_t map;
- task = convert_port_to_locked_task(port);
+ switch (flavor) {
+ case TASK_FLAVOR_CONTROL:
+ task = convert_port_to_locked_task(port, TRUE); /* always eval */
+ break;
+ case TASK_FLAVOR_READ:
+ task = convert_port_to_locked_task_read(port, TRUE); /* always eval */
+ break;
+ case TASK_FLAVOR_INSPECT:
+ task = convert_port_to_locked_task_inspect(port); /* always no eval */
+ break;
+ default:
+ task = TASK_NULL;
+ break;
+ }
if (task == TASK_NULL) {
return VM_MAP_NULL;
}
map = task->map;
- vm_map_reference_swap(map);
+ if (map->pmap == kernel_pmap) {
+ if (flavor == TASK_FLAVOR_CONTROL) {
+ panic("userspace has control access to a "
+ "kernel map %p through task %p", map, task);
+ }
+ if (task != kernel_task) {
+ panic("userspace has access to a "
+ "kernel map %p through task %p", map, task);
+ }
+ } else {
+ pmap_require(map->pmap);
+ }
+
+ vm_map_reference(map);
task_unlock(task);
return map;
}
+vm_map_t
+convert_port_to_map(
+ ipc_port_t port)
+{
+ return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL);
+}
+
+vm_map_read_t
+convert_port_to_map_read(
+ ipc_port_t port)
+{
+ return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ);
+}
+
+vm_map_inspect_t
+convert_port_to_map_inspect(
+ ipc_port_t port)
+{
+ return convert_port_to_map_with_flavor(port, TASK_FLAVOR_INSPECT);
+}
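+
+/*
+ * Note: the pmap checks in convert_port_to_map_with_flavor() are defense
+ * in depth. A sketch of the invariant being enforced (illustrative only):
+ *
+ *     vm_map_t map = convert_port_to_map(task_port); // TASK_FLAVOR_CONTROL
+ *     // panics if the resulting map is the kernel map, since userspace
+ *     // must never obtain control access to it; for all other maps,
+ *     // pmap_require() validates that map->pmap is a genuine pmap.
+ */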
+
/*
* Routine: convert_port_to_thread
static thread_t
convert_port_to_thread_locked(
ipc_port_t port,
- port_to_thread_options_t options)
+ port_to_thread_options_t options,
+ boolean_t eval)
{
thread_t thread = THREAD_NULL;
ip_lock_held(port);
require_ip_active(port);
- if (ip_kotype(port) == IKOT_THREAD) {
+ if (ip_kotype(port) == IKOT_THREAD_CONTROL) {
thread = (thread_t) ip_get_kobject(port);
assert(thread != THREAD_NULL);
}
} else {
/* Use task conversion rules for thread control conversions */
- if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
+ if (eval && task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
return THREAD_NULL;
}
}
if (IP_VALID(port)) {
ip_lock(port);
if (ip_active(port)) {
- thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE);
+ thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE, TRUE);
+ }
+ ip_unlock(port);
+ }
+
+ return thread;
+}
+
+static thread_t
+convert_port_to_thread_no_eval(
+ ipc_port_t port)
+{
+ thread_t thread = THREAD_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port)) {
+ thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE, FALSE);
}
ip_unlock(port);
}
/*
* Routine: convert_port_to_thread_inspect
* Purpose:
- * Convert from a port to a thread inspection right
+ * Convert from a port to a thread inspect right
* Doesn't consume the port ref; produces a thread ref,
* which may be null.
* Conditions:
* Nothing locked.
*/
+static thread_inspect_t
+convert_port_to_thread_inspect_locked(
+ ipc_port_t port)
+{
+ thread_inspect_t thread = THREAD_INSPECT_NULL;
+
+ ip_lock_held(port);
+ require_ip_active(port);
+
+ if (ip_kotype(port) == IKOT_THREAD_CONTROL ||
+ ip_kotype(port) == IKOT_THREAD_READ ||
+ ip_kotype(port) == IKOT_THREAD_INSPECT) {
+ thread = (thread_inspect_t)ipc_kobject_get(port);
+ assert(thread != THREAD_INSPECT_NULL);
+ thread_reference_internal((thread_t)thread);
+ }
+
+ return thread;
+}
+
thread_inspect_t
convert_port_to_thread_inspect(
ipc_port_t port)
if (IP_VALID(port)) {
ip_lock(port);
+ if (ip_active(port)) {
+ thread = convert_port_to_thread_inspect_locked(port);
+ }
+ ip_unlock(port);
+ }
- if (ip_active(port) &&
- ip_kotype(port) == IKOT_THREAD) {
- thread = (thread_inspect_t) ip_get_kobject(port);
- assert(thread != THREAD_INSPECT_NULL);
- thread_reference_internal((thread_t)thread);
+ return thread;
+}
+
+/*
+ * Routine: convert_port_to_thread_read
+ * Purpose:
+ * Convert from a port to a thread read right
+ * Doesn't consume the port ref; produces a thread ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+static thread_read_t
+convert_port_to_thread_read_locked(
+ ipc_port_t port,
+ boolean_t eval)
+{
+ thread_read_t thread = THREAD_READ_NULL;
+
+ ip_lock_held(port);
+ require_ip_active(port);
+
+ if (ip_kotype(port) == IKOT_THREAD_CONTROL ||
+ ip_kotype(port) == IKOT_THREAD_READ) {
+ thread = (thread_read_t) ip_get_kobject(port);
+ assert(thread != THREAD_READ_NULL);
+
+ /* Use task conversion rules for thread control conversions */
+ if (eval && task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
+ return THREAD_READ_NULL;
+ }
+
+ thread_reference_internal((thread_t)thread);
+ }
+
+ return thread;
+}
+
+thread_read_t
+convert_port_to_thread_read(
+ ipc_port_t port)
+{
+ thread_read_t thread = THREAD_READ_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port)) {
+ thread = convert_port_to_thread_read_locked(port, TRUE);
}
ip_unlock(port);
}
return thread;
}
+static thread_read_t
+convert_port_to_thread_read_no_eval(
+ ipc_port_t port)
+{
+ thread_read_t thread = THREAD_READ_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+ if (ip_active(port)) {
+ thread = convert_port_to_thread_read_locked(port, FALSE);
+ }
+ ip_unlock(port);
+ }
+
+ return thread;
+}
+
/*
- * Routine: convert_thread_inspect_to_port
+ * Routine: convert_thread_to_port_with_flavor
* Purpose:
- * Convert from a thread inspect reference to a port.
- * Consumes a thread ref;
- * As we never export thread inspect ports, always
- * creates a NULL port.
+ * Convert from a thread to a port of the given flavor.
+ * Consumes a thread ref; produces a naked send right
+ * which may be invalid.
* Conditions:
* Nothing locked.
*/
+static ipc_port_t
+convert_thread_to_port_with_flavor(
+ thread_t thread,
+ mach_thread_flavor_t flavor)
+{
+ ipc_port_t port = IP_NULL;
+
+ thread_mtx_lock(thread);
+
+ if (!thread->ipc_active) {
+ goto exit;
+ }
+
+ if (flavor == THREAD_FLAVOR_CONTROL) {
+ port = ipc_port_make_send(thread->ith_thread_ports[flavor]);
+ } else {
+ ipc_kobject_type_t kotype = (flavor == THREAD_FLAVOR_READ) ? IKOT_THREAD_READ : IKOT_THREAD_INSPECT;
+ /*
+ * Claim a send right on the thread read/inspect port, and request a no-senders
+ * notification on that port (if none outstanding). A thread reference is not
+ * donated here even though the port is created lazily, because the port does
+ * not own the kobject it points to. The thread manages its lifetime
+ * explicitly; the race between thread termination and the no-senders
+ * notification firing is resolved under the thread mutex rather than with
+ * atomics.
+ */
+ (void)ipc_kobject_make_send_lazy_alloc_port(&thread->ith_thread_ports[flavor], (ipc_kobject_t)thread,
+ kotype, IPC_KOBJECT_ALLOC_IMMOVABLE_SEND, false, 0);
+ port = thread->ith_thread_ports[flavor];
+ }
+
+exit:
+ thread_mtx_unlock(thread);
+ thread_deallocate(thread);
+ return port;
+}
+
+ipc_port_t
+convert_thread_to_port(
+ thread_t thread)
+{
+ return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_CONTROL);
+}
+
+ipc_port_t
+convert_thread_read_to_port(thread_read_t thread)
+{
+ return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_READ);
+}
ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)
{
- thread_deallocate(thread);
- return IP_NULL;
+ return convert_thread_to_port_with_flavor(thread, THREAD_FLAVOR_INSPECT);
}
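+
+/*
+ * Sketch: these converters act as the MIG translation functions for the
+ * thread flavors. Assumed .defs pairing (illustrative, not taken from this
+ * change):
+ *
+ *     type thread_read_t = mach_port_t
+ *             intran: thread_read_t convert_port_to_thread_read(mach_port_t)
+ *             outtran: mach_port_t convert_thread_read_to_port(thread_read_t)
+ *             destructor: thread_deallocate(thread_read_t)
+ */
+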
/*
* Routine: port_name_to_thread
* Purpose:
- * Convert from a port name to an thread reference
+ * Convert from a port name to a thread reference
* A name of MACH_PORT_NULL is valid for the null thread.
* Conditions:
* Nothing locked.
if (MACH_PORT_VALID(name)) {
kr = ipc_port_translate_send(current_space(), name, &kport);
if (kr == KERN_SUCCESS) {
- thread = convert_port_to_thread_locked(kport, options);
+ thread = convert_port_to_thread_locked(kport, options, TRUE);
ip_unlock(kport);
}
}
return thread;
}
+/*
+ * Routine: port_name_to_task
+ * Purpose:
+ * Convert from a port name to a task reference
+ * A name of MACH_PORT_NULL is valid for the null task.
+ * Conditions:
+ * Nothing locked.
+ */
task_t
port_name_to_task(
mach_port_name_t name)
if (MACH_PORT_VALID(name)) {
kr = ipc_port_translate_send(current_space(), name, &kport);
if (kr == KERN_SUCCESS) {
- task = convert_port_to_task_locked(kport, NULL);
+ task = convert_port_to_task_locked(kport, NULL, TRUE);
ip_unlock(kport);
}
}
return task;
}
-task_inspect_t
-port_name_to_task_inspect(
+/*
+ * Routine: port_name_to_task_read
+ * Purpose:
+ * Convert from a port name to a task read reference
+ * A name of MACH_PORT_NULL is valid for the null task.
+ * Conditions:
+ * Nothing locked.
+ */
+task_read_t
+port_name_to_task_read(
+ mach_port_name_t name)
+{
+ ipc_port_t kport;
+ kern_return_t kr;
+ task_read_t tr = TASK_READ_NULL;
+
+ if (MACH_PORT_VALID(name)) {
+ kr = ipc_port_translate_send(current_space(), name, &kport);
+ if (kr == KERN_SUCCESS) {
+ tr = convert_port_to_task_read_locked(kport, TRUE);
+ ip_unlock(kport);
+ }
+ }
+ return tr;
+}
+
+/*
+ * Routine: port_name_to_task_read_no_eval
+ * Purpose:
+ * Convert from a port name to a task read reference
+ * A name of MACH_PORT_NULL is valid for the null task.
+ * Skips task_conversion_eval() during conversion.
+ * Conditions:
+ * Nothing locked.
+ */
+task_read_t
+port_name_to_task_read_no_eval(
+ mach_port_name_t name)
+{
+ ipc_port_t kport;
+ kern_return_t kr;
+ task_read_t tr = TASK_READ_NULL;
+
+ if (MACH_PORT_VALID(name)) {
+ kr = ipc_port_translate_send(current_space(), name, &kport);
+ if (kr == KERN_SUCCESS) {
+ tr = convert_port_to_task_read_locked(kport, FALSE);
+ ip_unlock(kport);
+ }
+ }
+ return tr;
+}
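+
+/*
+ * Note: the _no_eval variants skip task_conversion_eval(), the policy check
+ * that keeps third parties from materializing references to platform
+ * binaries. They are intended for info-only interfaces such as the
+ * *_get_exception_ports_info() routines below. Illustrative:
+ *
+ *     task_read_t t  = port_name_to_task_read(name);          // policy applied
+ *     task_read_t t2 = port_name_to_task_read_no_eval(name);  // policy skipped
+ */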
+
+/*
+ * Routine: port_name_to_task_name
+ * Purpose:
+ * Convert from a port name to a task name reference
+ * A name of MACH_PORT_NULL is valid for the null task.
+ * Conditions:
+ * Nothing locked.
+ */
+task_name_t
+port_name_to_task_name(
mach_port_name_t name)
{
ipc_port_t kport;
kern_return_t kr;
- task_inspect_t ti = TASK_INSPECT_NULL;
+ task_name_t tn = TASK_NAME_NULL;
if (MACH_PORT_VALID(name)) {
kr = ipc_port_translate_send(current_space(), name, &kport);
if (kr == KERN_SUCCESS) {
- ti = convert_port_to_task_inspect_locked(kport);
+ tn = convert_port_to_task_name_locked(kport);
ip_unlock(kport);
}
}
- return ti;
+ return tn;
}
/*
return host;
}
-/*
- * Routine: convert_task_to_port
- * Purpose:
- * Convert from a task to a port.
- * Consumes a task ref; produces a naked send right
- * which may be invalid.
- * Conditions:
- * Nothing locked.
- */
+/*
+ * Routine: convert_task_to_port_with_flavor
+ * Purpose:
+ * Convert from a task to a port of the given flavor.
+ * Consumes a task ref; produces a naked send right
+ * which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+static ipc_port_t
+convert_task_to_port_with_flavor(
+ task_t task,
+ mach_task_flavor_t flavor)
+{
+ ipc_port_t port = IP_NULL;
+ ipc_kobject_type_t kotype = IKOT_NONE;
+
+ itk_lock(task);
+
+ if (!task->ipc_active) {
+ goto exit;
+ }
+
+ switch (flavor) {
+ case TASK_FLAVOR_CONTROL:
+ case TASK_FLAVOR_NAME:
+ port = ipc_port_make_send(task->itk_task_ports[flavor]);
+ break;
+ /*
+ * Claim a send right on the task read/inspect port, and request a no-senders
+ * notification on that port (if none outstanding). A task reference is
+ * deliberately not donated here because ipc_kobject_make_send_lazy_alloc_port
+ * is used only for convenience and these ports don't control the lifecycle of
+ * the task kobject. Instead, the task's itk_lock is used to synchronize the
+ * handling of the no-senders notification with the task termination.
+ */
+ case TASK_FLAVOR_READ:
+ case TASK_FLAVOR_INSPECT:
+ kotype = (flavor == TASK_FLAVOR_READ) ? IKOT_TASK_READ : IKOT_TASK_INSPECT;
+ (void)ipc_kobject_make_send_lazy_alloc_port((ipc_port_t *) &task->itk_task_ports[flavor],
+ (ipc_kobject_t)task, kotype, IPC_KOBJECT_ALLOC_IMMOVABLE_SEND, true,
+ OS_PTRAUTH_DISCRIMINATOR("task.itk_task_ports"));
+ port = task->itk_task_ports[flavor];
+
+ break;
+ }
+
+exit:
+ itk_unlock(task);
+ task_deallocate(task);
+ return port;
+}
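+
+/*
+ * Sketch of the lazy-allocation pattern above (illustrative): the first
+ * conversion for a given flavor allocates the read/inspect port and arms a
+ * no-senders notification; subsequent conversions only produce another send
+ * right on the cached port:
+ *
+ *     ipc_port_t p1 = convert_task_read_to_port(ref1);
+ *     ipc_port_t p2 = convert_task_read_to_port(ref2);
+ *     // p1 == p2 for the same task: both name
+ *     // task->itk_task_ports[TASK_FLAVOR_READ]
+ */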
+
+ipc_port_t
+convert_task_to_port(
+ task_t task)
+{
+ return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL);
+}
+
+ipc_port_t
+convert_task_read_to_port(
+ task_read_t task)
+{
+ return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ);
+}
+
+ipc_port_t
+convert_task_inspect_to_port(
+ task_inspect_t task)
+{
+ return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT);
+}
ipc_port_t
-convert_task_to_port(
+convert_task_name_to_port(
+ task_name_t task)
+{
+ return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME);
+}
+
+ipc_port_t
+convert_task_to_port_pinned(
task_t task)
{
- ipc_port_t port;
+ ipc_port_t port = IP_NULL;
itk_lock(task);
- if (task->itk_self != IP_NULL) {
+ if (task->ipc_active && task->itk_self != IP_NULL) {
port = ipc_port_make_send(task->itk_self);
- } else {
- port = IP_NULL;
}
itk_unlock(task);
-
task_deallocate(task);
return port;
}
-
-/*
- * Routine: convert_task_inspect_to_port
- * Purpose:
- * Convert from a task inspect reference to a port.
- * Consumes a task ref;
- * As we never export task inspect ports, always
- * creates a NULL port.
- * Conditions:
- * Nothing locked.
- */
-ipc_port_t
-convert_task_inspect_to_port(
- task_inspect_t task)
-{
- task_deallocate(task);
-
- return IP_NULL;
-}
-
/*
* Routine: convert_task_suspend_token_to_port
* Purpose:
return port;
}
-
-/*
- * Routine: convert_task_name_to_port
- * Purpose:
- * Convert from a task name ref to a port.
- * Consumes a task name ref; produces a naked send right
- * which may be invalid.
- * Conditions:
- * Nothing locked.
- */
-
-ipc_port_t
-convert_task_name_to_port(
- task_name_t task_name)
-{
- ipc_port_t port;
-
- itk_lock(task_name);
- if (task_name->itk_nself != IP_NULL) {
- port = ipc_port_make_send(task_name->itk_nself);
- } else {
- port = IP_NULL;
- }
- itk_unlock(task_name);
-
- task_name_deallocate(task_name);
- return port;
-}
-
-/*
- * Routine: convert_thread_to_port
- * Purpose:
- * Convert from a thread to a port.
- * Consumes an thread ref; produces a naked send right
- * which may be invalid.
- * Conditions:
- * Nothing locked.
- */
-
ipc_port_t
-convert_thread_to_port(
+convert_thread_to_port_pinned(
thread_t thread)
{
- ipc_port_t port;
+ ipc_port_t port = IP_NULL;
thread_mtx_lock(thread);
- if (thread->ith_self != IP_NULL) {
+ if (thread->ipc_active && thread->ith_self != IP_NULL) {
port = ipc_port_make_send(thread->ith_self);
- } else {
- port = IP_NULL;
}
thread_mtx_unlock(thread);
-
thread_deallocate(thread);
-
return port;
}
-
/*
* Routine: space_deallocate
* Purpose:
}
}
+/*
+ * Routine: space_read_deallocate
+ * Purpose:
+ * Deallocate a space read ref produced by convert_port_to_space_read.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+space_read_deallocate(
+ ipc_space_read_t space)
+{
+	if (space != IS_READ_NULL) {
+ is_release((ipc_space_t)space);
+ }
+}
+
/*
* Routine: space_inspect_deallocate
* Purpose:
}
}
+
/*
* Routine: thread/task_set_exception_ports [kernel call]
* Purpose:
}
}
+
/*
* Check the validity of the thread_state_flavor by calling the
* VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
}
}
- if (IP_VALID(new_port)) { /* consume send right */
+ if (IP_VALID(new_port)) { /* consume send right */
ipc_port_release_send(new_port);
}
}
}
+
/*
* Check the validity of the thread_state_flavor by calling the
* VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
itk_lock(task);
- if (task->itk_self == IP_NULL) {
+ if (!task->ipc_active) {
itk_unlock(task);
-
return KERN_FAILURE;
}
}
}
- if (IP_VALID(new_port)) { /* consume send right */
+ if (IP_VALID(new_port)) { /* consume send right */
ipc_port_release_send(new_port);
}
}
}
+
if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
return KERN_INVALID_ARGUMENT;
}
if (!thread->active) {
thread_mtx_unlock(thread);
-
+#if CONFIG_MACF
+ mac_exc_free_label(new_label);
+#endif
return KERN_FAILURE;
}
}
}
- if (IP_VALID(new_port)) { /* consume send right */
+ if (IP_VALID(new_port)) { /* consume send right */
ipc_port_release_send(new_port);
}
}
}
+
if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
return KERN_INVALID_ARGUMENT;
}
itk_lock(task);
- if (task->itk_self == IP_NULL) {
+ if (!task->ipc_active) {
itk_unlock(task);
-
+#if CONFIG_MACF
+ mac_exc_free_label(new_label);
+#endif
return KERN_FAILURE;
}
}
}
- if (IP_VALID(new_port)) { /* consume send right */
+ if (IP_VALID(new_port)) { /* consume send right */
ipc_port_release_send(new_port);
}
* Illegal mask bit set.
* KERN_FAILURE The thread is dead.
*/
-
-kern_return_t
-thread_get_exception_ports(
- thread_t thread,
- exception_mask_t exception_mask,
+static kern_return_t
+thread_get_exception_ports_internal(
+ thread_t thread,
+ exception_mask_t exception_mask,
exception_mask_array_t masks,
mach_msg_type_number_t *CountCnt,
+ exception_port_info_array_t ports_info,
exception_port_array_t ports,
exception_behavior_array_t behaviors,
thread_state_flavor_array_t flavors)
{
- unsigned int i, j, count;
+ unsigned int count;
+ boolean_t info_only = (ports_info != NULL);
+ boolean_t dbg_ok = TRUE;
+	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, do not hold rights */
if (thread == THREAD_NULL) {
return KERN_INVALID_ARGUMENT;
return KERN_INVALID_ARGUMENT;
}
+ if (!info_only && !ports) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+#if !(DEVELOPMENT || DEBUG) && CONFIG_MACF
+ if (info_only && mac_task_check_expose_task(kernel_task, TASK_FLAVOR_CONTROL) == 0) {
+ dbg_ok = TRUE;
+ } else {
+ dbg_ok = FALSE;
+ }
+#endif
+
thread_mtx_lock(thread);
if (!thread->active) {
goto done;
}
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
+ for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
if (exception_mask & (1 << i)) {
+ ipc_port_t exc_port = thread->exc_actions[i].port;
+ exception_behavior_t exc_behavior = thread->exc_actions[i].behavior;
+ thread_state_flavor_t exc_flavor = thread->exc_actions[i].flavor;
+
for (j = 0; j < count; ++j) {
/*
* search for an identical entry, if found
* set corresponding mask for this exception.
*/
- if (thread->exc_actions[i].port == ports[j] &&
- thread->exc_actions[i].behavior == behaviors[j] &&
- thread->exc_actions[i].flavor == flavors[j]) {
+ if (exc_port == port_ptrs[j] &&
+ exc_behavior == behaviors[j] &&
+ exc_flavor == flavors[j]) {
masks[j] |= (1 << i);
break;
}
}
- if (j == count) {
+ if (j == count && count < *CountCnt) {
masks[j] = (1 << i);
- ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
- behaviors[j] = thread->exc_actions[i].behavior;
- flavors[j] = thread->exc_actions[i].flavor;
- ++count;
- if (count >= *CountCnt) {
- break;
+ port_ptrs[j] = exc_port;
+
+ if (info_only) {
+ if (!dbg_ok || !IP_VALID(exc_port)) {
+ /* avoid taking port lock if !dbg_ok */
+ ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
+ } else {
+ uintptr_t receiver;
+ (void)ipc_port_get_receiver_task(exc_port, &receiver);
+ ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
+ ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
+ }
+ } else {
+ ports[j] = ipc_port_copy_send(exc_port);
}
+ behaviors[j] = exc_behavior;
+ flavors[j] = exc_flavor;
+ ++count;
}
}
}
return KERN_SUCCESS;
}
+static kern_return_t
+thread_get_exception_ports(
+ thread_t thread,
+ exception_mask_t exception_mask,
+ exception_mask_array_t masks,
+ mach_msg_type_number_t *CountCnt,
+ exception_port_array_t ports,
+ exception_behavior_array_t behaviors,
+ thread_state_flavor_array_t flavors)
+{
+ return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
+ NULL, ports, behaviors, flavors);
+}
+
kern_return_t
-task_get_exception_ports(
- task_t task,
- exception_mask_t exception_mask,
+thread_get_exception_ports_info(
+ mach_port_t port,
+ exception_mask_t exception_mask,
exception_mask_array_t masks,
mach_msg_type_number_t *CountCnt,
+ exception_port_info_array_t ports_info,
+ exception_behavior_array_t behaviors,
+ thread_state_flavor_array_t flavors)
+{
+ kern_return_t kr;
+
+ thread_t thread = convert_port_to_thread_read_no_eval(port);
+
+ if (thread == THREAD_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
+ ports_info, NULL, behaviors, flavors);
+
+ thread_deallocate(thread);
+ return kr;
+}
+
+kern_return_t
+thread_get_exception_ports_from_user(
+ mach_port_t port,
+ exception_mask_t exception_mask,
+ exception_mask_array_t masks,
+ mach_msg_type_number_t *CountCnt,
exception_port_array_t ports,
exception_behavior_array_t behaviors,
thread_state_flavor_array_t flavors)
{
- unsigned int i, j, count;
+ kern_return_t kr;
+
+ thread_t thread = convert_port_to_thread_no_eval(port);
+
+ if (thread == THREAD_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
+
+ thread_deallocate(thread);
+ return kr;
+}
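+
+/*
+ * Note: the _from_user entry points receive the raw port from the MIG demux
+ * and perform the conversion themselves; reading exception-port state is not
+ * a control operation, so the conversion skips task_conversion_eval().
+ * Hypothetical call path (illustrative):
+ *
+ *     mach_msg() -> MIG demux
+ *         -> thread_get_exception_ports_from_user(port, ...)
+ *             -> convert_port_to_thread_no_eval(port)
+ *             -> thread_get_exception_ports(thread, ...)
+ */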
+
+static kern_return_t
+task_get_exception_ports_internal(
+ task_t task,
+ exception_mask_t exception_mask,
+ exception_mask_array_t masks,
+ mach_msg_type_number_t *CountCnt,
+ exception_port_info_array_t ports_info,
+ exception_port_array_t ports,
+ exception_behavior_array_t behaviors,
+ thread_state_flavor_array_t flavors)
+{
+ unsigned int count;
+ boolean_t info_only = (ports_info != NULL);
+ boolean_t dbg_ok = TRUE;
+	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, do not hold rights */
if (task == TASK_NULL) {
return KERN_INVALID_ARGUMENT;
return KERN_INVALID_ARGUMENT;
}
+ if (!info_only && !ports) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+#if !(DEVELOPMENT || DEBUG) && CONFIG_MACF
+ if (info_only && mac_task_check_expose_task(kernel_task, TASK_FLAVOR_CONTROL) == 0) {
+ dbg_ok = TRUE;
+ } else {
+ dbg_ok = FALSE;
+ }
+#endif
+
itk_lock(task);
- if (task->itk_self == IP_NULL) {
+ if (!task->ipc_active) {
itk_unlock(task);
-
return KERN_FAILURE;
}
count = 0;
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
+ for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
if (exception_mask & (1 << i)) {
+ ipc_port_t exc_port = task->exc_actions[i].port;
+ exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
+ thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;
+
for (j = 0; j < count; ++j) {
/*
* search for an identical entry, if found
* set corresponding mask for this exception.
*/
- if (task->exc_actions[i].port == ports[j] &&
- task->exc_actions[i].behavior == behaviors[j] &&
- task->exc_actions[i].flavor == flavors[j]) {
+ if (exc_port == port_ptrs[j] &&
+ exc_behavior == behaviors[j] &&
+ exc_flavor == flavors[j]) {
masks[j] |= (1 << i);
break;
}
}
- if (j == count) {
+ if (j == count && count < *CountCnt) {
masks[j] = (1 << i);
- ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
- behaviors[j] = task->exc_actions[i].behavior;
- flavors[j] = task->exc_actions[i].flavor;
- ++count;
- if (count > *CountCnt) {
- break;
+ port_ptrs[j] = exc_port;
+
+ if (info_only) {
+ if (!dbg_ok || !IP_VALID(exc_port)) {
+ /* avoid taking port lock if !dbg_ok */
+ ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
+ } else {
+ uintptr_t receiver;
+ (void)ipc_port_get_receiver_task(exc_port, &receiver);
+ ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
+ ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
+ }
+ } else {
+ ports[j] = ipc_port_copy_send(exc_port);
}
+ behaviors[j] = exc_behavior;
+ flavors[j] = exc_flavor;
+ ++count;
}
}
}
return KERN_SUCCESS;
}
+
+static kern_return_t
+task_get_exception_ports(
+ task_t task,
+ exception_mask_t exception_mask,
+ exception_mask_array_t masks,
+ mach_msg_type_number_t *CountCnt,
+ exception_port_array_t ports,
+ exception_behavior_array_t behaviors,
+ thread_state_flavor_array_t flavors)
+{
+ return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
+ NULL, ports, behaviors, flavors);
+}
+
+kern_return_t
+task_get_exception_ports_info(
+ mach_port_t port,
+ exception_mask_t exception_mask,
+ exception_mask_array_t masks,
+ mach_msg_type_number_t *CountCnt,
+ exception_port_info_array_t ports_info,
+ exception_behavior_array_t behaviors,
+ thread_state_flavor_array_t flavors)
+{
+ kern_return_t kr;
+
+ task_t task = convert_port_to_task_read_no_eval(port);
+
+ if (task == TASK_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
+ ports_info, NULL, behaviors, flavors);
+
+ task_deallocate(task);
+ return kr;
+}
+
+kern_return_t
+task_get_exception_ports_from_user(
+ mach_port_t port,
+ exception_mask_t exception_mask,
+ exception_mask_array_t masks,
+ mach_msg_type_number_t *CountCnt,
+ exception_port_array_t ports,
+ exception_behavior_array_t behaviors,
+ thread_state_flavor_array_t flavors)
+{
+ kern_return_t kr;
+
+ task_t task = convert_port_to_task_no_eval(port);
+
+ if (task == TASK_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
+
+ task_deallocate(task);
+ return kr;
+}
+
+/*
+ * Routine: ipc_thread_port_unpin
+ * Purpose:
+ * Called on the thread port when the thread is
+ * terminating so that the last ref can be deallocated
+ * without a guard exception.
+ * Conditions:
+ * Thread mutex lock is held.
+ * check_bit should be set to true only when port is expected
+ * to have ip_pinned bit set.
+ */
+void
+ipc_thread_port_unpin(
+ ipc_port_t port,
+ __unused bool check_bit)
+{
+ if (port == IP_NULL) {
+ return;
+ }
+ ip_lock(port);
+ imq_lock(&port->ip_messages);
+#if DEVELOPMENT || DEBUG
+ if (pinned_control_port_enabled && check_bit) {
+		assert(ip_is_control(port)); /* remove once we get rid of the boot-arg */
+ assert(port->ip_pinned == 1);
+ }
+#endif
+ port->ip_pinned = 0;
+ imq_unlock(&port->ip_messages);
+ ip_unlock(port);
+}
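+
+/*
+ * Illustrative sketch (assumed usage, not part of this change): with pinned
+ * control ports enabled, the pinned send right on a thread's control port
+ * can only be destroyed once the thread terminates. The termination path is
+ * expected to clear the pin under the thread mutex before dropping the last
+ * right, roughly:
+ *
+ *     thread_mtx_lock(thread);
+ *     ipc_thread_port_unpin(thread->ith_self, true);
+ *     thread_mtx_unlock(thread);
+ */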