#include <security/mac_mach_internal.h>
+#if CONFIG_EMBEDDED && !SECURE_KERNEL
+extern int cs_relax_platform_task_ports;
+#endif
+
/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
-
+task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
+static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
+static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
+kern_return_t task_conversion_eval(task_t caller, task_t victim);
/*
* Routine: ipc_task_init
task->itk_self = kport;
task->itk_nself = nport;
task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
- task->itk_sself = ipc_port_make_send(kport);
+ if (task_is_a_corpse_fork(task)) {
+ /*
+		 * A no-senders notification for a corpse would not
+		 * work with a naked send right in the kernel.
+ */
+ task->itk_sself = IP_NULL;
+ } else {
+ task->itk_sself = ipc_port_make_send(kport);
+ }
+ task->itk_debug_control = IP_NULL;
task->itk_space = space;
-#if CONFIG_MACF_MACH
- if (parent)
- mac_task_label_associate(parent, task, &parent->maclabel,
- &task->maclabel, &kport->ip_label);
- else
- mac_task_label_associate_kernel(task, &task->maclabel, &kport->ip_label);
+#if CONFIG_MACF
+ for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+ mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
+ }
#endif
-
+
if (parent == TASK_NULL) {
ipc_port_t port;
parent->exc_actions[i].behavior;
task->exc_actions[i].privileged =
parent->exc_actions[i].privileged;
+#if CONFIG_MACF
+ mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
+#endif
}/* for */
task->itk_host =
ipc_port_copy_send(parent->itk_host);
if (IP_VALID(task->exc_actions[i].port)) {
ipc_port_release_send(task->exc_actions[i].port);
}
+#if CONFIG_MACF
+ mac_exc_free_action_label(task->exc_actions + i);
+#endif
}
if (IP_VALID(task->itk_host))
if (IP_VALID(task->itk_task_access))
ipc_port_release_send(task->itk_task_access);
+ if (IP_VALID(task->itk_debug_control))
+ ipc_port_release_send(task->itk_debug_control);
+
for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
if (IP_VALID(task->itk_registered[i]))
ipc_port_release_send(task->itk_registered[i]);
ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
int i;
+#if CONFIG_MACF
+ /* Fresh label to unset credentials in existing labels. */
+ struct label *unset_label = mac_exc_create_label();
+#endif
+
new_kport = ipc_port_alloc_kernel();
if (new_kport == IP_NULL)
panic("ipc_task_reset");
/* the task is already terminated (can this happen?) */
itk_unlock(task);
ipc_port_dealloc_kernel(new_kport);
+#if CONFIG_MACF
+ mac_exc_free_label(unset_label);
+#endif
return;
}
task->itk_self = new_kport;
old_sself = task->itk_sself;
task->itk_sself = ipc_port_make_send(new_kport);
- ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
+
+ /* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
+ ip_lock(old_kport);
+ ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
+ task->exec_token += 1;
+ ip_unlock(old_kport);
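+	/* convert_port_to_task_with_exec_token() snapshots this token; a later mismatch lets callers detect that the task exec'd */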
+
ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+ old_exc_actions[i] = IP_NULL;
+
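+		/* preserve the corpse-notify action while a corpse report is still pending */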
+ if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
+ continue;
+ }
+
if (!task->exc_actions[i].privileged) {
+#if CONFIG_MACF
+ mac_exc_update_action_label(task->exc_actions + i, unset_label);
+#endif
old_exc_actions[i] = task->exc_actions[i].port;
task->exc_actions[i].port = IP_NULL;
- } else {
- old_exc_actions[i] = IP_NULL;
}
}/* for */
-
+
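+	/* the debug control port does not survive a task reset; drop the send right */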
+ if (IP_VALID(task->itk_debug_control)) {
+ ipc_port_release_send(task->itk_debug_control);
+ }
+ task->itk_debug_control = IP_NULL;
+
itk_unlock(task);
+#if CONFIG_MACF
+ mac_exc_free_label(unset_label);
+#endif
+
/* release the naked send rights */
if (IP_VALID(old_sself))
thread->ith_self = kport;
thread->ith_sself = ipc_port_make_send(kport);
+ thread->ith_special_reply_port = NULL;
thread->exc_actions = NULL;
ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
+
+#if CONFIG_MACF
+ for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
+ mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
+ }
+#endif
}
void
thread_t thread)
{
if (thread->exc_actions != NULL) {
+#if CONFIG_MACF
+ for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
+ mac_exc_free_action_label(thread->exc_actions + i);
+ }
+#endif
+
kfree(thread->exc_actions,
sizeof(struct exception_action) * EXC_TYPES_COUNT);
thread->exc_actions = NULL;
assert(thread->ith_assertions == 0);
#endif
+ /* unbind the thread special reply port */
+ if (IP_VALID(thread->ith_special_reply_port)) {
+ ipc_port_unbind_special_reply_port(thread, TRUE);
+ }
+
assert(ipc_kmsg_queue_empty(&thread->ith_messages));
if (thread->ith_rpc_reply != IP_NULL)
boolean_t has_old_exc_actions = FALSE;
int i;
+#if CONFIG_MACF
+ struct label *new_label = mac_exc_create_label();
+#endif
+
new_kport = ipc_port_alloc_kernel();
if (new_kport == IP_NULL)
		panic("ipc_thread_reset");
old_kport = thread->ith_self;
- if (old_kport == IP_NULL) {
+ if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* the thread is already terminated (can this happen?) */
thread_mtx_unlock(thread);
ipc_port_dealloc_kernel(new_kport);
+#if CONFIG_MACF
+ mac_exc_free_label(new_label);
+#endif
return;
}
thread->ith_self = new_kport;
old_sself = thread->ith_sself;
thread->ith_sself = ipc_port_make_send(new_kport);
- ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
+ if (old_kport != IP_NULL) {
+ ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
+ }
ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);
/*
if (thread->exc_actions[i].privileged) {
old_exc_actions[i] = IP_NULL;
} else {
+#if CONFIG_MACF
+ mac_exc_update_action_label(thread->exc_actions + i, new_label);
+#endif
old_exc_actions[i] = thread->exc_actions[i].port;
thread->exc_actions[i].port = IP_NULL;
}
thread_mtx_unlock(thread);
+#if CONFIG_MACF
+ mac_exc_free_label(new_label);
+#endif
+
/* release the naked send rights */
if (IP_VALID(old_sself))
}
/* destroy the kernel port */
- ipc_port_dealloc_kernel(old_kport);
+ if (old_kport != IP_NULL) {
+ ipc_port_dealloc_kernel(old_kport);
+ }
+
+ /* unbind the thread special reply port */
+ if (IP_VALID(thread->ith_special_reply_port)) {
+ ipc_port_unbind_special_reply_port(thread, TRUE);
+ }
}
/*
ipc_port_t
retrieve_task_self_fast(
- register task_t task)
+ task_t task)
{
- register ipc_port_t port;
+ ipc_port_t port;
assert(task == current_task());
retrieve_thread_self_fast(
thread_t thread)
{
- register ipc_port_t port;
+ ipc_port_t port;
assert(thread == current_thread());
return name;
}
+/*
+ * Routine: thread_get_special_reply_port [mach trap]
+ * Purpose:
+ * Allocate a special reply port for the calling thread.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_PORT_NULL if there are any resource failures
+ * or other errors.
+ */
+
+mach_port_name_t
+thread_get_special_reply_port(
+ __unused struct thread_get_special_reply_port_args *args)
+{
+ ipc_port_t port;
+ mach_port_name_t name;
+ kern_return_t kr;
+ thread_t thread = current_thread();
+
+ /* unbind the thread special reply port */
+ if (IP_VALID(thread->ith_special_reply_port)) {
+ kr = ipc_port_unbind_special_reply_port(thread, TRUE);
+ if (kr != KERN_SUCCESS) {
+ return MACH_PORT_NULL;
+ }
+ }
+
+ kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
+ if (kr == KERN_SUCCESS) {
+ ipc_port_bind_special_reply_port_locked(port);
+ ip_unlock(port);
+ } else {
+ name = MACH_PORT_NULL;
+ }
+ return name;
+}
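+
+/*
+ * A minimal usage sketch (hypothetical userspace caller; assumes the trap
+ * wrapper is exposed as mach_port_name_t thread_get_special_reply_port(void)):
+ *
+ *	mach_port_name_t reply = thread_get_special_reply_port();
+ *	if (reply == MACH_PORT_NULL) {
+ *		// allocation failed, or the previous binding could not be dropped
+ *	}
+ */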
+
+/*
+ * Routine: ipc_port_bind_special_reply_port_locked
+ * Purpose:
+ *	Bind the given port to the current thread as a special reply port.
+ * Conditions:
+ * Port locked.
+ * Returns:
+ * None.
+ */
+
+static void
+ipc_port_bind_special_reply_port_locked(
+ ipc_port_t port)
+{
+ thread_t thread = current_thread();
+ assert(thread->ith_special_reply_port == NULL);
+
+ ip_reference(port);
+ thread->ith_special_reply_port = port;
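+	/* the reference taken above is dropped in ipc_port_unbind_special_reply_port() */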
+ port->ip_specialreply = 1;
+ port->ip_link_sync_qos = 1;
+}
+
+/*
+ * Routine: ipc_port_unbind_special_reply_port
+ * Purpose:
+ * Unbind the thread's special reply port.
+ *	If the special reply port is linked to another port, adjust its sync qos delta.
+ * Conditions:
+ *	Nothing locked.
+ * Returns:
+ *	KERN_SUCCESS on success; KERN_FAILURE if the port is still
+ *	active and unbind_active_port is FALSE.
+ */
+static kern_return_t
+ipc_port_unbind_special_reply_port(
+ thread_t thread,
+ boolean_t unbind_active_port)
+{
+ ipc_port_t special_reply_port = thread->ith_special_reply_port;
+
+ ip_lock(special_reply_port);
+
+	/* Return an error if the port is active and unbind_active_port is FALSE */
+ if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
+ ip_unlock(special_reply_port);
+ return KERN_FAILURE;
+ }
+
+ thread->ith_special_reply_port = NULL;
+ ipc_port_unlink_special_reply_port_locked(special_reply_port, NULL,
+ IPC_PORT_UNLINK_SR_CLEAR_SPECIAL_REPLY);
+ /* port unlocked */
+
+ ip_release(special_reply_port);
+ return KERN_SUCCESS;
+}
+
/*
* Routine: thread_get_special_port [kernel call]
* Purpose:
}
switch (which) {
- case TASK_KERNEL_PORT:
+ case TASK_KERNEL_PORT:
port = ipc_port_copy_send(task->itk_sself);
break;
- case TASK_NAME_PORT:
+ case TASK_NAME_PORT:
port = ipc_port_make_send(task->itk_nself);
break;
- case TASK_HOST_PORT:
+ case TASK_HOST_PORT:
port = ipc_port_copy_send(task->itk_host);
break;
- case TASK_BOOTSTRAP_PORT:
+ case TASK_BOOTSTRAP_PORT:
port = ipc_port_copy_send(task->itk_bootstrap);
break;
- case TASK_SEATBELT_PORT:
+ case TASK_SEATBELT_PORT:
port = ipc_port_copy_send(task->itk_seatbelt);
break;
- case TASK_ACCESS_PORT:
+ case TASK_ACCESS_PORT:
port = ipc_port_copy_send(task->itk_task_access);
break;
- default:
- itk_unlock(task);
+ case TASK_DEBUG_CONTROL_PORT:
+ port = ipc_port_copy_send(task->itk_debug_control);
+ break;
+
+ default:
+ itk_unlock(task);
return KERN_INVALID_ARGUMENT;
}
itk_unlock(task);
return KERN_INVALID_ARGUMENT;
switch (which) {
- case TASK_KERNEL_PORT:
- whichp = &task->itk_sself;
- break;
+ case TASK_KERNEL_PORT:
+ whichp = &task->itk_sself;
+ break;
- case TASK_HOST_PORT:
- whichp = &task->itk_host;
- break;
+ case TASK_HOST_PORT:
+ whichp = &task->itk_host;
+ break;
- case TASK_BOOTSTRAP_PORT:
- whichp = &task->itk_bootstrap;
- break;
+ case TASK_BOOTSTRAP_PORT:
+ whichp = &task->itk_bootstrap;
+ break;
- case TASK_SEATBELT_PORT:
- whichp = &task->itk_seatbelt;
- break;
+ case TASK_SEATBELT_PORT:
+ whichp = &task->itk_seatbelt;
+ break;
- case TASK_ACCESS_PORT:
- whichp = &task->itk_task_access;
- break;
-
- default:
- return KERN_INVALID_ARGUMENT;
+ case TASK_ACCESS_PORT:
+ whichp = &task->itk_task_access;
+ break;
+
+ case TASK_DEBUG_CONTROL_PORT:
+ whichp = &task->itk_debug_control;
+ break;
+
+ default:
+ return KERN_INVALID_ARGUMENT;
}/* switch */
itk_lock(task);
return KERN_NO_ACCESS;
}
-#if CONFIG_MACF_MACH
- if (mac_task_check_service(current_task(), task, "set_special_port")) {
- itk_unlock(task);
- return KERN_NO_ACCESS;
- }
-#endif
-
old = *whichp;
*whichp = port;
itk_unlock(task);
return KERN_SUCCESS;
}
+kern_return_t
+task_conversion_eval(task_t caller, task_t victim)
+{
+ /*
+ * Tasks are allowed to resolve their own task ports, and the kernel is
+ * allowed to resolve anyone's task port.
+ */
+ if (caller == kernel_task) {
+ return KERN_SUCCESS;
+ }
+
+ if (caller == victim) {
+ return KERN_SUCCESS;
+ }
+
+ /*
+	 * Only the kernel can resolve the kernel's task port. We've established
+ * by this point that the caller is not kernel_task.
+ */
+ if (victim == kernel_task) {
+ return KERN_INVALID_SECURITY;
+ }
+
+#if CONFIG_EMBEDDED
+ /*
+ * On embedded platforms, only a platform binary can resolve the task port
+ * of another platform binary.
+ */
+ if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
+#if SECURE_KERNEL
+ return KERN_INVALID_SECURITY;
+#else
+ if (cs_relax_platform_task_ports) {
+ return KERN_SUCCESS;
+ } else {
+ return KERN_INVALID_SECURITY;
+ }
+#endif /* SECURE_KERNEL */
+ }
+#endif /* CONFIG_EMBEDDED */
+
+ return KERN_SUCCESS;
+}
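+
+/*
+ * Policy summary for task_conversion_eval():
+ *	caller == kernel_task            -> allow
+ *	caller == victim                 -> allow
+ *	victim == kernel_task            -> deny
+ *	CONFIG_EMBEDDED, platform victim,
+ *	non-platform caller              -> deny, unless
+ *	                                    cs_relax_platform_task_ports is set
+ *	                                    (non-SECURE_KERNEL builds only)
+ *	otherwise                        -> allow
+ */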
+
/*
* Routine: convert_port_to_locked_task
* Purpose:
task_t
convert_port_to_locked_task(ipc_port_t port)
{
- int try_failed_count = 0;
+ int try_failed_count = 0;
while (IP_VALID(port)) {
+ task_t ct = current_task();
task_t task;
ip_lock(port);
task = (task_t) port->ip_kobject;
assert(task != TASK_NULL);
+ if (task_conversion_eval(ct, task)) {
+ ip_unlock(port);
+ return TASK_NULL;
+ }
+
/*
* Normal lock ordering puts task_lock() before ip_lock().
* Attempt out-of-order locking here.
return TASK_NULL;
}
+/*
+ * Routine: convert_port_to_locked_task_inspect
+ * Purpose:
+ * Internal helper routine to convert from a port to a locked
+ * task inspect right. Used by internal routines that try to convert from a
+ *	task inspect port to a reference on some task-related object.
+ * Conditions:
+ * Nothing locked, blocking OK.
+ */
+task_inspect_t
+convert_port_to_locked_task_inspect(ipc_port_t port)
+{
+ int try_failed_count = 0;
+
+ while (IP_VALID(port)) {
+ task_inspect_t task;
+
+ ip_lock(port);
+ if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
+ ip_unlock(port);
+ return TASK_INSPECT_NULL;
+ }
+ task = (task_inspect_t)port->ip_kobject;
+ assert(task != TASK_INSPECT_NULL);
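+		/* Note: no task_conversion_eval() check here; an inspect right is weaker than the full task-port conversion above */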
+ /*
+ * Normal lock ordering puts task_lock() before ip_lock().
+ * Attempt out-of-order locking here.
+ */
+ if (task_lock_try((task_t)task)) {
+ ip_unlock(port);
+ return task;
+ }
+ try_failed_count++;
+
+ ip_unlock(port);
+ mutex_pause(try_failed_count);
+ }
+ return TASK_INSPECT_NULL;
+}
+
/*
* Routine: convert_port_to_task
* Purpose:
task_t
convert_port_to_task(
ipc_port_t port)
+{
+ return convert_port_to_task_with_exec_token(port, NULL);
+}
+
+/*
+ * Routine: convert_port_to_task_with_exec_token
+ * Purpose:
+ * Convert from a port to a task and return
+ * the exec token stored in the task.
+ * Doesn't consume the port ref; produces a task ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+task_t
+convert_port_to_task_with_exec_token(
+ ipc_port_t port,
+ uint32_t *exec_token)
{
task_t task = TASK_NULL;
if ( ip_active(port) &&
ip_kotype(port) == IKOT_TASK ) {
+ task_t ct = current_task();
task = (task_t)port->ip_kobject;
assert(task != TASK_NULL);
+ if (task_conversion_eval(ct, task)) {
+ ip_unlock(port);
+ return TASK_NULL;
+ }
+
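+		/* snapshot exec_token while the port is locked; ipc_task_reset() bumps it on exec, so a later mismatch flags the transition */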
+ if (exec_token) {
+ *exec_token = task->exec_token;
+ }
task_reference_internal(task);
}
return (task);
}
+/*
+ * Routine: convert_port_to_task_inspect
+ * Purpose:
+ * Convert from a port to a task inspection right
+ * Doesn't consume the port ref; produces a task ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+task_inspect_t
+convert_port_to_task_inspect(
+ ipc_port_t port)
+{
+ task_inspect_t task = TASK_INSPECT_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+
+ if (ip_active(port) &&
+ ip_kotype(port) == IKOT_TASK) {
+ task = (task_inspect_t)port->ip_kobject;
+ assert(task != TASK_INSPECT_NULL);
+
+ task_reference_internal(task);
+ }
+
+ ip_unlock(port);
+ }
+
+ return (task);
+}
+
/*
* Routine: convert_port_to_task_suspension_token
* Purpose:
return (space);
}
+/*
+ * Routine: convert_port_to_space_inspect
+ * Purpose:
+ * Convert from a port to a space inspect right.
+ * Doesn't consume the port ref; produces a space inspect ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+ipc_space_inspect_t
+convert_port_to_space_inspect(
+ ipc_port_t port)
+{
+ ipc_space_inspect_t space;
+ task_inspect_t task;
+
+ task = convert_port_to_locked_task_inspect(port);
+
+ if (task == TASK_INSPECT_NULL)
+ return IPC_SPACE_INSPECT_NULL;
+
+ if (!task->active) {
+ task_unlock(task);
+ return IPC_SPACE_INSPECT_NULL;
+ }
+
+ space = (ipc_space_inspect_t)task->itk_space;
+ is_reference((ipc_space_t)space);
+ task_unlock((task_t)task);
+ return space;
+}
+
/*
* Routine: convert_port_to_map
* Purpose:
ip_kotype(port) == IKOT_THREAD ) {
thread = (thread_t)port->ip_kobject;
assert(thread != THREAD_NULL);
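+			/* deny non-kernel callers a reference to a kernel_task thread, mirroring the task-port policy in task_conversion_eval() */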
+ if (thread->task && thread->task == kernel_task &&
+ current_task() != kernel_task) {
+ ip_unlock(port);
+ return THREAD_NULL;
+ }
thread_reference_internal(thread);
}
return (thread);
}
+/*
+ * Routine: convert_port_to_thread_inspect
+ * Purpose:
+ * Convert from a port to a thread inspection right
+ * Doesn't consume the port ref; produces a thread ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+thread_inspect_t
+convert_port_to_thread_inspect(
+ ipc_port_t port)
+{
+ thread_inspect_t thread = THREAD_INSPECT_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+
+ if (ip_active(port) &&
+ ip_kotype(port) == IKOT_THREAD) {
+ thread = (thread_inspect_t)port->ip_kobject;
+ assert(thread != THREAD_INSPECT_NULL);
+ thread_reference_internal((thread_t)thread);
+ }
+ ip_unlock(port);
+ }
+
+ return thread;
+}
+
+/*
+ * Routine: convert_thread_inspect_to_port
+ * Purpose:
+ * Convert from a thread inspect reference to a port.
+ *	Consumes a thread ref; as we never export thread
+ *	inspect ports, this always returns IP_NULL.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_thread_inspect_to_port(thread_inspect_t thread)
+{
+ thread_deallocate(thread);
+ return IP_NULL;
+}
+
/*
* Routine: port_name_to_thread
* Purpose:
* A name of MACH_PORT_NULL is valid for the null thread.
* Conditions:
* Nothing locked.
+ *
+ * TODO: Could this be faster if it were ipc_port_translate_send based, like thread_switch?
+ * We could avoid extra lock/unlock and extra ref operations on the port.
*/
thread_t
port_name_to_thread(
return task;
}
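+
+/*
+ * Routine:	port_name_to_task_inspect
+ * Purpose:
+ *	Convert from a port name to a task inspect reference.
+ *	A name of MACH_PORT_NULL is valid for the null task.
+ * Conditions:
+ *	Nothing locked.
+ */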
+task_inspect_t
+port_name_to_task_inspect(
+ mach_port_name_t name)
+{
+ ipc_port_t kern_port;
+ kern_return_t kr;
+ task_inspect_t ti = TASK_INSPECT_NULL;
+
+ if (MACH_PORT_VALID(name)) {
+ kr = ipc_object_copyin(current_space(), name,
+ MACH_MSG_TYPE_COPY_SEND,
+ (ipc_object_t *)&kern_port);
+ if (kr != KERN_SUCCESS)
+			return TASK_INSPECT_NULL;
+
+ ti = convert_port_to_task_inspect(kern_port);
+
+ if (IP_VALID(kern_port))
+ ipc_port_release_send(kern_port);
+ }
+ return ti;
+}
+
+/*
+ * Routine: port_name_to_host
+ * Purpose:
+ * Convert from a port name to a host pointer.
+ * NOTE: This does _not_ return a +1 reference to the host_t
+ * Conditions:
+ * Nothing locked.
+ */
+host_t
+port_name_to_host(
+ mach_port_name_t name)
+{
+ host_t host = HOST_NULL;
+ kern_return_t kr;
+ ipc_port_t port;
+
+ if (MACH_PORT_VALID(name)) {
+ kr = ipc_port_translate_send(current_space(), name, &port);
+ if (kr == KERN_SUCCESS) {
+ host = convert_port_to_host(port);
+ ip_unlock(port);
+ }
+ }
+ return host;
+}
+
/*
* Routine: convert_task_to_port
* Purpose:
ipc_port_t port;
itk_lock(task);
+
if (task->itk_self != IP_NULL)
port = ipc_port_make_send(task->itk_self);
else
port = IP_NULL;
+
itk_unlock(task);
task_deallocate(task);
return port;
}
+/*
+ * Routine: convert_task_inspect_to_port
+ * Purpose:
+ * Convert from a task inspect reference to a port.
+ *	Consumes a task ref; as we never export task
+ *	inspect ports, this always returns IP_NULL.
+ * Conditions:
+ * Nothing locked.
+ */
+ipc_port_t
+convert_task_inspect_to_port(
+ task_inspect_t task)
+{
+ task_deallocate(task);
+
+ return IP_NULL;
+}
+
/*
* Routine: convert_task_suspend_token_to_port
* Purpose:
is_release(space);
}
+/*
+ * Routine: space_inspect_deallocate
+ * Purpose:
+ * Deallocate a space inspect ref produced by convert_port_to_space_inspect.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+space_inspect_deallocate(
+ ipc_space_inspect_t space)
+{
+ if (space != IS_INSPECT_NULL)
+ is_release((ipc_space_t)space);
+}
+
/*
* Routine: thread/task_set_exception_ports [kernel call]
* Purpose:
boolean_t privileged = current_task()->sec_token.val[0] == 0;
register int i;
+#if CONFIG_MACF
+ struct label *new_label;
+#endif
+
if (thread == THREAD_NULL)
return (KERN_INVALID_ARGUMENT);
* VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
* osfmk/mach/ARCHITECTURE/thread_status.h
*/
- if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
+ if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
return (KERN_INVALID_ARGUMENT);
+#if CONFIG_MACF
+ new_label = mac_exc_create_label_for_current_proc();
+#endif
+
thread_mtx_lock(thread);
if (!thread->active) {
ipc_thread_init_exc_actions(thread);
}
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
- if (exception_mask & (1 << i)) {
+ if ((exception_mask & (1 << i))
+#if CONFIG_MACF
+ && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
+#endif
+ ) {
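+			/* a nonzero return from the MACF hook above vetoes the update for this exception type */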
old_port[i] = thread->exc_actions[i].port;
thread->exc_actions[i].port = ipc_port_copy_send(new_port);
thread->exc_actions[i].behavior = new_behavior;
thread_mtx_unlock(thread);
+#if CONFIG_MACF
+ mac_exc_free_label(new_label);
+#endif
+
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
if (IP_VALID(old_port[i]))
ipc_port_release_send(old_port[i]);
boolean_t privileged = current_task()->sec_token.val[0] == 0;
register int i;
+#if CONFIG_MACF
+ struct label *new_label;
+#endif
+
if (task == TASK_NULL)
return (KERN_INVALID_ARGUMENT);
}
}
+ /*
+ * Check the validity of the thread_state_flavor by calling the
+ * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
+ * osfmk/mach/ARCHITECTURE/thread_status.h
+ */
+ if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
+ return (KERN_INVALID_ARGUMENT);
+
+#if CONFIG_MACF
+ new_label = mac_exc_create_label_for_current_proc();
+#endif
+
itk_lock(task);
if (task->itk_self == IP_NULL) {
}
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
- if (exception_mask & (1 << i)) {
+ if ((exception_mask & (1 << i))
+#if CONFIG_MACF
+ && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
+#endif
+ ) {
old_port[i] = task->exc_actions[i].port;
task->exc_actions[i].port =
ipc_port_copy_send(new_port);
itk_unlock(task);
+#if CONFIG_MACF
+ mac_exc_free_label(new_label);
+#endif
+
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
if (IP_VALID(old_port[i]))
ipc_port_release_send(old_port[i]);
boolean_t privileged = current_task()->sec_token.val[0] == 0;
unsigned int i, j, count;
+#if CONFIG_MACF
+ struct label *new_label;
+#endif
+
if (thread == THREAD_NULL)
return (KERN_INVALID_ARGUMENT);
}
}
+ if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
+ return (KERN_INVALID_ARGUMENT);
+
+#if CONFIG_MACF
+ new_label = mac_exc_create_label_for_current_proc();
+#endif
+
thread_mtx_lock(thread);
if (!thread->active) {
assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
- if (exception_mask & (1 << i)) {
+ if ((exception_mask & (1 << i))
+#if CONFIG_MACF
+ && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
+#endif
+ ) {
for (j = 0; j < count; ++j) {
/*
* search for an identical entry, if found
thread_mtx_unlock(thread);
+#if CONFIG_MACF
+ mac_exc_free_label(new_label);
+#endif
+
while (--i >= FIRST_EXCEPTION) {
if (IP_VALID(old_port[i]))
ipc_port_release_send(old_port[i]);
boolean_t privileged = current_task()->sec_token.val[0] == 0;
unsigned int i, j, count;
+#if CONFIG_MACF
+ struct label *new_label;
+#endif
+
if (task == TASK_NULL)
return (KERN_INVALID_ARGUMENT);
}
}
+ if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
+ return (KERN_INVALID_ARGUMENT);
+
+#if CONFIG_MACF
+ new_label = mac_exc_create_label_for_current_proc();
+#endif
+
itk_lock(task);
if (task->itk_self == IP_NULL) {
assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
- if (exception_mask & (1 << i)) {
+ if ((exception_mask & (1 << i))
+#if CONFIG_MACF
+ && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
+#endif
+ ) {
for (j = 0; j < count; j++) {
/*
* search for an identical entry, if found
itk_unlock(task);
+#if CONFIG_MACF
+ mac_exc_free_label(new_label);
+#endif
+
while (--i >= FIRST_EXCEPTION) {
if (IP_VALID(old_port[i]))
ipc_port_release_send(old_port[i]);