/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
+/*
+ * NOTICE: This file was modified by McAfee Research in 2004 to introduce
+ * support for mandatory and extensible security protections. This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ */
/*
*/
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
-#include <vm/vm_shared_memory_server.h>
#include <vm/vm_protos.h>
+#include <security/mac_mach_internal.h>
+
/* forward declarations */
task_t convert_port_to_locked_task(ipc_port_t port);
{
ipc_space_t space;
ipc_port_t kport;
+ ipc_port_t nport;
kern_return_t kr;
int i;
if (kr != KERN_SUCCESS)
panic("ipc_task_init");
+ space->is_task = task;
kport = ipc_port_alloc_kernel();
if (kport == IP_NULL)
panic("ipc_task_init");
+ nport = ipc_port_alloc_kernel();
+ if (nport == IP_NULL)
+ panic("ipc_task_init");
+
itk_lock_init(task);
task->itk_self = kport;
+ task->itk_nself = nport;
+ task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
task->itk_sself = ipc_port_make_send(kport);
+ task->itk_debug_control = IP_NULL;
task->itk_space = space;
- space->is_fast = FALSE;
if (parent == TASK_NULL) {
ipc_port_t port;
task->itk_host = port;
task->itk_bootstrap = IP_NULL;
+ task->itk_seatbelt = IP_NULL;
+ task->itk_gssd = IP_NULL;
+ task->itk_task_access = IP_NULL;
for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
task->itk_registered[i] = IP_NULL;
parent->exc_actions[i].flavor;
task->exc_actions[i].behavior =
parent->exc_actions[i].behavior;
+ task->exc_actions[i].privileged =
+ parent->exc_actions[i].privileged;
}/* for */
task->itk_host =
ipc_port_copy_send(parent->itk_host);
task->itk_bootstrap =
ipc_port_copy_send(parent->itk_bootstrap);
+ task->itk_seatbelt =
+ ipc_port_copy_send(parent->itk_seatbelt);
+
+ task->itk_gssd =
+ ipc_port_copy_send(parent->itk_gssd);
+
+ task->itk_task_access =
+ ipc_port_copy_send(parent->itk_task_access);
+
itk_unlock(parent);
}
}
task_t task)
{
ipc_port_t kport;
+ ipc_port_t nport;
itk_lock(task);
kport = task->itk_self;
if (kport != IP_NULL)
ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
+ nport = task->itk_nself;
+ if (nport != IP_NULL)
+ ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
itk_unlock(task);
}
task_t task)
{
ipc_port_t kport;
+ ipc_port_t nport;
+ ipc_port_t rport;
itk_lock(task);
kport = task->itk_self;
if (kport != IP_NULL)
ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
+ nport = task->itk_nself;
+ if (nport != IP_NULL)
+ ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
+
+ rport = task->itk_resume;
+ if (rport != IP_NULL) {
+ /*
+ * From this point onwards this task is no longer accepting
+ * resumptions.
+ *
+ * There are still outstanding suspensions on this task,
+ * even as it is being torn down. Disconnect the task
+ * from the rport, thereby "orphaning" the rport. The rport
+ * itself will go away only when the last suspension holder
+ * destroys his SO right to it -- when he either
+ * exits, or tries to actually use that last SO right to
+ * resume this (now non-existent) task.
+ */
+ ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
+ }
itk_unlock(task);
}
task_t task)
{
ipc_port_t kport;
+ ipc_port_t nport;
+ ipc_port_t rport;
int i;
itk_lock(task);
itk_unlock(task);
return;
}
-
task->itk_self = IP_NULL;
+
+ nport = task->itk_nself;
+ assert(nport != IP_NULL);
+ task->itk_nself = IP_NULL;
+
+ rport = task->itk_resume;
+ task->itk_resume = IP_NULL;
+
itk_unlock(task);
/* release the naked send rights */
if (IP_VALID(task->itk_bootstrap))
ipc_port_release_send(task->itk_bootstrap);
+ if (IP_VALID(task->itk_seatbelt))
+ ipc_port_release_send(task->itk_seatbelt);
+
+ if (IP_VALID(task->itk_gssd))
+ ipc_port_release_send(task->itk_gssd);
+
+ if (IP_VALID(task->itk_task_access))
+ ipc_port_release_send(task->itk_task_access);
+
+ if (IP_VALID(task->itk_debug_control))
+ ipc_port_release_send(task->itk_debug_control);
+
for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
if (IP_VALID(task->itk_registered[i]))
ipc_port_release_send(task->itk_registered[i]);
- ipc_port_release_send(task->wired_ledger_port);
- ipc_port_release_send(task->paged_ledger_port);
-
- /* destroy the kernel port */
+ /* destroy the kernel ports */
ipc_port_dealloc_kernel(kport);
+ ipc_port_dealloc_kernel(nport);
+ if (rport != IP_NULL)
+ ipc_port_dealloc_kernel(rport);
+
+ itk_lock_destroy(task);
}
/*
* Routine: ipc_task_reset
* Purpose:
* Reset a task's IPC state to protect it when
- * it enters an elevated security context.
+ * it enters an elevated security context. The
+ * task name port can remain the same - since
+ * it represents no specific privilege.
* Conditions:
* Nothing locked. The task must be suspended.
* (Or the current thread must be in the task.)
{
ipc_port_t old_kport, new_kport;
ipc_port_t old_sself;
-#if 0
ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
int i;
-#endif
new_kport = ipc_port_alloc_kernel();
if (new_kport == IP_NULL)
ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
-#if 0
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
- old_exc_actions[i] = task->exc_action[i].port;
- task->exc_actions[i].port = IP_NULL;
+ if (!task->exc_actions[i].privileged) {
+ old_exc_actions[i] = task->exc_actions[i].port;
+ task->exc_actions[i].port = IP_NULL;
+ } else {
+ old_exc_actions[i] = IP_NULL;
+ }
}/* for */
-#endif
-
+
+ if (IP_VALID(task->itk_debug_control)) {
+ ipc_port_release_send(task->itk_debug_control);
+ }
+ task->itk_debug_control = IP_NULL;
+
itk_unlock(task);
/* release the naked send rights */
if (IP_VALID(old_sself))
ipc_port_release_send(old_sself);
-#if 0
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
if (IP_VALID(old_exc_actions[i])) {
ipc_port_release_send(old_exc_actions[i]);
}
}/* for */
-#endif
/* destroy the kernel port */
ipc_port_dealloc_kernel(old_kport);
thread_t thread)
{
ipc_port_t kport;
- int i;
kport = ipc_port_alloc_kernel();
if (kport == IP_NULL)
thread->ith_self = kport;
thread->ith_sself = ipc_port_make_send(kport);
-
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
- thread->exc_actions[i].port = IP_NULL;
+ thread->exc_actions = NULL;
ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
+#if IMPORTANCE_INHERITANCE
+ thread->ith_assertions = 0;
+#endif
+
ipc_kmsg_queue_init(&thread->ith_messages);
thread->ith_rpc_reply = IP_NULL;
}
+/*
+ * Routine: ipc_thread_init_exc_actions
+ * Purpose:
+ * Lazily allocate and zero-fill the per-thread exception
+ * action table (EXC_TYPES_COUNT entries). Threads start
+ * with exc_actions == NULL and only pay for the table when
+ * an exception port is actually set on them.
+ * Conditions:
+ * thread->exc_actions must currently be NULL (asserted).
+ */
+void
+ipc_thread_init_exc_actions(
+ thread_t thread)
+{
+ assert(thread->exc_actions == NULL);
+
+ /* NOTE(review): kalloc result is not NULL-checked before the
+  * bzero; presumably this allocation cannot fail (or panics)
+  * in this kernel configuration -- confirm. */
+ thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
+ bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
+}
+
+/*
+ * Routine: ipc_thread_destroy_exc_actions
+ * Purpose:
+ * Free the thread's lazily-allocated exception action table,
+ * if present, and reset the pointer so later teardown paths
+ * cannot touch freed memory. Safe to call when the table was
+ * never allocated (no-op).
+ */
+void
+ipc_thread_destroy_exc_actions(
+ thread_t thread)
+{
+ if (thread->exc_actions != NULL) {
+ kfree(thread->exc_actions,
+ sizeof(struct exception_action) * EXC_TYPES_COUNT);
+ thread->exc_actions = NULL;
+ }
+}
+
void
ipc_thread_disable(
thread_t thread)
thread->ith_sself = thread->ith_self = IP_NULL;
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
- if (IP_VALID(thread->exc_actions[i].port))
- ipc_port_release_send(thread->exc_actions[i].port);
- }
+ if (thread->exc_actions != NULL) {
+ for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
+ if (IP_VALID(thread->exc_actions[i].port))
+ ipc_port_release_send(thread->exc_actions[i].port);
+ }
+ ipc_thread_destroy_exc_actions(thread);
+ }
ipc_port_dealloc_kernel(kport);
}
+#if IMPORTANCE_INHERITANCE
+ assert(thread->ith_assertions == 0);
+#endif
+
assert(ipc_kmsg_queue_empty(&thread->ith_messages));
if (thread->ith_rpc_reply != IP_NULL)
thread->ith_rpc_reply = IP_NULL;
}
+/*
+ * Routine: ipc_thread_reset
+ * Purpose:
+ * Reset the IPC state for a given Mach thread when
+ * its task enters an elevated security context.
+ * Both the thread port and its exception ports have
+ * to be reset. Its RPC reply port cannot have any
+ * rights outstanding, so it should be fine.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_thread_reset(
+ thread_t thread)
+{
+ ipc_port_t old_kport, new_kport;
+ ipc_port_t old_sself;
+ ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
+ boolean_t has_old_exc_actions = FALSE;
+ int i;
+
+ /* Allocate the replacement kernel port before taking the
+  * thread mutex, so the lock is never held across allocation. */
+ new_kport = ipc_port_alloc_kernel();
+ if (new_kport == IP_NULL)
+ panic("ipc_thread_reset");
+
+ thread_mtx_lock(thread);
+
+ old_kport = thread->ith_self;
+
+ if (old_kport == IP_NULL) {
+ /* the thread is already terminated (can this happen?) */
+ thread_mtx_unlock(thread);
+ ipc_port_dealloc_kernel(new_kport);
+ return;
+ }
+
+ /* Swap in the fresh kernel port and self-send right, and
+  * disconnect the old port from the thread kobject. */
+ thread->ith_self = new_kport;
+ old_sself = thread->ith_sself;
+ thread->ith_sself = ipc_port_make_send(new_kport);
+ ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
+ ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);
+
+ /*
+ * Only ports that were set by root-owned processes
+ * (privileged ports) should survive
+ */
+ if (thread->exc_actions != NULL) {
+ has_old_exc_actions = TRUE;
+ for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+ if (thread->exc_actions[i].privileged) {
+ old_exc_actions[i] = IP_NULL;
+ } else {
+ old_exc_actions[i] = thread->exc_actions[i].port;
+ thread->exc_actions[i].port = IP_NULL;
+ }
+ }
+ }
+
+ thread_mtx_unlock(thread);
+
+ /* release the naked send rights */
+
+ if (IP_VALID(old_sself))
+ ipc_port_release_send(old_sself);
+
+ if (has_old_exc_actions) {
+ for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
+ /* entries left IP_NULL above (privileged ports) are
+  * presumably ignored by ipc_port_release_send -- confirm */
+ ipc_port_release_send(old_exc_actions[i]);
+ }
+ }
+
+ /* destroy the kernel port */
+ ipc_port_dealloc_kernel(old_kport);
+}
+
/*
* Routine: retrieve_task_self_fast
* Purpose:
int which,
ipc_port_t *portp)
{
- ipc_port_t *whichp;
ipc_port_t port;
if (task == TASK_NULL)
return KERN_INVALID_ARGUMENT;
+ itk_lock(task);
+ if (task->itk_self == IP_NULL) {
+ itk_unlock(task);
+ return KERN_FAILURE;
+ }
+
switch (which) {
case TASK_KERNEL_PORT:
- whichp = &task->itk_sself;
+ port = ipc_port_copy_send(task->itk_sself);
+ break;
+
+ case TASK_NAME_PORT:
+ port = ipc_port_make_send(task->itk_nself);
break;
case TASK_HOST_PORT:
- whichp = &task->itk_host;
+ port = ipc_port_copy_send(task->itk_host);
break;
case TASK_BOOTSTRAP_PORT:
- whichp = &task->itk_bootstrap;
+ port = ipc_port_copy_send(task->itk_bootstrap);
break;
- case TASK_WIRED_LEDGER_PORT:
- whichp = &task->wired_ledger_port;
- break;
+ case TASK_SEATBELT_PORT:
+ port = ipc_port_copy_send(task->itk_seatbelt);
+ break;
+
+ case TASK_ACCESS_PORT:
+ port = ipc_port_copy_send(task->itk_task_access);
+ break;
+
+ case TASK_DEBUG_CONTROL_PORT:
+ port = ipc_port_copy_send(task->itk_debug_control);
+ break;
- case TASK_PAGED_LEDGER_PORT:
- whichp = &task->paged_ledger_port;
- break;
-
default:
+ itk_unlock(task);
return KERN_INVALID_ARGUMENT;
}
-
- itk_lock(task);
- if (task->itk_self == IP_NULL) {
- itk_unlock(task);
- return KERN_FAILURE;
- }
-
- port = ipc_port_copy_send(*whichp);
itk_unlock(task);
*portp = port;
* KERN_INVALID_ARGUMENT The task is null.
* KERN_FAILURE The task/space is dead.
* KERN_INVALID_ARGUMENT Invalid special port.
+ * KERN_NO_ACCESS Attempted overwrite of seatbelt port.
*/
kern_return_t
whichp = &task->itk_bootstrap;
break;
- case TASK_WIRED_LEDGER_PORT:
- whichp = &task->wired_ledger_port;
- break;
+ case TASK_SEATBELT_PORT:
+ whichp = &task->itk_seatbelt;
+ break;
+
+ case TASK_ACCESS_PORT:
+ whichp = &task->itk_task_access;
+ break;
+
+ case TASK_DEBUG_CONTROL_PORT:
+ whichp = &task->itk_debug_control;
+ break;
+
- case TASK_PAGED_LEDGER_PORT:
- whichp = &task->paged_ledger_port;
- break;
-
default:
return KERN_INVALID_ARGUMENT;
}/* switch */
return KERN_FAILURE;
}
+ /* do not allow overwrite of seatbelt or task access ports */
+ if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
+ && IP_VALID(*whichp)) {
+ itk_unlock(task);
+ return KERN_NO_ACCESS;
+ }
+
old = *whichp;
*whichp = port;
itk_unlock(task);
* KERN_SUCCESS Stashed the port rights.
* KERN_INVALID_ARGUMENT The task is null.
* KERN_INVALID_ARGUMENT The task is dead.
+ * KERN_INVALID_ARGUMENT The memory param is null.
* KERN_INVALID_ARGUMENT Too many port rights supplied.
*/
unsigned int i;
if ((task == TASK_NULL) ||
- (portsCnt > TASK_PORT_REGISTER_MAX))
+ (portsCnt > TASK_PORT_REGISTER_MAX) ||
+ (portsCnt && memory == NULL))
return KERN_INVALID_ARGUMENT;
/*
task_t
convert_port_to_locked_task(ipc_port_t port)
{
+ int try_failed_count = 0;
+
while (IP_VALID(port)) {
task_t task;
ip_unlock(port);
return(task);
}
+ try_failed_count++;
ip_unlock(port);
- mutex_pause();
+ mutex_pause(try_failed_count);
}
return TASK_NULL;
}
return (task);
}
+/*
+ * Routine: convert_port_to_task_name
+ * Purpose:
+ * Convert from a port to a task name.
+ * Doesn't consume the port ref; produces a task name ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+task_name_t
+convert_port_to_task_name(
+ ipc_port_t port)
+{
+ task_name_t task = TASK_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+
+ /* Accept either kobject type: a full task port also
+  * identifies the task, and the name port represents a
+  * strict subset of that privilege. */
+ if ( ip_active(port) &&
+ (ip_kotype(port) == IKOT_TASK ||
+ ip_kotype(port) == IKOT_TASK_NAME)) {
+ task = (task_name_t)port->ip_kobject;
+ assert(task != TASK_NAME_NULL);
+
+ /* Take the ref while the port lock pins the kobject. */
+ task_reference_internal(task);
+ }
+
+ ip_unlock(port);
+ }
+
+ return (task);
+}
+
+/*
+ * Routine: convert_port_to_task_suspension_token
+ * Purpose:
+ * Convert from a port to a task suspension token.
+ * Doesn't consume the port ref; produces a suspension token ref,
+ * which may be null.
+ * Conditions:
+ * Nothing locked.
+ */
+task_suspension_token_t
+convert_port_to_task_suspension_token(
+ ipc_port_t port)
+{
+ task_suspension_token_t task = TASK_NULL;
+
+ if (IP_VALID(port)) {
+ ip_lock(port);
+
+ /* Only the dedicated resume-port kobject type qualifies;
+  * ordinary task ports are deliberately rejected here. */
+ if ( ip_active(port) &&
+ ip_kotype(port) == IKOT_TASK_RESUME) {
+ task = (task_suspension_token_t)port->ip_kobject;
+ assert(task != TASK_NULL);
+
+ /* Take the ref while the port lock pins the kobject. */
+ task_reference_internal(task);
+ }
+
+ ip_unlock(port);
+ }
+
+ return (task);
+}
+
/*
* Routine: convert_port_to_space
* Purpose:
return port;
}
+/*
+ * Routine: convert_task_suspend_token_to_port
+ * Purpose:
+ * Convert from a task suspension token to a port.
+ * Consumes a task suspension token ref; produces a naked send-once right
+ * which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+ipc_port_t
+convert_task_suspension_token_to_port(
+ task_suspension_token_t task)
+{
+ ipc_port_t port;
+
+ task_lock(task);
+ if (task->active) {
+ /* itk_resume is allocated lazily, on the first suspension
+  * that needs a token port for this task. */
+ if (task->itk_resume == IP_NULL) {
+ task->itk_resume = ipc_port_alloc_kernel();
+ if (!IP_VALID(task->itk_resume)) {
+ panic("failed to create resume port");
+ }
+
+ ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
+ }
+
+ /*
+ * Create a send-once right for each instance of a direct user-called
+ * task_suspend2 call. Each time one of these send-once rights is abandoned,
+ * the notification handler will resume the target task.
+ */
+ port = ipc_port_make_sonce(task->itk_resume);
+ assert(IP_VALID(port));
+ } else {
+ /* Inactive (dying) task: no token port can be produced. */
+ port = IP_NULL;
+ }
+
+ task_unlock(task);
+
+ /* This routine consumes the caller's token reference. */
+ task_suspension_token_deallocate(task);
+
+ return port;
+}
+
+
+/*
+ * Routine: convert_task_name_to_port
+ * Purpose:
+ * Convert from a task name ref to a port.
+ * Consumes a task name ref; produces a naked send right
+ * which may be invalid.
+ * Conditions:
+ * Nothing locked.
+ */
+
+ipc_port_t
+convert_task_name_to_port(
+ task_name_t task_name)
+{
+ ipc_port_t port;
+
+ itk_lock(task_name);
+ /* A NULL name port means the task's IPC state has already
+  * been torn down; hand back an invalid port in that case. */
+ if (task_name->itk_nself != IP_NULL)
+ port = ipc_port_make_send(task_name->itk_nself);
+ else
+ port = IP_NULL;
+ itk_unlock(task_name);
+
+ /* This routine consumes the caller's task-name reference. */
+ task_name_deallocate(task_name);
+ return port;
+}
+
/*
* Routine: convert_thread_to_port
* Purpose:
thread_state_flavor_t new_flavor)
{
ipc_port_t old_port[EXC_TYPES_COUNT];
+ boolean_t privileged = current_task()->sec_token.val[0] == 0;
register int i;
if (thread == THREAD_NULL)
return (KERN_INVALID_ARGUMENT);
- if (exception_mask & ~EXC_MASK_ALL)
+ if (exception_mask & ~EXC_MASK_VALID)
return (KERN_INVALID_ARGUMENT);
if (IP_VALID(new_port)) {
- switch (new_behavior) {
+ switch (new_behavior & ~MACH_EXCEPTION_CODES) {
case EXCEPTION_DEFAULT:
case EXCEPTION_STATE:
* VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
* osfmk/mach/ARCHITECTURE/thread_status.h
*/
- if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
+ if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
return (KERN_INVALID_ARGUMENT);
thread_mtx_lock(thread);
return (KERN_FAILURE);
}
+ if (thread->exc_actions == NULL) {
+ ipc_thread_init_exc_actions(thread);
+ }
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
if (exception_mask & (1 << i)) {
old_port[i] = thread->exc_actions[i].port;
thread->exc_actions[i].port = ipc_port_copy_send(new_port);
thread->exc_actions[i].behavior = new_behavior;
thread->exc_actions[i].flavor = new_flavor;
+ thread->exc_actions[i].privileged = privileged;
}
else
old_port[i] = IP_NULL;
thread_state_flavor_t new_flavor)
{
ipc_port_t old_port[EXC_TYPES_COUNT];
+ boolean_t privileged = current_task()->sec_token.val[0] == 0;
register int i;
if (task == TASK_NULL)
return (KERN_INVALID_ARGUMENT);
- if (exception_mask & ~EXC_MASK_ALL)
+ if (exception_mask & ~EXC_MASK_VALID)
return (KERN_INVALID_ARGUMENT);
if (IP_VALID(new_port)) {
- switch (new_behavior) {
+ switch (new_behavior & ~MACH_EXCEPTION_CODES) {
case EXCEPTION_DEFAULT:
case EXCEPTION_STATE:
}
}
+ /*
+ * Check the validity of the thread_state_flavor by calling the
+ * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
+ * osfmk/mach/ARCHITECTURE/thread_status.h
+ */
+ if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
+ return (KERN_INVALID_ARGUMENT);
+
itk_lock(task);
if (task->itk_self == IP_NULL) {
ipc_port_copy_send(new_port);
task->exc_actions[i].behavior = new_behavior;
task->exc_actions[i].flavor = new_flavor;
+ task->exc_actions[i].privileged = privileged;
}
else
old_port[i] = IP_NULL;
thread_state_flavor_array_t flavors)
{
ipc_port_t old_port[EXC_TYPES_COUNT];
+ boolean_t privileged = current_task()->sec_token.val[0] == 0;
unsigned int i, j, count;
if (thread == THREAD_NULL)
return (KERN_INVALID_ARGUMENT);
- if (exception_mask & ~EXC_MASK_ALL)
+ if (exception_mask & ~EXC_MASK_VALID)
return (KERN_INVALID_ARGUMENT);
if (IP_VALID(new_port)) {
- switch (new_behavior) {
+ switch (new_behavior & ~MACH_EXCEPTION_CODES) {
case EXCEPTION_DEFAULT:
case EXCEPTION_STATE:
}
}
+ if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
+ return (KERN_INVALID_ARGUMENT);
+
thread_mtx_lock(thread);
if (!thread->active) {
return (KERN_FAILURE);
}
- count = 0;
+ if (thread->exc_actions == NULL) {
+ ipc_thread_init_exc_actions(thread);
+ }
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
+ assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
+ for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
if (exception_mask & (1 << i)) {
for (j = 0; j < count; ++j) {
/*
thread->exc_actions[i].port = ipc_port_copy_send(new_port);
thread->exc_actions[i].behavior = new_behavior;
thread->exc_actions[i].flavor = new_flavor;
- if (count > *CountCnt)
- break;
+ thread->exc_actions[i].privileged = privileged;
}
else
old_port[i] = IP_NULL;
thread_mtx_unlock(thread);
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
+ while (--i >= FIRST_EXCEPTION) {
if (IP_VALID(old_port[i]))
ipc_port_release_send(old_port[i]);
+ }
if (IP_VALID(new_port)) /* consume send right */
ipc_port_release_send(new_port);
thread_state_flavor_array_t flavors)
{
ipc_port_t old_port[EXC_TYPES_COUNT];
+ boolean_t privileged = current_task()->sec_token.val[0] == 0;
unsigned int i, j, count;
if (task == TASK_NULL)
return (KERN_INVALID_ARGUMENT);
- if (exception_mask & ~EXC_MASK_ALL)
+ if (exception_mask & ~EXC_MASK_VALID)
return (KERN_INVALID_ARGUMENT);
if (IP_VALID(new_port)) {
- switch (new_behavior) {
+ switch (new_behavior & ~MACH_EXCEPTION_CODES) {
case EXCEPTION_DEFAULT:
case EXCEPTION_STATE:
}
}
+ if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
+ return (KERN_INVALID_ARGUMENT);
+
itk_lock(task);
if (task->itk_self == IP_NULL) {
return (KERN_FAILURE);
}
- count = 0;
-
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
+ assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
+ for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
if (exception_mask & (1 << i)) {
for (j = 0; j < count; j++) {
/*
}
old_port[i] = task->exc_actions[i].port;
+
task->exc_actions[i].port = ipc_port_copy_send(new_port);
task->exc_actions[i].behavior = new_behavior;
task->exc_actions[i].flavor = new_flavor;
- if (count > *CountCnt)
- break;
+ task->exc_actions[i].privileged = privileged;
}
else
old_port[i] = IP_NULL;
itk_unlock(task);
- for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
+ while (--i >= FIRST_EXCEPTION) {
if (IP_VALID(old_port[i]))
ipc_port_release_send(old_port[i]);
+ }
if (IP_VALID(new_port)) /* consume send right */
ipc_port_release_send(new_port);
if (thread == THREAD_NULL)
return (KERN_INVALID_ARGUMENT);
- if (exception_mask & ~EXC_MASK_ALL)
+ if (exception_mask & ~EXC_MASK_VALID)
return (KERN_INVALID_ARGUMENT);
thread_mtx_lock(thread);
count = 0;
+ if (thread->exc_actions == NULL) {
+ goto done;
+ }
+
for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
if (exception_mask & (1 << i)) {
for (j = 0; j < count; ++j) {
}
}
+done:
thread_mtx_unlock(thread);
*CountCnt = count;
if (task == TASK_NULL)
return (KERN_INVALID_ARGUMENT);
- if (exception_mask & ~EXC_MASK_ALL)
+ if (exception_mask & ~EXC_MASK_VALID)
return (KERN_INVALID_ARGUMENT);
itk_lock(task);