X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..6601e61aa18bf4f09af135ff61fc7f4771d23b06:/osfmk/kern/ipc_tt.c diff --git a/osfmk/kern/ipc_tt.c b/osfmk/kern/ipc_tt.c index a32975e15..24f41f754 100644 --- a/osfmk/kern/ipc_tt.c +++ b/osfmk/kern/ipc_tt.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved. * * @APPLE_LICENSE_HEADER_START@ * @@ -56,24 +56,38 @@ * Task and thread related IPC functions. */ +#include #include -#include #include #include #include #include #include #include +#include #include #include #include #include +#include #include + +#include #include +#include #include -#include +#include +#include #include + +#include #include +#include +#include + +/* forward declarations */ +task_t convert_port_to_locked_task(ipc_port_t port); + /* * Routine: ipc_task_init @@ -110,16 +124,21 @@ ipc_task_init( task->itk_self = kport; task->itk_sself = ipc_port_make_send(kport); task->itk_space = space; - space->is_fast = task->kernel_loaded; + space->is_fast = FALSE; if (parent == TASK_NULL) { + ipc_port_t port; + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { task->exc_actions[i].port = IP_NULL; }/* for */ - task->exc_actions[EXC_MACH_SYSCALL].port = - ipc_port_make_send(realhost.host_self); - task->itk_host = ipc_port_make_send(realhost.host_self); + + kr = host_get_host_port(host_priv_self(), &port); + assert(kr == KERN_SUCCESS); + task->itk_host = port; + task->itk_bootstrap = IP_NULL; + for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) task->itk_registered[i] = IP_NULL; } else { @@ -141,6 +160,8 @@ ipc_task_init( parent->exc_actions[i].flavor; task->exc_actions[i].behavior = parent->exc_actions[i].behavior; + task->exc_actions[i].privileged = + parent->exc_actions[i].privileged; }/* for */ task->itk_host = ipc_port_copy_send(parent->itk_host); @@ -231,7 +252,8 @@ ipc_task_terminate( if (IP_VALID(task->exc_actions[i].port)) { ipc_port_release_send(task->exc_actions[i].port); } - }/* for */ + } + if (IP_VALID(task->itk_host)) ipc_port_release_send(task->itk_host); @@ -249,6 +271,72 @@ ipc_task_terminate( ipc_port_dealloc_kernel(kport); } +/* + * Routine: ipc_task_reset + * Purpose: + * Reset a task's IPC state to protect it when + * it enters an elevated security context. + * Conditions: + * Nothing locked. The task must be suspended. + * (Or the current thread must be in the task.) + */ + +void +ipc_task_reset( + task_t task) +{ + ipc_port_t old_kport, new_kport; + ipc_port_t old_sself; + ipc_port_t old_exc_actions[EXC_TYPES_COUNT]; + int i; + + new_kport = ipc_port_alloc_kernel(); + if (new_kport == IP_NULL) + panic("ipc_task_reset"); + + itk_lock(task); + + old_kport = task->itk_self; + + if (old_kport == IP_NULL) { + /* the task is already terminated (can this happen?) 
*/ + itk_unlock(task); + ipc_port_dealloc_kernel(new_kport); + return; + } + + task->itk_self = new_kport; + old_sself = task->itk_sself; + task->itk_sself = ipc_port_make_send(new_kport); + ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE); + ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK); + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (!task->exc_actions[i].privileged) { + old_exc_actions[i] = task->exc_actions[i].port; + task->exc_actions[i].port = IP_NULL; + } else { + old_exc_actions[i] = IP_NULL; + } + }/* for */ + + itk_unlock(task); + + /* release the naked send rights */ + + if (IP_VALID(old_sself)) + ipc_port_release_send(old_sself); + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (IP_VALID(old_exc_actions[i])) { + ipc_port_release_send(old_exc_actions[i]); + } + }/* for */ + + /* destroy the kernel port */ + ipc_port_dealloc_kernel(old_kport); +} + /* * Routine: ipc_thread_init * Purpose: @@ -261,97 +349,140 @@ void ipc_thread_init( thread_t thread) { + ipc_port_t kport; + int i; + + kport = ipc_port_alloc_kernel(); + if (kport == IP_NULL) + panic("ipc_thread_init"); + + thread->ith_self = kport; + thread->ith_sself = ipc_port_make_send(kport); + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) + thread->exc_actions[i].port = IP_NULL; + + ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD); + ipc_kmsg_queue_init(&thread->ith_messages); - thread->ith_mig_reply = MACH_PORT_NULL; + thread->ith_rpc_reply = IP_NULL; } +void +ipc_thread_disable( + thread_t thread) +{ + ipc_port_t kport = thread->ith_self; + + if (kport != IP_NULL) + ipc_kobject_set(kport, IKO_NULL, IKOT_NONE); +} + /* * Routine: ipc_thread_terminate * Purpose: * Clean up and destroy a thread's IPC state. * Conditions: - * Nothing locked. The thread must be suspended. - * (Or be the current thread.) + * Nothing locked. */ void ipc_thread_terminate( thread_t thread) { + ipc_port_t kport = thread->ith_self; + + if (kport != IP_NULL) { + int i; + + if (IP_VALID(thread->ith_sself)) + ipc_port_release_send(thread->ith_sself); + + thread->ith_sself = thread->ith_self = IP_NULL; + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { + if (IP_VALID(thread->exc_actions[i].port)) + ipc_port_release_send(thread->exc_actions[i].port); + } + + ipc_port_dealloc_kernel(kport); + } + assert(ipc_kmsg_queue_empty(&thread->ith_messages)); - if (thread->ith_rpc_reply != IP_NULL) - ipc_port_dealloc_reply(thread->ith_rpc_reply); + if (thread->ith_rpc_reply != IP_NULL) + ipc_port_dealloc_reply(thread->ith_rpc_reply); + thread->ith_rpc_reply = IP_NULL; } /* - * Routine: ipc_thr_act_init + * Routine: ipc_thread_reset * Purpose: - * Initialize an thr_act's IPC state. + * Reset the IPC state for a given Mach thread when + * its task enters an elevated security context. + * Both the thread port and its exception ports have + * to be reset. Its RPC reply port cannot have any + * rights outstanding, so it should be fine. * Conditions: * Nothing locked. 
*/ void -ipc_thr_act_init(task_t task, thread_act_t thr_act) -{ - ipc_port_t kport; int i; - - kport = ipc_port_alloc_kernel(); - if (kport == IP_NULL) - panic("ipc_thr_act_init"); - - thr_act->ith_self = kport; - thr_act->ith_sself = ipc_port_make_send(kport); - - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) - thr_act->exc_actions[i].port = IP_NULL; - - thr_act->exc_actions[EXC_MACH_SYSCALL].port = - ipc_port_make_send(realhost.host_self); - - ipc_kobject_set(kport, (ipc_kobject_t) thr_act, IKOT_ACT); -} - -void -ipc_thr_act_disable(thread_act_t thr_act) +ipc_thread_reset( + thread_t thread) { + ipc_port_t old_kport, new_kport; + ipc_port_t old_sself; + ipc_port_t old_exc_actions[EXC_TYPES_COUNT]; int i; - ipc_port_t kport; - - kport = thr_act->ith_self; - if (kport != IP_NULL) - ipc_kobject_set(kport, IKO_NULL, IKOT_NONE); -} + new_kport = ipc_port_alloc_kernel(); + if (new_kport == IP_NULL) + panic("ipc_thread_reset"); -void -ipc_thr_act_terminate(thread_act_t thr_act) -{ - ipc_port_t kport; int i; + thread_mtx_lock(thread); - kport = thr_act->ith_self; + old_kport = thread->ith_self; - if (kport == IP_NULL) { - /* the thread is already terminated (can this happen?) */ + if (old_kport == IP_NULL) { + /* the thread is already terminated (can this happen?) */ + thread_mtx_unlock(thread); + ipc_port_dealloc_kernel(new_kport); return; } - thr_act->ith_self = IP_NULL; + thread->ith_self = new_kport; + old_sself = thread->ith_sself; + thread->ith_sself = ipc_port_make_send(new_kport); + ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE); + ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD); + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + if (!thread->exc_actions[i].privileged) { + old_exc_actions[i] = thread->exc_actions[i].port; + thread->exc_actions[i].port = IP_NULL; + } else { + old_exc_actions[i] = IP_NULL; + } + }/* for */ + + thread_mtx_unlock(thread); /* release the naked send rights */ - if (IP_VALID(thr_act->ith_sself)) - ipc_port_release_send(thr_act->ith_sself); + if (IP_VALID(old_sself)) + ipc_port_release_send(old_sself); + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { - if (IP_VALID(thr_act->exc_actions[i].port)) - ipc_port_release_send(thr_act->exc_actions[i].port); - } + if (IP_VALID(old_exc_actions[i])) { + ipc_port_release_send(old_exc_actions[i]); + } + }/* for */ /* destroy the kernel port */ - ipc_port_dealloc_kernel(kport); + ipc_port_dealloc_kernel(old_kport); } /* @@ -393,27 +524,30 @@ retrieve_task_self_fast( } /* - * Routine: retrieve_act_self_fast + * Routine: retrieve_thread_self_fast * Purpose: - * Optimized version of retrieve_thread_self, - * that only works for the current thread. - * * Return a send right (possibly null/dead) * for the thread's user-visible self port. + * + * Only works for the current thread. + * * Conditions: * Nothing locked.
*/ ipc_port_t -retrieve_act_self_fast(thread_act_t thr_act) +retrieve_thread_self_fast( + thread_t thread) { register ipc_port_t port; - assert(thr_act == current_act()); - act_lock(thr_act); - assert(thr_act->ith_self != IP_NULL); + assert(thread == current_thread()); + + thread_mtx_lock(thread); - if ((port = thr_act->ith_sself) == thr_act->ith_self) { + assert(thread->ith_self != IP_NULL); + + if ((port = thread->ith_sself) == thread->ith_self) { /* no interposing */ ip_lock(port); @@ -421,9 +555,11 @@ retrieve_act_self_fast(thread_act_t thr_act) ip_reference(port); port->ip_srights++; ip_unlock(port); - } else + } + else port = ipc_port_copy_send(port); - act_unlock(thr_act); + + thread_mtx_unlock(thread); return port; } @@ -440,13 +576,16 @@ retrieve_act_self_fast(thread_act_t thr_act) */ mach_port_name_t -task_self_trap(void) +task_self_trap( + __unused struct task_self_trap_args *args) { task_t task = current_task(); ipc_port_t sright; + mach_port_name_t name; sright = retrieve_task_self_fast(task); - return ipc_port_copyout_send(sright, task->itk_space); + name = ipc_port_copyout_send(sright, task->itk_space); + return name; } /* @@ -461,14 +600,18 @@ task_self_trap(void) */ mach_port_name_t -thread_self_trap(void) +thread_self_trap( + __unused struct thread_self_trap_args *args) { - thread_act_t thr_act = current_act(); - task_t task = thr_act->task; + thread_t thread = current_thread(); + task_t task = thread->task; ipc_port_t sright; + mach_port_name_t name; + + sright = retrieve_thread_self_fast(thread); + name = ipc_port_copyout_send(sright, task->itk_space); + return name; - sright = retrieve_act_self_fast(thr_act); - return ipc_port_copyout_send(sright, task->itk_space); } /* @@ -483,7 +626,8 @@ thread_self_trap(void) */ mach_port_name_t -mach_reply_port(void) +mach_reply_port( + __unused struct mach_reply_port_args *args) { ipc_port_t port; mach_port_name_t name; @@ -494,10 +638,111 @@ mach_reply_port(void) ip_unlock(port); else name = MACH_PORT_NULL; - return name; } +/* + * Routine: thread_get_special_port [kernel call] + * Purpose: + * Clones a send right for one of the thread's + * special ports. + * Conditions: + * Nothing locked. + * Returns: + * KERN_SUCCESS Extracted a send right. + * KERN_INVALID_ARGUMENT The thread is null. + * KERN_FAILURE The thread is dead. + * KERN_INVALID_ARGUMENT Invalid special port. + */ + +kern_return_t +thread_get_special_port( + thread_t thread, + int which, + ipc_port_t *portp) +{ + kern_return_t result = KERN_SUCCESS; + ipc_port_t *whichp; + + if (thread == THREAD_NULL) + return (KERN_INVALID_ARGUMENT); + + switch (which) { + + case THREAD_KERNEL_PORT: + whichp = &thread->ith_sself; + break; + + default: + return (KERN_INVALID_ARGUMENT); + } + + thread_mtx_lock(thread); + + if (thread->active) + *portp = ipc_port_copy_send(*whichp); + else + result = KERN_FAILURE; + + thread_mtx_unlock(thread); + + return (result); +} + +/* + * Routine: thread_set_special_port [kernel call] + * Purpose: + * Changes one of the thread's special ports, + * setting it to the supplied send right. + * Conditions: + * Nothing locked. If successful, consumes + * the supplied send right. + * Returns: + * KERN_SUCCESS Changed the special port. + * KERN_INVALID_ARGUMENT The thread is null. + * KERN_FAILURE The thread is dead. + * KERN_INVALID_ARGUMENT Invalid special port. 
+ */ + +kern_return_t +thread_set_special_port( + thread_t thread, + int which, + ipc_port_t port) +{ + kern_return_t result = KERN_SUCCESS; + ipc_port_t *whichp, old = IP_NULL; + + if (thread == THREAD_NULL) + return (KERN_INVALID_ARGUMENT); + + switch (which) { + + case THREAD_KERNEL_PORT: + whichp = &thread->ith_sself; + break; + + default: + return (KERN_INVALID_ARGUMENT); + } + + thread_mtx_lock(thread); + + if (thread->active) { + old = *whichp; + *whichp = port; + } + else + result = KERN_FAILURE; + + thread_mtx_unlock(thread); + + if (IP_VALID(old)) + ipc_port_release_send(old); + + return (result); +} + /* * Routine: task_get_special_port [kernel call] * Purpose: @@ -656,7 +901,7 @@ mach_ports_register( mach_msg_type_number_t portsCnt) { ipc_port_t ports[TASK_PORT_REGISTER_MAX]; - int i; + unsigned int i; if ((task == TASK_NULL) || (portsCnt > TASK_PORT_REGISTER_MAX)) @@ -702,7 +947,7 @@ mach_ports_register( */ if (portsCnt != 0) - kfree((vm_offset_t) memory, + kfree(memory, (vm_size_t) (portsCnt * sizeof(mach_port_t))); return KERN_SUCCESS; @@ -728,13 +973,11 @@ mach_ports_lookup( mach_port_array_t *portsp, mach_msg_type_number_t *portsCnt) { - vm_offset_t memory; + void *memory; vm_size_t size; ipc_port_t *ports; int i; - kern_return_t kr; - if (task == TASK_NULL) return KERN_INVALID_ARGUMENT; @@ -818,16 +1061,25 @@ convert_port_to_locked_task(ipc_port_t port) */ task_t convert_port_to_task( - ipc_port_t port) + ipc_port_t port) { - task_t task; + task_t task = TASK_NULL; - task = convert_port_to_locked_task(port); - if (task) { - task->ref_count++; - task_unlock(task); + if (IP_VALID(port)) { + ip_lock(port); + + if ( ip_active(port) && + ip_kotype(port) == IKOT_TASK ) { + task = (task_t)port->ip_kobject; + assert(task != TASK_NULL); + + task_reference_internal(task); + } + + ip_unlock(port); } - return task; + + return (task); } /* @@ -862,151 +1114,6 @@ convert_port_to_space( return (space); } -upl_t -convert_port_to_upl( - ipc_port_t port) -{ - upl_t upl; - - ip_lock(port); - if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) { - ip_unlock(port); - return (upl_t)NULL; - } - upl = (upl_t) port->ip_kobject; - ip_unlock(port); - upl_lock(upl); - upl->ref_count+=1; - upl_unlock(upl); - return upl; -} - -/* - * Routine: convert_port_entry_to_map - * Purpose: - * Convert from a port specifying an entry or a task - * to a map. Doesn't consume the port ref; produces a map ref, - * which may be null. Unlike convert_port_to_map, the - * port may be task or a named entry backed. - * Conditions: - * Nothing locked. 
- */ - - -vm_map_t -convert_port_entry_to_map( - ipc_port_t port) -{ - task_t task; - vm_map_t map; - vm_named_entry_t named_entry; - - if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { - while(TRUE) { - ip_lock(port); - if(ip_active(port) && (ip_kotype(port) - == IKOT_NAMED_ENTRY)) { - named_entry = - (vm_named_entry_t)port->ip_kobject; - if (!(mutex_try(&(named_entry)->Lock))) { - ip_unlock(port); - mutex_pause(); - continue; - } - named_entry->ref_count++; - mutex_unlock(&(named_entry)->Lock); - ip_unlock(port); - if ((named_entry->is_sub_map) && - (named_entry->protection - & VM_PROT_WRITE)) { - map = named_entry->backing.map; - } else { - mach_destroy_memory_entry(port); - return VM_MAP_NULL; - } - vm_map_reference_swap(map); - mach_destroy_memory_entry(port); - break; - } - else - return VM_MAP_NULL; - } - } else { - task_t task; - - task = convert_port_to_locked_task(port); - - if (task == TASK_NULL) - return VM_MAP_NULL; - - if (!task->active) { - task_unlock(task); - return VM_MAP_NULL; - } - - map = task->map; - vm_map_reference_swap(map); - task_unlock(task); - } - - return map; -} - -/* - * Routine: convert_port_entry_to_object - * Purpose: - * Convert from a port specifying a named entry to an - * object. Doesn't consume the port ref; produces a map ref, - * which may be null. - * Conditions: - * Nothing locked. - */ - - -vm_object_t -convert_port_entry_to_object( - ipc_port_t port) -{ - vm_object_t object; - vm_named_entry_t named_entry; - - if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) { - while(TRUE) { - ip_lock(port); - if(ip_active(port) && (ip_kotype(port) - == IKOT_NAMED_ENTRY)) { - named_entry = - (vm_named_entry_t)port->ip_kobject; - if (!(mutex_try(&(named_entry)->Lock))) { - ip_unlock(port); - mutex_pause(); - continue; - } - named_entry->ref_count++; - mutex_unlock(&(named_entry)->Lock); - ip_unlock(port); - if ((!named_entry->is_sub_map) && - (named_entry->protection - & VM_PROT_WRITE)) { - object = named_entry->object; - } else { - mach_destroy_memory_entry(port); - return (vm_object_t)NULL; - } - vm_object_reference(named_entry->object); - mach_destroy_memory_entry(port); - break; - } - else - return (vm_object_t)NULL; - } - } else { - return (vm_object_t)NULL; - } - - return object; -} - /* * Routine: convert_port_to_map * Purpose: @@ -1042,88 +1149,66 @@ convert_port_to_map( /* - * Routine: convert_port_to_act + * Routine: convert_port_to_thread * Purpose: - * Convert from a port to a thr_act. - * Doesn't consume the port ref; produces an thr_act ref, + * Convert from a port to a thread. + * Doesn't consume the port ref; produces an thread ref, * which may be null. * Conditions: * Nothing locked. */ -thread_act_t -convert_port_to_act( ipc_port_t port ) +thread_t +convert_port_to_thread( + ipc_port_t port) { - boolean_t r; - thread_act_t thr_act = 0; + thread_t thread = THREAD_NULL; - r = FALSE; - while (!r && IP_VALID(port)) { + if (IP_VALID(port)) { ip_lock(port); - r = ref_act_port_locked(port, &thr_act); - /* port unlocked */ - } - return (thr_act); -} -boolean_t -ref_act_port_locked( ipc_port_t port, thread_act_t *pthr_act ) -{ - thread_act_t thr_act; - - thr_act = 0; - if (ip_active(port) && - (ip_kotype(port) == IKOT_ACT)) { - thr_act = (thread_act_t) port->ip_kobject; - assert(thr_act != THR_ACT_NULL); + if ( ip_active(port) && + ip_kotype(port) == IKOT_THREAD ) { + thread = (thread_t)port->ip_kobject; + assert(thread != THREAD_NULL); - /* - * Normal lock ordering is act_lock(), then ip_lock(). 
- * Allow out-of-order locking here, using - * act_reference_act_locked() to accomodate it. - */ - if (!act_lock_try(thr_act)) { - ip_unlock(port); - mutex_pause(); - return (FALSE); + thread_reference_internal(thread); } - act_locked_act_reference(thr_act); - act_unlock(thr_act); + + ip_unlock(port); } - *pthr_act = thr_act; - ip_unlock(port); - return (TRUE); + + return (thread); } /* - * Routine: port_name_to_act + * Routine: port_name_to_thread * Purpose: - * Convert from a port name to an act reference - * A name of MACH_PORT_NULL is valid for the null act + * Convert from a port name to a thread reference. + * A name of MACH_PORT_NULL is valid for the null thread. * Conditions: * Nothing locked. */ -thread_act_t -port_name_to_act( +thread_t +port_name_to_thread( mach_port_name_t name) { - thread_act_t thr_act = THR_ACT_NULL; - ipc_port_t kern_port; - kern_return_t kr; + thread_t thread = THREAD_NULL; + ipc_port_t kport; if (MACH_PORT_VALID(name)) { - kr = ipc_object_copyin(current_space(), name, - MACH_MSG_TYPE_COPY_SEND, - (ipc_object_t *) &kern_port); - if (kr != KERN_SUCCESS) - return THR_ACT_NULL; + if (ipc_object_copyin(current_space(), name, + MACH_MSG_TYPE_COPY_SEND, + (ipc_object_t *)&kport) != KERN_SUCCESS) + return (THREAD_NULL); - thr_act = convert_port_to_act(kern_port); + thread = convert_port_to_thread(kport); - if (IP_VALID(kern_port)) - ipc_port_release_send(kern_port); + if (IP_VALID(kport)) + ipc_port_release_send(kport); } - return thr_act; + + return (thread); } task_t @@ -1167,12 +1252,6 @@ convert_task_to_port( itk_lock(task); if (task->itk_self != IP_NULL) -#if NORMA_TASK - if (task->map == VM_MAP_NULL) - /* norma placeholder task */ - port = ipc_port_copy_send(task->itk_self); - else -#endif /* NORMA_TASK */ port = ipc_port_make_send(task->itk_self); else port = IP_NULL; @@ -1183,30 +1262,33 @@ } /* - * Routine: convert_act_to_port + * Routine: convert_thread_to_port * Purpose: - * Convert from a thr_act to a port. - * Consumes an thr_act ref; produces a naked send right + * Convert from a thread to a port. + * Consumes a thread ref; produces a naked send right * which may be invalid. * Conditions: * Nothing locked.
*/ ipc_port_t -convert_act_to_port(thr_act) - thread_act_t thr_act; +convert_thread_to_port( + thread_t thread) { - ipc_port_t port; + ipc_port_t port; + + thread_mtx_lock(thread); - act_lock(thr_act); - if (thr_act->ith_self != IP_NULL) - port = ipc_port_make_send(thr_act->ith_self); + if (thread->ith_self != IP_NULL) + port = ipc_port_make_send(thread->ith_self); else port = IP_NULL; - act_unlock(thr_act); - act_deallocate(thr_act); - return port; + thread_mtx_unlock(thread); + + thread_deallocate(thread); + + return (port); } /* @@ -1245,29 +1327,32 @@ space_deallocate( kern_return_t thread_set_exception_ports( - thread_act_t thr_act, + thread_t thread, exception_mask_t exception_mask, - ipc_port_t new_port, - exception_behavior_t new_behavior, - thread_state_flavor_t new_flavor) + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor) { + ipc_port_t old_port[EXC_TYPES_COUNT]; + boolean_t privileged = current_task()->sec_token.val[0] == 0; register int i; - ipc_port_t old_port[EXC_TYPES_COUNT]; - if (!thr_act) - return KERN_INVALID_ARGUMENT; + if (thread == THREAD_NULL) + return (KERN_INVALID_ARGUMENT); if (exception_mask & ~EXC_MASK_ALL) - return KERN_INVALID_ARGUMENT; + return (KERN_INVALID_ARGUMENT); if (IP_VALID(new_port)) { switch (new_behavior) { + case EXCEPTION_DEFAULT: case EXCEPTION_STATE: case EXCEPTION_STATE_IDENTITY: break; + default: - return KERN_INVALID_ARGUMENT; + return (KERN_INVALID_ARGUMENT); } } @@ -1276,101 +1361,104 @@ thread_set_exception_ports( * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in * osfmk/mach/ARCHITECTURE/thread_status.h */ - if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) { - return KERN_INVALID_ARGUMENT; - } + if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) + return (KERN_INVALID_ARGUMENT); - act_lock(thr_act); - if (!thr_act->active) { - act_unlock(thr_act); - return KERN_FAILURE; + thread_mtx_lock(thread); + + if (!thread->active) { + thread_mtx_unlock(thread); + + return (KERN_FAILURE); } - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { if (exception_mask & (1 << i)) { - old_port[i] = thr_act->exc_actions[i].port; - thr_act->exc_actions[i].port = - ipc_port_copy_send(new_port); - thr_act->exc_actions[i].behavior = new_behavior; - thr_act->exc_actions[i].flavor = new_flavor; - } else + old_port[i] = thread->exc_actions[i].port; + thread->exc_actions[i].port = ipc_port_copy_send(new_port); + thread->exc_actions[i].behavior = new_behavior; + thread->exc_actions[i].flavor = new_flavor; + thread->exc_actions[i].privileged = privileged; + } + else old_port[i] = IP_NULL; - }/* for */ - /* - * Consume send rights without any lock held. 
- */ - act_unlock(thr_act); - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) + } + + thread_mtx_unlock(thread); + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) if (IP_VALID(old_port[i])) ipc_port_release_send(old_port[i]); + if (IP_VALID(new_port)) /* consume send right */ ipc_port_release_send(new_port); - return KERN_SUCCESS; -}/* thread_set_exception_port */ + return (KERN_SUCCESS); +} kern_return_t task_set_exception_ports( - task_t task, + task_t task, exception_mask_t exception_mask, - ipc_port_t new_port, - exception_behavior_t new_behavior, - thread_state_flavor_t new_flavor) + ipc_port_t new_port, + exception_behavior_t new_behavior, + thread_state_flavor_t new_flavor) { + ipc_port_t old_port[EXC_TYPES_COUNT]; + boolean_t privileged = current_task()->sec_token.val[0] == 0; register int i; - ipc_port_t old_port[EXC_TYPES_COUNT]; - if (task == TASK_NULL) { - return KERN_INVALID_ARGUMENT; - } + if (task == TASK_NULL) + return (KERN_INVALID_ARGUMENT); - if (exception_mask & ~EXC_MASK_ALL) { - return KERN_INVALID_ARGUMENT; - } + if (exception_mask & ~EXC_MASK_ALL) + return (KERN_INVALID_ARGUMENT); if (IP_VALID(new_port)) { switch (new_behavior) { + case EXCEPTION_DEFAULT: case EXCEPTION_STATE: case EXCEPTION_STATE_IDENTITY: break; + default: - return KERN_INVALID_ARGUMENT; + return (KERN_INVALID_ARGUMENT); } } - /* Cannot easily check "new_flavor", but that just means that - * the flavor in the generated exception message might be garbage: - * GIGO */ - - itk_lock(task); - if (task->itk_self == IP_NULL) { - itk_unlock(task); - return KERN_FAILURE; - } - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + itk_lock(task); + + if (task->itk_self == IP_NULL) { + itk_unlock(task); + + return (KERN_FAILURE); + } + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { if (exception_mask & (1 << i)) { old_port[i] = task->exc_actions[i].port; task->exc_actions[i].port = ipc_port_copy_send(new_port); task->exc_actions[i].behavior = new_behavior; task->exc_actions[i].flavor = new_flavor; - } else + task->exc_actions[i].privileged = privileged; + } + else old_port[i] = IP_NULL; - }/* for */ + } - /* - * Consume send rights without any lock held. 
- */ - itk_unlock(task); - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) + itk_unlock(task); + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) if (IP_VALID(old_port[i])) ipc_port_release_send(old_port[i]); + if (IP_VALID(new_port)) /* consume send right */ ipc_port_release_send(new_port); - return KERN_SUCCESS; -}/* task_set_exception_port */ + return (KERN_SUCCESS); +} /* * Routine: thread/task_swap_exception_ports [kernel call] @@ -1401,197 +1489,194 @@ task_set_exception_ports( kern_return_t thread_swap_exception_ports( - thread_act_t thr_act, - exception_mask_t exception_mask, - ipc_port_t new_port, + thread_t thread, + exception_mask_t exception_mask, + ipc_port_t new_port, exception_behavior_t new_behavior, thread_state_flavor_t new_flavor, exception_mask_array_t masks, - mach_msg_type_number_t * CountCnt, + mach_msg_type_number_t *CountCnt, exception_port_array_t ports, - exception_behavior_array_t behaviors, - thread_state_flavor_array_t flavors ) + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors) { - register int i, - j, - count; - ipc_port_t old_port[EXC_TYPES_COUNT]; + ipc_port_t old_port[EXC_TYPES_COUNT]; + boolean_t privileged = current_task()->sec_token.val[0] == 0; + unsigned int i, j, count; - if (!thr_act) - return KERN_INVALID_ARGUMENT; + if (thread == THREAD_NULL) + return (KERN_INVALID_ARGUMENT); - if (exception_mask & ~EXC_MASK_ALL) { - return KERN_INVALID_ARGUMENT; - } + if (exception_mask & ~EXC_MASK_ALL) + return (KERN_INVALID_ARGUMENT); if (IP_VALID(new_port)) { switch (new_behavior) { + case EXCEPTION_DEFAULT: case EXCEPTION_STATE: case EXCEPTION_STATE_IDENTITY: break; + default: - return KERN_INVALID_ARGUMENT; + return (KERN_INVALID_ARGUMENT); } } - /* Cannot easily check "new_flavor", but that just means that - * the flavor in the generated exception message might be garbage: - * GIGO */ - act_lock(thr_act); - if (!thr_act->active) { - act_unlock(thr_act); - return KERN_FAILURE; + thread_mtx_lock(thread); + + if (!thread->active) { + thread_mtx_unlock(thread); + + return (KERN_FAILURE); } count = 0; - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { if (exception_mask & (1 << i)) { - for (j = 0; j < count; j++) { -/* - * search for an identical entry, if found - * set corresponding mask for this exception. - */ - if (thr_act->exc_actions[i].port == ports[j] && - thr_act->exc_actions[i].behavior ==behaviors[j] - && thr_act->exc_actions[i].flavor ==flavors[j]) - { + for (j = 0; j < count; ++j) { + /* + * search for an identical entry, if found + * set corresponding mask for this exception. 
+ */ + if ( thread->exc_actions[i].port == ports[j] && + thread->exc_actions[i].behavior == behaviors[j] && + thread->exc_actions[i].flavor == flavors[j] ) { masks[j] |= (1 << i); break; } - }/* for */ + } + if (j == count) { masks[j] = (1 << i); - ports[j] = - ipc_port_copy_send(thr_act->exc_actions[i].port); + ports[j] = ipc_port_copy_send(thread->exc_actions[i].port); - behaviors[j] = thr_act->exc_actions[i].behavior; - flavors[j] = thr_act->exc_actions[i].flavor; - count++; + behaviors[j] = thread->exc_actions[i].behavior; + flavors[j] = thread->exc_actions[i].flavor; + ++count; } - old_port[i] = thr_act->exc_actions[i].port; - thr_act->exc_actions[i].port = - ipc_port_copy_send(new_port); - thr_act->exc_actions[i].behavior = new_behavior; - thr_act->exc_actions[i].flavor = new_flavor; - if (count > *CountCnt) { + old_port[i] = thread->exc_actions[i].port; + thread->exc_actions[i].port = ipc_port_copy_send(new_port); + thread->exc_actions[i].behavior = new_behavior; + thread->exc_actions[i].flavor = new_flavor; + thread->exc_actions[i].privileged = privileged; + if (count > *CountCnt) break; - } - } else + } + else old_port[i] = IP_NULL; - }/* for */ + } - /* - * Consume send rights without any lock held. - */ - act_unlock(thr_act); - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) + thread_mtx_unlock(thread); + + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) if (IP_VALID(old_port[i])) ipc_port_release_send(old_port[i]); + if (IP_VALID(new_port)) /* consume send right */ ipc_port_release_send(new_port); + *CountCnt = count; - return KERN_SUCCESS; -}/* thread_swap_exception_ports */ + + return (KERN_SUCCESS); +} kern_return_t task_swap_exception_ports( - task_t task, - exception_mask_t exception_mask, - ipc_port_t new_port, + task_t task, + exception_mask_t exception_mask, + ipc_port_t new_port, exception_behavior_t new_behavior, thread_state_flavor_t new_flavor, exception_mask_array_t masks, - mach_msg_type_number_t * CountCnt, + mach_msg_type_number_t *CountCnt, exception_port_array_t ports, - exception_behavior_array_t behaviors, - thread_state_flavor_array_t flavors ) + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors) { - register int i, - j, - count; - ipc_port_t old_port[EXC_TYPES_COUNT]; + ipc_port_t old_port[EXC_TYPES_COUNT]; + boolean_t privileged = current_task()->sec_token.val[0] == 0; + unsigned int i, j, count; if (task == TASK_NULL) - return KERN_INVALID_ARGUMENT; + return (KERN_INVALID_ARGUMENT); - if (exception_mask & ~EXC_MASK_ALL) { - return KERN_INVALID_ARGUMENT; - } + if (exception_mask & ~EXC_MASK_ALL) + return (KERN_INVALID_ARGUMENT); if (IP_VALID(new_port)) { switch (new_behavior) { + case EXCEPTION_DEFAULT: case EXCEPTION_STATE: case EXCEPTION_STATE_IDENTITY: break; + default: - return KERN_INVALID_ARGUMENT; + return (KERN_INVALID_ARGUMENT); } } - /* Cannot easily check "new_flavor", but that just means that - * the flavor in the generated exception message might be garbage: - * GIGO */ itk_lock(task); + if (task->itk_self == IP_NULL) { itk_unlock(task); - return KERN_FAILURE; + + return (KERN_FAILURE); } count = 0; - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { if (exception_mask & (1 << i)) { for (j = 0; j < count; j++) { -/* - * search for an identical entry, if found - * set corresponding mask for this exception. 
- */ - if (task->exc_actions[i].port == ports[j] && - task->exc_actions[i].behavior == behaviors[j] - && task->exc_actions[i].flavor == flavors[j]) - { + /* + * search for an identical entry, if found + * set corresponding mask for this exception. + */ + if ( task->exc_actions[i].port == ports[j] && + task->exc_actions[i].behavior == behaviors[j] && + task->exc_actions[i].flavor == flavors[j] ) { masks[j] |= (1 << i); break; } - }/* for */ + } + if (j == count) { masks[j] = (1 << i); - ports[j] = - ipc_port_copy_send(task->exc_actions[i].port); + ports[j] = ipc_port_copy_send(task->exc_actions[i].port); behaviors[j] = task->exc_actions[i].behavior; flavors[j] = task->exc_actions[i].flavor; - count++; + ++count; } + old_port[i] = task->exc_actions[i].port; - task->exc_actions[i].port = - ipc_port_copy_send(new_port); + task->exc_actions[i].port = ipc_port_copy_send(new_port); task->exc_actions[i].behavior = new_behavior; task->exc_actions[i].flavor = new_flavor; - if (count > *CountCnt) { + task->exc_actions[i].privileged = privileged; + if (count > *CountCnt) break; - } - } else + } + else old_port[i] = IP_NULL; - }/* for */ - + } - /* - * Consume send rights without any lock held. - */ itk_unlock(task); + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) if (IP_VALID(old_port[i])) ipc_port_release_send(old_port[i]); + if (IP_VALID(new_port)) /* consume send right */ ipc_port_release_send(new_port); + *CountCnt = count; - return KERN_SUCCESS; -}/* task_swap_exception_ports */ + return (KERN_SUCCESS); +} /* * Routine: thread/task_get_exception_ports [kernel call] @@ -1614,128 +1699,124 @@ task_swap_exception_ports( kern_return_t thread_get_exception_ports( - thread_act_t thr_act, - exception_mask_t exception_mask, + thread_t thread, + exception_mask_t exception_mask, exception_mask_array_t masks, - mach_msg_type_number_t * CountCnt, + mach_msg_type_number_t *CountCnt, exception_port_array_t ports, - exception_behavior_array_t behaviors, - thread_state_flavor_array_t flavors ) + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors) { - register int i, - j, - count; + unsigned int i, j, count; - if (!thr_act) - return KERN_INVALID_ARGUMENT; + if (thread == THREAD_NULL) + return (KERN_INVALID_ARGUMENT); - if (exception_mask & ~EXC_MASK_ALL) { - return KERN_INVALID_ARGUMENT; - } + if (exception_mask & ~EXC_MASK_ALL) + return (KERN_INVALID_ARGUMENT); - act_lock(thr_act); - if (!thr_act->active) { - act_unlock(thr_act); - return KERN_FAILURE; + thread_mtx_lock(thread); + + if (!thread->active) { + thread_mtx_unlock(thread); + + return (KERN_FAILURE); } count = 0; - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { if (exception_mask & (1 << i)) { - for (j = 0; j < count; j++) { -/* - * search for an identical entry, if found - * set corresponding mask for this exception. - */ - if (thr_act->exc_actions[i].port == ports[j] && - thr_act->exc_actions[i].behavior ==behaviors[j] - && thr_act->exc_actions[i].flavor == flavors[j]) - { + for (j = 0; j < count; ++j) { + /* + * search for an identical entry, if found + * set corresponding mask for this exception. 
+ */ + if ( thread->exc_actions[i].port == ports[j] && + thread->exc_actions[i].behavior ==behaviors[j] && + thread->exc_actions[i].flavor == flavors[j] ) { masks[j] |= (1 << i); break; } - }/* for */ + } + if (j == count) { masks[j] = (1 << i); - ports[j] = - ipc_port_copy_send(thr_act->exc_actions[i].port); - behaviors[j] = thr_act->exc_actions[i].behavior; - flavors[j] = thr_act->exc_actions[i].flavor; - count++; - if (count >= *CountCnt) { + ports[j] = ipc_port_copy_send(thread->exc_actions[i].port); + behaviors[j] = thread->exc_actions[i].behavior; + flavors[j] = thread->exc_actions[i].flavor; + ++count; + if (count >= *CountCnt) break; - } } } - }/* for */ + } - act_unlock(thr_act); + thread_mtx_unlock(thread); *CountCnt = count; - return KERN_SUCCESS; -}/* thread_get_exception_ports */ + + return (KERN_SUCCESS); +} kern_return_t task_get_exception_ports( - task_t task, - exception_mask_t exception_mask, + task_t task, + exception_mask_t exception_mask, exception_mask_array_t masks, - mach_msg_type_number_t * CountCnt, + mach_msg_type_number_t *CountCnt, exception_port_array_t ports, - exception_behavior_array_t behaviors, - thread_state_flavor_array_t flavors ) + exception_behavior_array_t behaviors, + thread_state_flavor_array_t flavors) { - register int i, - j, - count; + unsigned int i, j, count; if (task == TASK_NULL) - return KERN_INVALID_ARGUMENT; + return (KERN_INVALID_ARGUMENT); - if (exception_mask & ~EXC_MASK_ALL) { - return KERN_INVALID_ARGUMENT; - } + if (exception_mask & ~EXC_MASK_ALL) + return (KERN_INVALID_ARGUMENT); itk_lock(task); + if (task->itk_self == IP_NULL) { itk_unlock(task); - return KERN_FAILURE; + + return (KERN_FAILURE); } count = 0; - for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) { + for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) { if (exception_mask & (1 << i)) { - for (j = 0; j < count; j++) { -/* - * search for an identical entry, if found - * set corresponding mask for this exception. - */ - if (task->exc_actions[i].port == ports[j] && - task->exc_actions[i].behavior == behaviors[j] - && task->exc_actions[i].flavor == flavors[j]) - { + for (j = 0; j < count; ++j) { + /* + * search for an identical entry, if found + * set corresponding mask for this exception. + */ + if ( task->exc_actions[i].port == ports[j] && + task->exc_actions[i].behavior == behaviors[j] && + task->exc_actions[i].flavor == flavors[j] ) { masks[j] |= (1 << i); break; } - }/* for */ + } + if (j == count) { masks[j] = (1 << i); - ports[j] = - ipc_port_copy_send(task->exc_actions[i].port); + ports[j] = ipc_port_copy_send(task->exc_actions[i].port); behaviors[j] = task->exc_actions[i].behavior; flavors[j] = task->exc_actions[i].flavor; - count++; - if (count > *CountCnt) { + ++count; + if (count > *CountCnt) break; - } } } - }/* for */ + } itk_unlock(task); *CountCnt = count; - return KERN_SUCCESS; -}/* task_get_exception_ports */ + + return (KERN_SUCCESS); +}