X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6d2010ae8f7a6078e10b361c6962983bab233e0f..0a7de7458d150b5d4dffc935ba399be265ef0a1a:/osfmk/kern/thread_act.c

diff --git a/osfmk/kern/thread_act.c b/osfmk/kern/thread_act.c
index 455a0fb01..c93dda8e3 100644
--- a/osfmk/kern/thread_act.c
+++ b/osfmk/kern/thread_act.c
@@ -1,8 +1,8 @@
 /*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- * 
+ *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
@@ -11,10 +11,10 @@
  * unlawful or unlicensed copies of an Apple operating system, or to
  * circumvent, violate, or enable the circumvention or violation of, any
  * terms of an Apple operating system software license agreement.
- * 
+ *
  * Please obtain a copy of the License at
  * http://www.opensource.apple.com/apsl/ and read it before using this file.
- * 
+ *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
@@ -22,7 +22,7 @@
  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
  * Please see the License for the specific language governing rights and
  * limitations under the License.
- * 
+ *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
@@ -49,10 +49,9 @@
  *
  * Thread management routines
  */
+
 #include 
 #include 
-#include 
-#include 
 #include 
 #include 
 
@@ -76,30 +75,79 @@
 #include 
 #include 
 
-#include 
+#include 
 
-void	act_abort(thread_t);
-void	install_special_handler_locked(thread_t);
-void	special_handler_continue(void);
+#include 
+
+static void act_abort(thread_t thread);
+
+static void thread_suspended(void *arg, wait_result_t result);
+static void thread_set_apc_ast(thread_t thread);
+static void thread_set_apc_ast_locked(thread_t thread);
 
 /*
  * Internal routine to mark a thread as started.
- * Always called with the thread locked.
- *
- * Note: function intentionally declared with the noinline attribute to
- * prevent multiple declaration of probe symbols in this file; we would
- * prefer "#pragma noinline", but gcc does not support it.
- * PR-6385749 -- the lwp-start probe should fire from within the context
- * of the newly created thread. Commented out for now, in case we
- * turn it into a dead code probe.
+ * Always called with the thread mutex locked.
  */
 void
-thread_start_internal(
-	thread_t		thread)
+thread_start(
+	thread_t                thread)
 {
 	clear_wait(thread, THREAD_AWAKENED);
 	thread->started = TRUE;
-	// DTRACE_PROC1(lwp__start, thread_t, thread);
+}
+
+/*
+ * Internal routine to mark a thread as waiting
+ * right after it has been created. The caller
+ * is responsible to call wakeup()/thread_wakeup()
+ * or thread_terminate() to get it going.
+ *
+ * Always called with the thread mutex locked.
+ *
+ * Task and task_threads mutexes also held
+ * (so nobody can set the thread running before
+ * this point)
+ *
+ * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
+ * to allow termination from this point forward.
+ */
+void
+thread_start_in_assert_wait(
+	thread_t                thread,
+	event_t                 event,
+	wait_interrupt_t        interruptible)
+{
+	struct waitq *waitq = assert_wait_queue(event);
+	wait_result_t wait_result;
+	spl_t spl;
+
+	spl = splsched();
+	waitq_lock(waitq);
+
+	/* clear out startup condition (safe because thread not started yet) */
+	thread_lock(thread);
+	assert(!thread->started);
+	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
+	thread->state &= ~(TH_WAIT | TH_UNINT);
+	thread_unlock(thread);
+
+	/* assert wait interruptibly forever */
+	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
+	    interruptible,
+	    TIMEOUT_URGENCY_SYS_NORMAL,
+	    TIMEOUT_WAIT_FOREVER,
+	    TIMEOUT_NO_LEEWAY,
+	    thread);
+	assert(wait_result == THREAD_WAITING);
+
+	/* mark thread started while we still hold the waitq lock */
+	thread_lock(thread);
+	thread->started = TRUE;
+	thread_unlock(thread);
+
+	waitq_unlock(waitq);
+	splx(spl);
 }
 
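A caller-side sketch of the new parking primitive (illustrative only, not from this diff: example_spawn_parked is a hypothetical name, and the task/task_threads locking the comment above requires is elided):

    static kern_return_t
    example_spawn_parked(task_t task, event_t event, thread_t *out)
    {
            thread_t thread;
            kern_return_t kr;

            kr = thread_create(task, &thread);      /* new thread: !started */
            if (kr != KERN_SUCCESS) {
                    return kr;
            }

            thread_mtx_lock(thread);
            /* park interruptibly on `event`; termination works from here on */
            thread_start_in_assert_wait(thread, event, THREAD_INTERRUPTIBLE);
            thread_mtx_unlock(thread);

            *out = thread;  /* later: thread_wakeup(event) or thread_terminate() */
            return KERN_SUCCESS;
    }
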
 /*
@@ -108,9 +156,9 @@ thread_start_internal(
  */
 kern_return_t
 thread_terminate_internal(
-	thread_t		thread)
+	thread_t                thread)
 {
-	kern_return_t		result = KERN_SUCCESS;
+	kern_return_t           result = KERN_SUCCESS;
 
 	thread_mtx_lock(thread);
 
@@ -119,24 +167,26 @@ thread_terminate_internal(
 
 		act_abort(thread);
 
-		if (thread->started)
+		if (thread->started) {
 			clear_wait(thread, THREAD_INTERRUPTED);
-		else {
-			thread_start_internal(thread);
+		} else {
+			thread_start(thread);
 		}
-	}
-	else
+	} else {
 		result = KERN_TERMINATED;
+	}
 
-	if (thread->affinity_set != NULL)
+	if (thread->affinity_set != NULL) {
 		thread_affinity_terminate(thread);
+	}
 
 	thread_mtx_unlock(thread);
 
-	if (thread != current_thread() && result == KERN_SUCCESS)
-		thread_wait(thread);
+	if (thread != current_thread() && result == KERN_SUCCESS) {
+		thread_wait(thread, FALSE);
+	}
 
-	return (result);
+	return result;
 }
 
 /*
@@ -144,32 +194,34 @@ thread_terminate_internal(
  */
 kern_return_t
 thread_terminate(
-	thread_t		thread)
+	thread_t                thread)
 {
-	kern_return_t	result;
-
-	if (thread == THREAD_NULL)
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == THREAD_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
-	if (	thread->task == kernel_task	&&
-			thread != current_thread()	)
-		return (KERN_FAILURE);
+	/* Kernel threads can't be terminated without their own cooperation */
+	if (thread->task == kernel_task && thread != current_thread()) {
+		return KERN_FAILURE;
+	}
 
-	result = thread_terminate_internal(thread);
+	kern_return_t result = thread_terminate_internal(thread);
 
 	/*
-	 * If a kernel thread is terminating itself, force an AST here.
-	 * Kernel threads don't normally pass through the AST checking
-	 * code - and all threads finish their own termination in the
-	 * special handler APC.
+	 * If a kernel thread is terminating itself, force handle the APC_AST here.
+	 * Kernel threads don't pass through the return-to-user AST checking code,
+	 * but all threads must finish their own termination in thread_apc_ast.
	 */
 	if (thread->task == kernel_task) {
-		ml_set_interrupts_enabled(FALSE);
-		ast_taken(AST_APC, TRUE);
+		assert(thread->active == FALSE);
+		thread_ast_clear(thread, AST_APC);
+		thread_apc_ast(thread);
+		panic("thread_terminate");
+		/* NOTREACHED */
 	}
 
-	return (result);
+	return result;
 }
 
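The same MIG routine is reachable from user space through a thread's Mach port; a small illustrative program (not part of the diff):

    #include <mach/mach.h>
    #include <pthread.h>
    #include <stdio.h>

    static void *
    spin(void *arg)
    {
            for (;;) {
                    ;               /* never exits on its own */
            }
            return NULL;
    }

    int
    main(void)
    {
            pthread_t p;
            pthread_create(&p, NULL, spin, NULL);

            mach_port_t port = pthread_mach_thread_np(p);   /* thread act port */
            kern_return_t kr = thread_terminate(port);      /* drives the path above */
            printf("thread_terminate: %d\n", kr);
            return 0;
    }
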
 /*
@@ -180,13 +232,11 @@ thread_terminate(
  * Called with thread mutex held.
  */
 void
-thread_hold(
-	register thread_t	thread)
+thread_hold(thread_t thread)
 {
 	if (thread->suspend_count++ == 0) {
-		install_special_handler(thread);
-		if (thread->started)
-			thread_wakeup_one(&thread->suspend_count);
+		thread_set_apc_ast(thread);
+		assert(thread->suspend_parked == FALSE);
 	}
 }
 
@@ -194,192 +244,198 @@ thread_hold(
  * Decrement internal suspension count, setting thread
  * runnable when count falls to zero.
  *
+ * Because the wait is abortsafe, we can't be guaranteed that the thread
+ * is currently actually waiting even if suspend_parked is set.
+ *
  * Called with thread mutex held.
  */
 void
-thread_release(
-	register thread_t	thread)
-{
-	if (	thread->suspend_count > 0	&&
-		--thread->suspend_count == 0	) {
-		if (thread->started)
-			thread_wakeup_one(&thread->suspend_count);
-		else {
-			thread_start_internal(thread);
+thread_release(thread_t thread)
+{
+	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);
+
+	/* fail-safe on non-assert builds */
+	if (thread->suspend_count == 0) {
+		return;
+	}
+
+	if (--thread->suspend_count == 0) {
+		if (!thread->started) {
+			thread_start(thread);
+		} else if (thread->suspend_parked) {
+			thread->suspend_parked = FALSE;
+			thread_wakeup_thread(&thread->suspend_count, thread);
 		}
 	}
 }
 
 kern_return_t
-thread_suspend(
-	register thread_t	thread)
+thread_suspend(thread_t thread)
 {
-	thread_t		self = current_thread();
-	kern_return_t	result = KERN_SUCCESS;
+	kern_return_t result = KERN_SUCCESS;
 
-	if (thread == THREAD_NULL || thread->task == kernel_task)
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == THREAD_NULL || thread->task == kernel_task) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
 	thread_mtx_lock(thread);
 
 	if (thread->active) {
-		if (	thread->user_stop_count++ == 0		&&
-				thread->suspend_count++ == 0	) {
-			install_special_handler(thread);
-			if (thread != self)
-				thread_wakeup_one(&thread->suspend_count);
+		if (thread->user_stop_count++ == 0) {
+			thread_hold(thread);
 		}
-	}
-	else
+	} else {
 		result = KERN_TERMINATED;
+	}
 
 	thread_mtx_unlock(thread);
 
-	if (thread != self && result == KERN_SUCCESS)
-		thread_wait(thread);
+	if (thread != current_thread() && result == KERN_SUCCESS) {
+		thread_wait(thread, FALSE);
+	}
 
-	return (result);
+	return result;
 }
 
 kern_return_t
-thread_resume(
-	register thread_t	thread)
+thread_resume(thread_t thread)
 {
-	kern_return_t	result = KERN_SUCCESS;
+	kern_return_t result = KERN_SUCCESS;
 
-	if (thread == THREAD_NULL || thread->task == kernel_task)
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == THREAD_NULL || thread->task == kernel_task) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
 	thread_mtx_lock(thread);
 
 	if (thread->active) {
 		if (thread->user_stop_count > 0) {
-			if (	--thread->user_stop_count == 0		&&
-					--thread->suspend_count == 0	) {
-				if (thread->started)
-					thread_wakeup_one(&thread->suspend_count);
-				else {
-					thread_start_internal(thread);
-				}
+			if (--thread->user_stop_count == 0) {
+				thread_release(thread);
 			}
-		}
-		else
+		} else {
 			result = KERN_FAILURE;
-	}
-	else
+		}
+	} else {
 		result = KERN_TERMINATED;
+	}
 
 	thread_mtx_unlock(thread);
 
-	return (result);
+	return result;
 }
 
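The user_stop_count bookkeeping above makes thread_suspend()/thread_resume() strictly nest, and over-resume is refused rather than underflowing. An illustrative user-space demonstration:

    #include <mach/mach.h>
    #include <pthread.h>
    #include <assert.h>

    static void *
    spin(void *arg)
    {
            for (;;) {
                    ;
            }
            return NULL;
    }

    int
    main(void)
    {
            pthread_t p;
            pthread_create(&p, NULL, spin, NULL);
            thread_act_t t = pthread_mach_thread_np(p);

            assert(thread_suspend(t) == KERN_SUCCESS); /* user_stop_count 0 -> 1 */
            assert(thread_suspend(t) == KERN_SUCCESS); /* 1 -> 2 */
            assert(thread_resume(t)  == KERN_SUCCESS); /* 2 -> 1, still stopped */
            assert(thread_resume(t)  == KERN_SUCCESS); /* 1 -> 0, runnable again */
            assert(thread_resume(t)  == KERN_FAILURE); /* over-resume is refused */
            return 0;
    }
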
 /*
- * thread_depress_abort:
+ * thread_depress_abort_from_user:
  *
  * Prematurely abort priority depression if there is one.
  */
 kern_return_t
-thread_depress_abort(
-	register thread_t	thread)
+thread_depress_abort_from_user(thread_t thread)
 {
-	kern_return_t	result;
+	kern_return_t result;
 
-	if (thread == THREAD_NULL)
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == THREAD_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
-	thread_mtx_lock(thread);
+	thread_mtx_lock(thread);
 
-	if (thread->active)
-		result = thread_depress_abort_internal(thread);
-	else
+	if (thread->active) {
+		result = thread_depress_abort(thread);
+	} else {
 		result = KERN_TERMINATED;
+	}
 
-	thread_mtx_unlock(thread);
+	thread_mtx_unlock(thread);
 
-	return (result);
+	return result;
 }
 
 /*
- * Indicate that the activation should run its
- * special handler to detect a condition.
+ * Indicate that the thread should run the AST_APC callback
+ * to detect an abort condition.
  *
  * Called with thread mutex held.
  */
-void
+static void
 act_abort(
-	thread_t	thread)
+	thread_t        thread)
 {
-	spl_t	s = splsched();
+	spl_t s = splsched();
 
 	thread_lock(thread);
 
 	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
 		thread->sched_flags |= TH_SFLAG_ABORT;
-		install_special_handler_locked(thread);
-	}
-	else
+		thread_set_apc_ast_locked(thread);
+		thread_depress_abort_locked(thread);
+	} else {
 		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
+	}
 
 	thread_unlock(thread);
 	splx(s);
 }
-	
+
 kern_return_t
 thread_abort(
-	register thread_t	thread)
+	thread_t        thread)
 {
-	kern_return_t	result = KERN_SUCCESS;
+	kern_return_t result = KERN_SUCCESS;
 
-	if (thread == THREAD_NULL)
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == THREAD_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
 	thread_mtx_lock(thread);
 
 	if (thread->active) {
 		act_abort(thread);
 		clear_wait(thread, THREAD_INTERRUPTED);
-	}
-	else
+	} else {
 		result = KERN_TERMINATED;
+	}
 
 	thread_mtx_unlock(thread);
 
-	return (result);
+	return result;
 }
 
 kern_return_t
 thread_abort_safely(
-	thread_t	thread)
+	thread_t        thread)
 {
-	kern_return_t	result = KERN_SUCCESS;
+	kern_return_t result = KERN_SUCCESS;
 
-	if (thread == THREAD_NULL)
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == THREAD_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
 	thread_mtx_lock(thread);
 
 	if (thread->active) {
-		spl_t	s = splsched();
+		spl_t s = splsched();
 
 		thread_lock(thread);
 		if (!thread->at_safe_point ||
-			clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
+		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
 			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
 				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
-				install_special_handler_locked(thread);
+				thread_set_apc_ast_locked(thread);
+				thread_depress_abort_locked(thread);
 			}
 		}
 		thread_unlock(thread);
 		splx(s);
-	}
-	else
+	} else {
 		result = KERN_TERMINATED;
-		
+	}
+
 	thread_mtx_unlock(thread);
 
-	return (result);
+	return result;
 }
 
 /*** backward compatibility hacks ***/
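An illustrative contrast of the two abort flavors from user space (t is another thread's act port; error handling elided; the semantics described here follow the code above, not any extra documentation):

    /*
     * thread_abort(t) interrupts the target even at a non-safe point, so
     * it is only reasonable on a suspended thread whose state will be
     * corrected before it resumes.  thread_abort_safely(t) pends the
     * abort until the target reaches a safe point.
     */
    kern_return_t kr = thread_abort_safely(t);
    if (kr == KERN_SUCCESS) {
            /* t's interruptible in-kernel wait returns interrupted */
    }
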
@@ -389,40 +445,44 @@ thread_abort_safely(
 
 kern_return_t
 thread_info(
-	thread_t		thread,
-	thread_flavor_t		flavor,
-	thread_info_t		thread_info_out,
-	mach_msg_type_number_t	*thread_info_count)
+	thread_t                thread,
+	thread_flavor_t         flavor,
+	thread_info_t           thread_info_out,
+	mach_msg_type_number_t  *thread_info_count)
 {
-	kern_return_t	result;
+	kern_return_t result;
 
-	if (thread == THREAD_NULL)
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == THREAD_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
 	thread_mtx_lock(thread);
 
-	if (thread->active)
+	if (thread->active || thread->inspection) {
 		result = thread_info_internal(
-				thread, flavor, thread_info_out, thread_info_count);
-	else
+			thread, flavor, thread_info_out, thread_info_count);
+	} else {
 		result = KERN_TERMINATED;
+	}
 
 	thread_mtx_unlock(thread);
 
-	return (result);
+	return result;
 }
 
-kern_return_t
-thread_get_state(
-	register thread_t	thread,
-	int			flavor,
-	thread_state_t		state,		/* pointer to OUT array */
-	mach_msg_type_number_t	*state_count)	/*IN/OUT*/
+static inline kern_return_t
+thread_get_state_internal(
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,          /* pointer to OUT array */
+	mach_msg_type_number_t  *state_count,   /*IN/OUT*/
+	boolean_t               to_user)
 {
-	kern_return_t	result = KERN_SUCCESS;
+	kern_return_t result = KERN_SUCCESS;
 
-	if (thread == THREAD_NULL)
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == THREAD_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
 	thread_mtx_lock(thread);
 
@@ -432,112 +492,157 @@ thread_get_state(
 
 			thread_mtx_unlock(thread);
 
-			if (thread_stop(thread)) {
+			if (thread_stop(thread, FALSE)) {
 				thread_mtx_lock(thread);
 				result = machine_thread_get_state(
-						thread, flavor, state, state_count);
+					thread, flavor, state, state_count);
 				thread_unstop(thread);
-			}
-			else {
+			} else {
 				thread_mtx_lock(thread);
 				result = KERN_ABORTED;
 			}
 
 			thread_release(thread);
-		}
-		else
+		} else {
 			result = machine_thread_get_state(
-					thread, flavor, state, state_count);
-	}
-	else
+				thread, flavor, state, state_count);
+		}
+	} else if (thread->inspection) {
+		result = machine_thread_get_state(
+			thread, flavor, state, state_count);
+	} else {
 		result = KERN_TERMINATED;
+	}
+
+	if (to_user && result == KERN_SUCCESS) {
+		result = machine_thread_state_convert_to_user(thread, flavor, state,
+		    state_count);
+	}
 
 	thread_mtx_unlock(thread);
 
-	return (result);
+	return result;
+}
+
+/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
+
+kern_return_t
+thread_get_state(
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,
+	mach_msg_type_number_t  *state_count);
+
+kern_return_t
+thread_get_state(
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,          /* pointer to OUT array */
+	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
+{
+	return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
+}
+
+kern_return_t
+thread_get_state_to_user(
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,          /* pointer to OUT array */
+	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
+{
+	return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
 }
 
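The new _to_user path feeds machine_thread_state_convert_to_user(), which appears to exist so that machine state containing signed pointers (e.g. arm64e pointer authentication) is converted to its user-space representation; that reading is an inference from the names, not stated in the diff. The classic debugger-style use of the plain routine from user space (illustrative; assumes `t` is a suspended thread's port):

    #include <mach/mach.h>
    #include <stdio.h>

    static void
    dump_pc(thread_act_t t)
    {
    #if defined(__arm64__)
            arm_thread_state64_t st;
            mach_msg_type_number_t cnt = ARM_THREAD_STATE64_COUNT;
            if (thread_get_state(t, ARM_THREAD_STATE64,
                (thread_state_t)&st, &cnt) == KERN_SUCCESS) {
                    printf("pc = %p\n", (void *)arm_thread_state64_get_pc(st));
            }
    #elif defined(__x86_64__)
            x86_thread_state64_t st;
            mach_msg_type_number_t cnt = x86_THREAD_STATE64_COUNT;
            if (thread_get_state(t, x86_THREAD_STATE64,
                (thread_state_t)&st, &cnt) == KERN_SUCCESS) {
                    printf("rip = 0x%llx\n", st.__rip);
            }
    #endif
    }
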
 /*
  * Change thread's machine-dependent state.  Called with nothing
  * locked.  Returns same way.
  */
-static kern_return_t
+static inline kern_return_t
 thread_set_state_internal(
-	register thread_t	thread,
-	int			flavor,
-	thread_state_t		state,
-	mach_msg_type_number_t	state_count,
-	boolean_t		from_user)
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,
+	mach_msg_type_number_t  state_count,
+	boolean_t               from_user)
 {
-	kern_return_t	result = KERN_SUCCESS;
+	kern_return_t result = KERN_SUCCESS;
 
-	if (thread == THREAD_NULL)
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == THREAD_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
 	thread_mtx_lock(thread);
 
 	if (thread->active) {
+		if (from_user) {
+			result = machine_thread_state_convert_from_user(thread, flavor,
+			    state, state_count);
+			if (result != KERN_SUCCESS) {
+				goto out;
+			}
+		}
 		if (thread != current_thread()) {
 			thread_hold(thread);
 
 			thread_mtx_unlock(thread);
 
-			if (thread_stop(thread)) {
+			if (thread_stop(thread, TRUE)) {
 				thread_mtx_lock(thread);
 				result = machine_thread_set_state(
-						thread, flavor, state, state_count);
+					thread, flavor, state, state_count);
 				thread_unstop(thread);
-			}
-			else {
+			} else {
 				thread_mtx_lock(thread);
 				result = KERN_ABORTED;
 			}
 
 			thread_release(thread);
-		}
-		else
+		} else {
 			result = machine_thread_set_state(
-					thread, flavor, state, state_count);
-	}
-	else
+				thread, flavor, state, state_count);
+		}
+	} else {
 		result = KERN_TERMINATED;
+	}
 
-	if ((result == KERN_SUCCESS) && from_user)
+	if ((result == KERN_SUCCESS) && from_user) {
 		extmod_statistics_incr_thread_set_state(thread);
+	}
 
+out:
 	thread_mtx_unlock(thread);
 
-	return (result);
+	return result;
 }
 
-/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */ 
+/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
 kern_return_t
 thread_set_state(
-	register thread_t	thread,
-	int			flavor,
-	thread_state_t		state,
-	mach_msg_type_number_t	state_count);
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,
+	mach_msg_type_number_t  state_count);
 
 kern_return_t
 thread_set_state(
-	register thread_t	thread,
-	int			flavor,
-	thread_state_t		state,
-	mach_msg_type_number_t	state_count)
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,
+	mach_msg_type_number_t  state_count)
 {
 	return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
 }
- 
+
 kern_return_t
 thread_set_state_from_user(
-	register thread_t	thread,
-	int			flavor,
-	thread_state_t		state,
-	mach_msg_type_number_t	state_count)
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,
+	mach_msg_type_number_t  state_count)
 {
 	return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
 }
- 
+
 /*
  * Kernel-internal "thread" interfaces used outside this file:
  */
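The hold/stop dance above is what makes the canonical user-space pattern safe: suspend the target, back it out of any wait, rewrite its state, resume. A sketch (x86-64 shown; new_pc is the caller's replacement address; error handling elided):

    #include <mach/mach.h>
    #include <stdint.h>

    static void
    redirect(thread_act_t t, uintptr_t new_pc)
    {
            thread_suspend(t);
            thread_abort_safely(t);         /* back out of interruptible waits */

    #if defined(__x86_64__)
            x86_thread_state64_t st;
            mach_msg_type_number_t cnt = x86_THREAD_STATE64_COUNT;
            thread_get_state(t, x86_THREAD_STATE64, (thread_state_t)&st, &cnt);
            st.__rip = new_pc;
            thread_set_state(t, x86_THREAD_STATE64, (thread_state_t)&st, cnt);
    #endif

            thread_resume(t);
    }
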
@@ -547,12 +652,13 @@ thread_set_state_from_user(
  */
 kern_return_t
 thread_state_initialize(
-	register thread_t	thread)
+	thread_t                thread)
 {
-	kern_return_t	result = KERN_SUCCESS;
+	kern_return_t result = KERN_SUCCESS;
 
-	if (thread == THREAD_NULL)
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == THREAD_NULL) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
 	thread_mtx_lock(thread);
 
@@ -562,39 +668,39 @@ thread_state_initialize(
 
 			thread_mtx_unlock(thread);
 
-			if (thread_stop(thread)) {
+			if (thread_stop(thread, TRUE)) {
 				thread_mtx_lock(thread);
 				result = machine_thread_state_initialize( thread );
 				thread_unstop(thread);
-			}
-			else {
+			} else {
 				thread_mtx_lock(thread);
 				result = KERN_ABORTED;
 			}
 
 			thread_release(thread);
+		} else {
+			result = machine_thread_state_initialize( thread );
 		}
-		else
-			result = machine_thread_state_initialize( thread );
-	}
-	else
+	} else {
 		result = KERN_TERMINATED;
+	}
 
 	thread_mtx_unlock(thread);
 
-	return (result);
+	return result;
 }
 
 kern_return_t
 thread_dup(
-	register thread_t	target)
+	thread_t        target)
 {
-	thread_t	self = current_thread();
-	kern_return_t	result = KERN_SUCCESS;
+	thread_t self = current_thread();
+	kern_return_t result = KERN_SUCCESS;
 
-	if (target == THREAD_NULL || target == self)
-		return (KERN_INVALID_ARGUMENT);
+	if (target == THREAD_NULL || target == self) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
 	thread_mtx_lock(target);
 
@@ -603,29 +709,79 @@ thread_dup(
 
 			thread_mtx_unlock(target);
 
-			if (thread_stop(target)) {
+			if (thread_stop(target, TRUE)) {
 				thread_mtx_lock(target);
-				result = machine_thread_dup(self, target);
-				if (self->affinity_set != AFFINITY_SET_NULL)
+				result = machine_thread_dup(self, target, FALSE);
+
+				if (self->affinity_set != AFFINITY_SET_NULL) {
 					thread_affinity_dup(self, target);
+				}
 				thread_unstop(target);
-			}
-			else {
+			} else {
 				thread_mtx_lock(target);
 				result = KERN_ABORTED;
 			}
 
 			thread_release(target);
-	}
-	else
+	} else {
 		result = KERN_TERMINATED;
+	}
 
 	thread_mtx_unlock(target);
 
-	return (result);
+	return result;
 }
 
+kern_return_t
+thread_dup2(
+	thread_t        source,
+	thread_t        target)
+{
+	kern_return_t result = KERN_SUCCESS;
+	uint32_t active = 0;
+
+	if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	thread_mtx_lock(source);
+	active = source->active;
+	thread_mtx_unlock(source);
+
+	if (!active) {
+		return KERN_TERMINATED;
+	}
+
+	thread_mtx_lock(target);
+
+	if (target->active || target->inspection) {
+		thread_hold(target);
+
+		thread_mtx_unlock(target);
+
+		if (thread_stop(target, TRUE)) {
+			thread_mtx_lock(target);
+			result = machine_thread_dup(source, target, TRUE);
+			if (source->affinity_set != AFFINITY_SET_NULL) {
+				thread_affinity_dup(source, target);
+			}
+			thread_unstop(target);
+		} else {
+			thread_mtx_lock(target);
+			result = KERN_ABORTED;
+		}
+
+		thread_release(target);
+	} else {
+		result = KERN_TERMINATED;
+	}
+
+	thread_mtx_unlock(target);
+
+	return result;
+}
+
 /*
  * thread_setstatus:
  *
@@ -634,13 +790,22 @@ thread_dup(
  */
 kern_return_t
 thread_setstatus(
-	register thread_t	thread,
-	int			flavor,
-	thread_state_t		tstate,
-	mach_msg_type_number_t	count)
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          tstate,
+	mach_msg_type_number_t  count)
 {
+	return thread_set_state(thread, flavor, tstate, count);
+}
 
-	return (thread_set_state(thread, flavor, tstate, count));
+kern_return_t
+thread_setstatus_from_user(
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          tstate,
+	mach_msg_type_number_t  count)
+{
+	return thread_set_state_from_user(thread, flavor, tstate, count);
 }
 
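thread_dup2() generalizes thread_dup() to an arbitrary source thread, and the new third argument to machine_thread_dup() distinguishes the two callers. An illustrative kernel-side use (the surrounding setup is assumed, and the snapshot-into-a-corpse motivation is an inference, not stated in this diff):

    kern_return_t kr = thread_dup2(source_thread, target_thread);
    if (kr == KERN_ABORTED) {
            /* target could not be stopped cleanly; the copy did not happen */
    }
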
- */ -void -install_special_handler( - thread_t thread) +kern_return_t +thread_getstatus_to_user( + thread_t thread, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t *count) { - spl_t s = splsched(); - - thread_lock(thread); - install_special_handler_locked(thread); - thread_unlock(thread); - splx(s); + return thread_get_state_to_user(thread, flavor, tstate, count); } /* - * install_special_handler_locked: - * - * Do the work of installing the special_handler. - * - * Called with the thread mutex and scheduling lock held. + * Change thread's machine-dependent userspace TSD base. + * Called with nothing locked. Returns same way. */ -void -install_special_handler_locked( - thread_t thread) +kern_return_t +thread_set_tsd_base( + thread_t thread, + mach_vm_offset_t tsd_base) { - ReturnHandler **rh; + kern_return_t result = KERN_SUCCESS; - /* The work handler must always be the last ReturnHandler on the list, - because it can do tricky things like detach the thr_act. */ - for (rh = &thread->handlers; *rh; rh = &(*rh)->next) - continue; + if (thread == THREAD_NULL) { + return KERN_INVALID_ARGUMENT; + } - if (rh != &thread->special_handler.next) - *rh = &thread->special_handler; + thread_mtx_lock(thread); - /* - * Temporarily undepress, so target has - * a chance to do locking required to - * block itself in special_handler(). - */ - if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) - SCHED(compute_priority)(thread, TRUE); + if (thread->active) { + if (thread != current_thread()) { + thread_hold(thread); - thread_ast_set(thread, AST_APC); + thread_mtx_unlock(thread); - if (thread == current_thread()) - ast_propagate(thread->ast); - else { - processor_t processor = thread->last_processor; + if (thread_stop(thread, TRUE)) { + thread_mtx_lock(thread); + result = machine_thread_set_tsd_base(thread, tsd_base); + thread_unstop(thread); + } else { + thread_mtx_lock(thread); + result = KERN_ABORTED; + } - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->active_thread == thread ) - cause_ast_check(processor); + thread_release(thread); + } else { + result = machine_thread_set_tsd_base(thread, tsd_base); + } + } else { + result = KERN_TERMINATED; } + + thread_mtx_unlock(thread); + + return result; } /* - * Activation control support routines internal to this file: + * thread_set_apc_ast: + * + * Register the AST_APC callback that handles suspension and + * termination, if it hasn't been installed already. + * + * Called with the thread mutex held. */ - -void -act_execute_returnhandlers(void) +static void +thread_set_apc_ast(thread_t thread) { - thread_t thread = current_thread(); + spl_t s = splsched(); - thread_ast_clear(thread, AST_APC); - spllo(); - - for (;;) { - ReturnHandler *rh; - - thread_mtx_lock(thread); - - (void)splsched(); - thread_lock(thread); + thread_lock(thread); + thread_set_apc_ast_locked(thread); + thread_unlock(thread); - rh = thread->handlers; - if (rh != NULL) { - thread->handlers = rh->next; + splx(s); +} - thread_unlock(thread); - spllo(); +/* + * thread_set_apc_ast_locked: + * + * Do the work of registering for the AST_APC callback. + * + * Called with the thread mutex and scheduling lock held. 
+ */ +static void +thread_set_apc_ast_locked(thread_t thread) +{ + thread_ast_set(thread, AST_APC); - thread_mtx_unlock(thread); + if (thread == current_thread()) { + ast_propagate(thread); + } else { + processor_t processor = thread->last_processor; - /* Execute it */ - (*rh->handler)(rh, thread); + if (processor != PROCESSOR_NULL && + processor->state == PROCESSOR_RUNNING && + processor->active_thread == thread) { + cause_ast_check(processor); } - else - break; } - - thread_unlock(thread); - spllo(); - - thread_mtx_unlock(thread); } /* - * special_handler_continue + * Activation control support routines internal to this file: * - * Continuation routine for the special handler blocks. It checks + */ + +/* + * thread_suspended + * + * Continuation routine for thread suspension. It checks * to see whether there has been any new suspensions. If so, it - * installs the special handler again. Otherwise, it checks to see - * if the current depression needs to be re-instated (it may have - * been temporarily removed in order to get to this point in a hurry). + * installs the AST_APC handler again. */ -void -special_handler_continue(void) +__attribute__((noreturn)) +static void +thread_suspended(__unused void *parameter, wait_result_t result) { - thread_t thread = current_thread(); + thread_t thread = current_thread(); thread_mtx_lock(thread); - if (thread->suspend_count > 0) - install_special_handler(thread); - else { - spl_t s = splsched(); - - thread_lock(thread); - if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) { - processor_t myprocessor = thread->last_processor; + if (result == THREAD_INTERRUPTED) { + thread->suspend_parked = FALSE; + } else { + assert(thread->suspend_parked == FALSE); + } - thread->sched_pri = DEPRESSPRI; - myprocessor->current_pri = thread->sched_pri; - } - thread_unlock(thread); - splx(s); + if (thread->suspend_count > 0) { + thread_set_apc_ast(thread); } thread_mtx_unlock(thread); @@ -802,46 +960,42 @@ special_handler_continue(void) } /* - * special_handler - handles suspension, termination. Called - * with nothing locked. Returns (if it returns) the same way. + * thread_apc_ast - handles AST_APC and drives thread suspension and termination. + * Called with nothing locked. Returns (if it returns) the same way. */ void -special_handler( - __unused ReturnHandler *rh, - thread_t thread) +thread_apc_ast(thread_t thread) { - spl_t s; - thread_mtx_lock(thread); - s = splsched(); + assert(thread->suspend_parked == FALSE); + + spl_t s = splsched(); thread_lock(thread); + + /* TH_SFLAG_POLLDEPRESS is OK to have here */ + assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0); + thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK; thread_unlock(thread); splx(s); - /* - * If we're suspended, go to sleep and wait for someone to wake us up. - */ - if (thread->active) { - if (thread->suspend_count > 0) { - if (thread->handlers == NULL) { - assert_wait(&thread->suspend_count, THREAD_ABORTSAFE); - thread_mtx_unlock(thread); - thread_block((thread_continue_t)special_handler_continue); - /*NOTREACHED*/ - } - - thread_mtx_unlock(thread); + if (!thread->active) { + /* Thread is ready to terminate, time to tear it down */ + thread_mtx_unlock(thread); - special_handler_continue(); - /*NOTREACHED*/ - } + thread_terminate_self(); + /*NOTREACHED*/ } - else { + + /* If we're suspended, go to sleep and wait for someone to wake us up. 
@@ -802,46 +960,42 @@
 }
 
 /*
- * special_handler	- handles suspension, termination.  Called
- * with nothing locked.  Returns (if it returns) the same way.
+ * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
+ * Called with nothing locked.  Returns (if it returns) the same way.
  */
 void
-special_handler(
-	__unused ReturnHandler	*rh,
-	thread_t		thread)
+thread_apc_ast(thread_t thread)
 {
-	spl_t	s;
-
 	thread_mtx_lock(thread);
 
-	s = splsched();
+	assert(thread->suspend_parked == FALSE);
+
+	spl_t s = splsched();
 	thread_lock(thread);
+
+	/* TH_SFLAG_POLLDEPRESS is OK to have here */
+	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);
+
 	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
 	thread_unlock(thread);
 	splx(s);
 
-	/*
-	 * If we're suspended, go to sleep and wait for someone to wake us up.
-	 */
-	if (thread->active) {
-		if (thread->suspend_count > 0) {
-			if (thread->handlers == NULL) {
-				assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
-				thread_mtx_unlock(thread);
-				thread_block((thread_continue_t)special_handler_continue);
-				/*NOTREACHED*/
-			}
-
-			thread_mtx_unlock(thread);
+	if (!thread->active) {
+		/* Thread is ready to terminate, time to tear it down */
+		thread_mtx_unlock(thread);
 
-			special_handler_continue();
-			/*NOTREACHED*/
-		}
+		thread_terminate_self();
+		/*NOTREACHED*/
 	}
-	else {
+
+	/* If we're suspended, go to sleep and wait for someone to wake us up. */
+	if (thread->suspend_count > 0) {
+		thread->suspend_parked = TRUE;
+		assert_wait(&thread->suspend_count,
+		    THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
 		thread_mtx_unlock(thread);
 
-		thread_terminate_self();
+		thread_block(thread_suspended);
 		/*NOTREACHED*/
 	}
 
@@ -851,100 +1005,184 @@
 /* Prototype, see justification above */
 kern_return_t
 act_set_state(
-	thread_t		thread,
-	int			flavor,
-	thread_state_t		state,
-	mach_msg_type_number_t	count)
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,
+	mach_msg_type_number_t  count);
+
+kern_return_t
+act_set_state(
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,
+	mach_msg_type_number_t  count)
 {
-	if (thread == current_thread())
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == current_thread()) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
-	return (thread_set_state(thread, flavor, state, count));
-	
+	return thread_set_state(thread, flavor, state, count);
 }
 
 kern_return_t
 act_set_state_from_user(
-	thread_t		thread,
-	int			flavor,
-	thread_state_t		state,
-	mach_msg_type_number_t	count)
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,
+	mach_msg_type_number_t  count)
 {
-	if (thread == current_thread())
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == current_thread()) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
-	return (thread_set_state_from_user(thread, flavor, state, count));
-	
+	return thread_set_state_from_user(thread, flavor, state, count);
 }
 
+/* Prototype, see justification above */
 kern_return_t
 act_get_state(
-	thread_t		thread,
-	int			flavor,
-	thread_state_t		state,
-	mach_msg_type_number_t	*count)
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,
+	mach_msg_type_number_t  *count);
+
+kern_return_t
+act_get_state(
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,
+	mach_msg_type_number_t  *count)
 {
-	if (thread == current_thread())
-		return (KERN_INVALID_ARGUMENT);
+	if (thread == current_thread()) {
+		return KERN_INVALID_ARGUMENT;
+	}
 
-	return (thread_get_state(thread, flavor, state, count));
+	return thread_get_state(thread, flavor, state, count);
 }
 
-void
-act_set_astbsd(
-	thread_t	thread)
+kern_return_t
+act_get_state_to_user(
+	thread_t                thread,
+	int                     flavor,
+	thread_state_t          state,
+	mach_msg_type_number_t  *count)
 {
-	spl_t	s = splsched();
-
 	if (thread == current_thread()) {
-		thread_ast_set(thread, AST_BSD);
-		ast_propagate(thread->ast);
+		return KERN_INVALID_ARGUMENT;
 	}
-	else {
-		processor_t	processor;
+
+	return thread_get_state_to_user(thread, flavor, state, count);
+}
+
+static void
+act_set_ast(
+	thread_t thread,
+	ast_t ast)
+{
+	spl_t s = splsched();
+
+	if (thread == current_thread()) {
+		thread_ast_set(thread, ast);
+		ast_propagate(thread);
+	} else {
+		processor_t processor;
 
 		thread_lock(thread);
-		thread_ast_set(thread, AST_BSD);
+		thread_ast_set(thread, ast);
 		processor = thread->last_processor;
-		if (	processor != PROCESSOR_NULL		&&
-				processor->state == PROCESSOR_RUNNING	&&
-				processor->active_thread == thread	)
+		if (processor != PROCESSOR_NULL &&
+		    processor->state == PROCESSOR_RUNNING &&
+		    processor->active_thread == thread) {
 			cause_ast_check(processor);
+		}
 		thread_unlock(thread);
 	}
-	
+
 	splx(s);
 }
 
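act_set_ast() now centralizes the propagate-locally-or-IPI-the-remote-CPU logic, so each AST class below reduces to a one-line wrapper. A new class would follow the same shape (illustrative; AST_FOO and act_set_astfoo are hypothetical):

    void
    act_set_astfoo(thread_t thread)
    {
            act_set_ast(thread, AST_FOO);   /* hypothetical AST class */
    }
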
+ */ +static void +act_set_ast_async(thread_t thread, + ast_t ast) { - spl_t s = splsched(); - + thread_ast_set(thread, ast); + if (thread == current_thread()) { - thread_ast_set(thread, AST_APC); - ast_propagate(thread->ast); + spl_t s = splsched(); + ast_propagate(thread); + splx(s); } - else { - processor_t processor; +} - thread_lock(thread); - thread_ast_set(thread, AST_APC); - processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->active_thread == thread ) - cause_ast_check(processor); - thread_unlock(thread); +void +act_set_astbsd( + thread_t thread) +{ + act_set_ast( thread, AST_BSD ); +} + +void +act_set_astkevent(thread_t thread, uint16_t bits) +{ + atomic_fetch_or(&thread->kevent_ast_bits, bits); + + /* kevent AST shouldn't send immediate IPIs */ + act_set_ast_async(thread, AST_KEVENT); +} + +void +act_set_kperf( + thread_t thread) +{ + /* safety check */ + if (thread != current_thread()) { + if (!ml_get_interrupts_enabled()) { + panic("unsafe act_set_kperf operation"); + } } - - splx(s); + + act_set_ast( thread, AST_KPERF ); +} + +#if CONFIG_MACF +void +act_set_astmacf( + thread_t thread) +{ + act_set_ast( thread, AST_MACF); +} +#endif + +void +act_set_astledger(thread_t thread) +{ + act_set_ast(thread, AST_LEDGER); +} + +/* + * The ledger AST may need to be set while already holding + * the thread lock. This routine skips sending the IPI, + * allowing us to avoid the lock hold. + * + * However, it means the targeted thread must context switch + * to recognize the ledger AST. + */ +void +act_set_astledger_async(thread_t thread) +{ + act_set_ast_async(thread, AST_LEDGER); +} + +void +act_set_io_telemetry_ast(thread_t thread) +{ + act_set_ast(thread, AST_TELEMETRY_IO); }