X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/6601e61aa18bf4f09af135ff61fc7f4771d23b06..c3c9b80d004dbbfdf763edeb97968c6997e3b45b:/osfmk/kern/thread_act.c

diff --git a/osfmk/kern/thread_act.c b/osfmk/kern/thread_act.c
index 872fc21dc..679c11621 100644
--- a/osfmk/kern/thread_act.c
+++ b/osfmk/kern/thread_act.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
- *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
+ *
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
- *
- * @APPLE_LICENSE_HEADER_END@
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
+ *
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_FREE_COPYRIGHT@
@@ -43,17 +49,16 @@
  *
  * Thread management routines
  */
+
 #include
 #include
-#include
-#include
-#include
 #include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -62,20 +67,89 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
 #include
 #include
 #include
-#include
-#include
+#include
+#include
+
+#include
+
+#include
+
+static void act_abort(thread_t thread);
+
+static void thread_suspended(void *arg, wait_result_t result);
+static void thread_set_apc_ast(thread_t thread);
+static void thread_set_apc_ast_locked(thread_t thread);
+
+/*
+ * Internal routine to mark a thread as started.
+ * Always called with the thread mutex locked.
+ */
+void
+thread_start(
+    thread_t thread)
+{
+    clear_wait(thread, THREAD_AWAKENED);
+    thread->started = TRUE;
+}
+
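The pairing matters: newly created threads come into existence stopped, and nothing runs until someone performs the equivalent of thread_start(). A minimal, hedged user-space sketch of that lifecycle using only the public Mach calls (the kernel internals above are not callable from user space; thread_create() returns a suspended thread with no register state, so a real caller would thread_set_state() it before any resume):

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

int
main(void)
{
    thread_act_t child = MACH_PORT_NULL;
    kern_return_t kr = thread_create(mach_task_self(), &child);

    if (kr != KERN_SUCCESS) {
        fprintf(stderr, "thread_create: %s\n", mach_error_string(kr));
        return 1;
    }

    /* thread_set_state() would go here before any thread_resume() */

    kr = thread_terminate(child);   /* tear down the stillborn thread */
    printf("terminate: %s\n", mach_error_string(kr));

    mach_port_deallocate(mach_task_self(), child);
    return 0;
}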
+/*
+ * Internal routine to mark a thread as waiting
+ * right after it has been created. The caller
+ * is responsible for calling wakeup()/thread_wakeup()
+ * or thread_terminate() to get it going.
+ *
+ * Always called with the thread mutex locked.
+ *
+ * Task and task_threads mutexes also held
+ * (so nobody can set the thread running before
+ * this point)
+ *
+ * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
+ * to allow termination from this point forward.
+ */
+void
+thread_start_in_assert_wait(
+    thread_t thread,
+    event_t event,
+    wait_interrupt_t interruptible)
+{
+    struct waitq *waitq = assert_wait_queue(event);
+    wait_result_t wait_result;
+    spl_t spl;
-void act_abort(thread_t);
-void act_set_apc(thread_t);
-void install_special_handler_locked(thread_t);
-void special_handler_continue(void);
+
+    spl = splsched();
+    waitq_lock(waitq);
+
+    /* clear out startup condition (safe because thread not started yet) */
+    thread_lock(thread);
+    assert(!thread->started);
+    assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
+    thread->state &= ~(TH_WAIT | TH_UNINT);
+    thread_unlock(thread);
+
+    /* assert wait interruptibly forever */
+    wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
+        interruptible,
+        TIMEOUT_URGENCY_SYS_NORMAL,
+        TIMEOUT_WAIT_FOREVER,
+        TIMEOUT_NO_LEEWAY,
+        thread);
+    assert(wait_result == THREAD_WAITING);
+
+    /* mark thread started while we still hold the waitq lock */
+    thread_lock(thread);
+    thread->started = TRUE;
+    thread_unlock(thread);
+
+    waitq_unlock(waitq);
+    splx(spl);
+}
 
 /*
  * Internal routine to terminate a thread.
@@ -83,9 +157,11 @@ void special_handler_continue(void);
  */
 kern_return_t
 thread_terminate_internal(
-    thread_t thread)
+    thread_t thread,
+    thread_terminate_options_t options)
 {
-    kern_return_t result = KERN_SUCCESS;
+    kern_return_t result = KERN_SUCCESS;
+    boolean_t test_pin_bit = false;
 
     thread_mtx_lock(thread);
 
@@ -94,22 +170,35 @@ thread_terminate_internal(
     act_abort(thread);
 
-    if (thread->started)
+    if (thread->started) {
         clear_wait(thread, THREAD_INTERRUPTED);
-    else {
-        clear_wait(thread, THREAD_AWAKENED);
-        thread->started = TRUE;
+    } else {
+        thread_start(thread);
     }
-    }
-    else
+    /* This bit can be reliably tested only if the thread is still active */
+    test_pin_bit = (options == TH_TERMINATE_OPTION_UNPIN) ? true : false;
+    } else {
         result = KERN_TERMINATED;
+    }
+
+    if (thread->affinity_set != NULL) {
+        thread_affinity_terminate(thread);
+    }
+
+    /*
+     * thread_terminate should not be allowed on pthreads.
+     * Until it is disallowed for pthreads, always unpin the pinned port
+     * when the thread is being terminated.
+     */
+    ipc_thread_port_unpin(thread->ith_self, test_pin_bit);
 
     thread_mtx_unlock(thread);
 
-    if (thread != current_thread() && result == KERN_SUCCESS)
-        thread_wait(thread);
+    if (thread != current_thread() && result == KERN_SUCCESS) {
+        thread_wait(thread, FALSE);
+    }
 
-    return (result);
+    return result;
 }
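A hedged user-space sketch of driving the path above: task_threads() hands back a send right for every thread in the task, and terminating one funnels into thread_terminate_internal(). Tearing threads out from under a runtime such as pthreads is unsafe in real code; this is purely illustrative (port-name comparison against mach_thread_self() is the common idiom but is not guaranteed by Mach semantics):

#include <mach/mach.h>

kern_return_t
terminate_peer_threads(void)
{
    thread_act_array_t threads;
    mach_msg_type_number_t count;
    thread_act_t self = mach_thread_self();
    kern_return_t kr = task_threads(mach_task_self(), &threads, &count);

    if (kr != KERN_SUCCESS) {
        return kr;
    }

    for (mach_msg_type_number_t i = 0; i < count; i++) {
        if (threads[i] != self) {
            /* returns KERN_TERMINATED if the thread already died */
            (void)thread_terminate(threads[i]);
        }
        mach_port_deallocate(mach_task_self(), threads[i]);
    }

    mach_port_deallocate(mach_task_self(), self);
    vm_deallocate(mach_task_self(), (vm_address_t)threads,
        count * sizeof(threads[0]));
    return KERN_SUCCESS;
}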
 
 /*
@@ -117,32 +206,48 @@ thread_terminate_internal(
  */
 kern_return_t
 thread_terminate(
-    thread_t thread)
+    thread_t thread)
 {
-    kern_return_t result;
-
-    if (thread == THREAD_NULL)
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == THREAD_NULL) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
-    if ( thread->task == kernel_task &&
-         thread != current_thread() )
-        return (KERN_FAILURE);
+    /* Kernel threads can't be terminated without their own cooperation */
+    if (thread->task == kernel_task && thread != current_thread()) {
+        return KERN_FAILURE;
+    }
 
-    result = thread_terminate_internal(thread);
+    kern_return_t result = thread_terminate_internal(thread, TH_TERMINATE_OPTION_NONE);
 
     /*
-     * If a kernel thread is terminating itself, force an AST here.
-     * Kernel threads don't normally pass through the AST checking
-     * code - and all threads finish their own termination in the
-     * special handler APC.
+     * If a kernel thread is terminating itself, force handle the APC_AST here.
+     * Kernel threads don't pass through the return-to-user AST checking code,
+     * but all threads must finish their own termination in thread_apc_ast.
      */
     if (thread->task == kernel_task) {
-        ml_set_interrupts_enabled(FALSE);
-        ast_taken(AST_APC, TRUE);
+        assert(thread->active == FALSE);
+        thread_ast_clear(thread, AST_APC);
+        thread_apc_ast(thread);
+        panic("thread_terminate");
+        /* NOTREACHED */
+    }
+
+    return result;
+}
+
+kern_return_t
+thread_terminate_pinned(
+    thread_t thread)
+{
+    if (thread == THREAD_NULL) {
+        return KERN_INVALID_ARGUMENT;
     }
 
-    return (result);
+    assert(thread->task != kernel_task);
+
+    kern_return_t result = thread_terminate_internal(thread, TH_TERMINATE_OPTION_UNPIN);
+    return result;
 }
 
 /*
@@ -153,13 +258,11 @@ thread_terminate(
  * Called with thread mutex held.
  */
 void
-thread_hold(
-    register thread_t thread)
+thread_hold(thread_t thread)
 {
     if (thread->suspend_count++ == 0) {
-        install_special_handler(thread);
-        if (thread->started)
-            thread_wakeup_one(&thread->suspend_count);
+        thread_set_apc_ast(thread);
+        assert(thread->suspend_parked == FALSE);
     }
 }
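thread_hold()/thread_release() are the kernel-internal halves of the user-visible suspend count. A short, hedged sketch of the nesting contract as seen through the public API:

#include <mach/mach.h>

void
suspend_twice_resume_twice(thread_act_t target)
{
    thread_suspend(target);   /* count 0 -> 1: target parks in thread_apc_ast */
    thread_suspend(target);   /* count 1 -> 2: no additional wakeup needed */

    thread_resume(target);    /* count 2 -> 1: target stays suspended */
    thread_resume(target);    /* count 1 -> 0: target runs again */

    /* A further thread_resume() here would return KERN_FAILURE. */
}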
@@ -167,194 +270,198 @@ thread_hold(
  * Decrement internal suspension count, setting thread
  * runnable when count falls to zero.
  *
+ * Because the wait is abortsafe, we can't be guaranteed that the thread
+ * is currently actually waiting even if suspend_parked is set.
+ *
  * Called with thread mutex held.
  */
 void
-thread_release(
-    register thread_t thread)
-{
-    if ( thread->suspend_count > 0 &&
-         --thread->suspend_count == 0 ) {
-        if (thread->started)
-            thread_wakeup_one(&thread->suspend_count);
-        else {
-            clear_wait(thread, THREAD_AWAKENED);
-            thread->started = TRUE;
+thread_release(thread_t thread)
+{
+    assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);
+
+    /* fail-safe on non-assert builds */
+    if (thread->suspend_count == 0) {
+        return;
+    }
+
+    if (--thread->suspend_count == 0) {
+        if (!thread->started) {
+            thread_start(thread);
+        } else if (thread->suspend_parked) {
+            thread->suspend_parked = FALSE;
+            thread_wakeup_thread(&thread->suspend_count, thread);
         }
     }
 }
 
 kern_return_t
-thread_suspend(
-    register thread_t thread)
+thread_suspend(thread_t thread)
 {
-    thread_t self = current_thread();
-    kern_return_t result = KERN_SUCCESS;
+    kern_return_t result = KERN_SUCCESS;
 
-    if (thread == THREAD_NULL || thread->task == kernel_task)
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == THREAD_NULL || thread->task == kernel_task) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
     thread_mtx_lock(thread);
 
     if (thread->active) {
-        if ( thread->user_stop_count++ == 0 &&
-             thread->suspend_count++ == 0 ) {
-            install_special_handler(thread);
-            if (thread != self)
-                thread_wakeup_one(&thread->suspend_count);
+        if (thread->user_stop_count++ == 0) {
+            thread_hold(thread);
         }
-    }
-    else
+    } else {
         result = KERN_TERMINATED;
+    }
 
     thread_mtx_unlock(thread);
 
-    if (thread != self && result == KERN_SUCCESS)
-        thread_wait(thread);
+    if (thread != current_thread() && result == KERN_SUCCESS) {
+        thread_wait(thread, FALSE);
+    }
 
-    return (result);
+    return result;
 }
 
 kern_return_t
-thread_resume(
-    register thread_t thread)
+thread_resume(thread_t thread)
 {
-    kern_return_t result = KERN_SUCCESS;
+    kern_return_t result = KERN_SUCCESS;
 
-    if (thread == THREAD_NULL || thread->task == kernel_task)
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == THREAD_NULL || thread->task == kernel_task) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
     thread_mtx_lock(thread);
 
     if (thread->active) {
         if (thread->user_stop_count > 0) {
-            if ( --thread->user_stop_count == 0 &&
-                 --thread->suspend_count == 0 ) {
-                if (thread->started)
-                    thread_wakeup_one(&thread->suspend_count);
-                else {
-                    clear_wait(thread, THREAD_AWAKENED);
-                    thread->started = TRUE;
-                }
+            if (--thread->user_stop_count == 0) {
+                thread_release(thread);
             }
-        }
-        else
+        } else {
             result = KERN_FAILURE;
-    }
-    else
+        }
+    } else {
         result = KERN_TERMINATED;
+    }
 
     thread_mtx_unlock(thread);
 
-    return (result);
+    return result;
 }
 
 /*
- * thread_depress_abort:
+ * thread_depress_abort_from_user:
  *
  * Prematurely abort priority depression if there is one.
  */
 kern_return_t
-thread_depress_abort(
-    register thread_t thread)
+thread_depress_abort_from_user(thread_t thread)
 {
-    kern_return_t result;
+    kern_return_t result;
 
-    if (thread == THREAD_NULL)
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == THREAD_NULL) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
-    thread_mtx_lock(thread);
+    thread_mtx_lock(thread);
 
-    if (thread->active)
-        result = thread_depress_abort_internal(thread);
-    else
+    if (thread->active) {
+        result = thread_depress_abort(thread);
+    } else {
         result = KERN_TERMINATED;
+    }
 
-    thread_mtx_unlock(thread);
+    thread_mtx_unlock(thread);
 
-    return (result);
+    return result;
 }
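thread_depress_abort_from_user() is the MIG-served entry for cutting a priority depression short. A hedged sketch, assuming the public thread_switch() trap (mach/thread_switch.h) and the thread_depress_abort() routine exported by the SDK; both names are taken from the Mach interface, not from this diff:

#include <mach/mach.h>
#include <mach/thread_switch.h>

void
yield_depressed_briefly(void)
{
    /* depress this thread's priority for up to 10 ms while yielding */
    thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10);
}

void
cancel_depression(thread_act_t target)
{
    /* prematurely restore target's priority; KERN_TERMINATED if it exited */
    kern_return_t kr = thread_depress_abort(target);
    (void)kr;
}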
 /*
- * Indicate that the activation should run its
- * special handler to detect a condition.
+ * Indicate that the thread should run the AST_APC callback
+ * to detect an abort condition.
  *
  * Called with thread mutex held.
  */
-void
+static void
 act_abort(
-    thread_t thread)
+    thread_t thread)
 {
-    spl_t s = splsched();
+    spl_t s = splsched();
 
     thread_lock(thread);
 
-    if (!(thread->state & TH_ABORT)) {
-        thread->state |= TH_ABORT;
-        install_special_handler_locked(thread);
+    if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
+        thread->sched_flags |= TH_SFLAG_ABORT;
+        thread_set_apc_ast_locked(thread);
+        thread_depress_abort_locked(thread);
+    } else {
+        thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
     }
-    else
-        thread->state &= ~TH_ABORT_SAFELY;
 
     thread_unlock(thread);
     splx(s);
 }
-
+
 kern_return_t
 thread_abort(
-    register thread_t thread)
+    thread_t thread)
 {
-    kern_return_t result = KERN_SUCCESS;
+    kern_return_t result = KERN_SUCCESS;
 
-    if (thread == THREAD_NULL)
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == THREAD_NULL) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
     thread_mtx_lock(thread);
 
     if (thread->active) {
         act_abort(thread);
         clear_wait(thread, THREAD_INTERRUPTED);
-    }
-    else
+    } else {
         result = KERN_TERMINATED;
+    }
 
     thread_mtx_unlock(thread);
 
-    return (result);
+    return result;
 }
 
 kern_return_t
 thread_abort_safely(
-    thread_t thread)
+    thread_t thread)
 {
-    kern_return_t result = KERN_SUCCESS;
+    kern_return_t result = KERN_SUCCESS;
 
-    if (thread == THREAD_NULL)
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == THREAD_NULL) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
     thread_mtx_lock(thread);
 
     if (thread->active) {
-        spl_t s = splsched();
+        spl_t s = splsched();
 
         thread_lock(thread);
         if (!thread->at_safe_point ||
-            clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
-            if (!(thread->state & TH_ABORT)) {
-                thread->state |= (TH_ABORT|TH_ABORT_SAFELY);
-                install_special_handler_locked(thread);
+            clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
+            if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
+                thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
+                thread_set_apc_ast_locked(thread);
+                thread_depress_abort_locked(thread);
             }
         }
         thread_unlock(thread);
         splx(s);
-    }
-    else
+    } else {
         result = KERN_TERMINATED;
-
+    }
+
     thread_mtx_unlock(thread);
 
-    return (result);
+    return result;
 }
 
 /*** backward compatibility hacks ***/
@@ -364,40 +471,44 @@ thread_abort_safely(
 
 kern_return_t
 thread_info(
-    thread_t thread,
-    thread_flavor_t flavor,
-    thread_info_t thread_info_out,
-    mach_msg_type_number_t *thread_info_count)
+    thread_t thread,
+    thread_flavor_t flavor,
+    thread_info_t thread_info_out,
+    mach_msg_type_number_t *thread_info_count)
 {
-    kern_return_t result;
+    kern_return_t result;
 
-    if (thread == THREAD_NULL)
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == THREAD_NULL) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
     thread_mtx_lock(thread);
 
-    if (thread->active)
+    if (thread->active || thread->inspection) {
         result = thread_info_internal(
-            thread, flavor, thread_info_out, thread_info_count);
-    else
+            thread, flavor, thread_info_out, thread_info_count);
+    } else {
         result = KERN_TERMINATED;
+    }
 
     thread_mtx_unlock(thread);
 
-    return (result);
+    return result;
 }
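A hedged sketch of thread_abort_safely() observed from user space: a thread parked in an interruptible wait should come back early once aborted (behavior varies with the wait type). pthread_mach_thread_np() is the Darwin call mapping a pthread to its Mach thread port:

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
sleeper(void *arg)
{
    (void)arg;
    sleep(30);                    /* abortable, interruptible wait */
    printf("sleep returned early\n");
    return NULL;
}

int
main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, sleeper, NULL);
    sleep(1);                     /* let the sleeper block */

    thread_act_t port = pthread_mach_thread_np(t);
    kern_return_t kr = thread_abort_safely(port);
    printf("abort_safely: %s\n", mach_error_string(kr));

    pthread_join(t, NULL);
    return 0;
}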
 
-kern_return_t
-thread_get_state(
-    register thread_t thread,
-    int flavor,
-    thread_state_t state, /* pointer to OUT array */
-    mach_msg_type_number_t *state_count) /*IN/OUT*/
+static inline kern_return_t
+thread_get_state_internal(
+    thread_t thread,
+    int flavor,
+    thread_state_t state, /* pointer to OUT array */
+    mach_msg_type_number_t *state_count, /*IN/OUT*/
+    boolean_t to_user)
 {
-    kern_return_t result = KERN_SUCCESS;
+    kern_return_t result = KERN_SUCCESS;
 
-    if (thread == THREAD_NULL)
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == THREAD_NULL) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
     thread_mtx_lock(thread);
 
@@ -407,81 +518,218 @@ thread_get_state(
 
             thread_mtx_unlock(thread);
 
-            if (thread_stop(thread)) {
+            if (thread_stop(thread, FALSE)) {
                 thread_mtx_lock(thread);
                 result = machine_thread_get_state(
-                        thread, flavor, state, state_count);
+                    thread, flavor, state, state_count);
                 thread_unstop(thread);
-            }
-            else {
+            } else {
                 thread_mtx_lock(thread);
                 result = KERN_ABORTED;
             }
 
             thread_release(thread);
-        }
-        else
+        } else {
             result = machine_thread_get_state(
-                    thread, flavor, state, state_count);
-    }
-    else
+                thread, flavor, state, state_count);
+        }
+    } else if (thread->inspection) {
+        result = machine_thread_get_state(
+            thread, flavor, state, state_count);
+    } else {
         result = KERN_TERMINATED;
+    }
+
+    if (to_user && result == KERN_SUCCESS) {
+        result = machine_thread_state_convert_to_user(thread, flavor, state,
+            state_count);
+    }
 
     thread_mtx_unlock(thread);
 
-    return (result);
+    return result;
+}
+
+/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
+
+kern_return_t
+thread_get_state(
+    thread_t thread,
+    int flavor,
+    thread_state_t state,
+    mach_msg_type_number_t *state_count);
+
+kern_return_t
+thread_get_state(
+    thread_t thread,
+    int flavor,
+    thread_state_t state, /* pointer to OUT array */
+    mach_msg_type_number_t *state_count) /*IN/OUT*/
+{
+    return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
+}
+
+kern_return_t
+thread_get_state_to_user(
+    thread_t thread,
+    int flavor,
+    thread_state_t state, /* pointer to OUT array */
+    mach_msg_type_number_t *state_count) /*IN/OUT*/
+{
+    return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
}
 
 /*
  * Change thread's machine-dependent state.  Called with nothing
  * locked.  Returns same way.
  */
-kern_return_t
-thread_set_state(
-    register thread_t thread,
-    int flavor,
-    thread_state_t state,
-    mach_msg_type_number_t state_count)
+static inline kern_return_t
+thread_set_state_internal(
+    thread_t thread,
+    int flavor,
+    thread_state_t state,
+    mach_msg_type_number_t state_count,
+    boolean_t from_user)
 {
-    kern_return_t result = KERN_SUCCESS;
+    kern_return_t result = KERN_SUCCESS;
 
-    if (thread == THREAD_NULL)
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == THREAD_NULL) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
     thread_mtx_lock(thread);
 
     if (thread->active) {
+        if (from_user) {
+            result = machine_thread_state_convert_from_user(thread, flavor,
+                state, state_count);
+            if (result != KERN_SUCCESS) {
+                goto out;
+            }
+        }
         if (thread != current_thread()) {
             thread_hold(thread);
 
             thread_mtx_unlock(thread);
 
-            if (thread_stop(thread)) {
+            if (thread_stop(thread, TRUE)) {
                 thread_mtx_lock(thread);
                 result = machine_thread_set_state(
-                        thread, flavor, state, state_count);
+                    thread, flavor, state, state_count);
                 thread_unstop(thread);
-            }
-            else {
+            } else {
                 thread_mtx_lock(thread);
                 result = KERN_ABORTED;
             }
 
             thread_release(thread);
-        }
-        else
+        } else {
             result = machine_thread_set_state(
-                    thread, flavor, state, state_count);
-    }
-    else
+                thread, flavor, state, state_count);
+        }
+    } else {
         result = KERN_TERMINATED;
+    }
 
+    if ((result == KERN_SUCCESS) && from_user) {
+        extmod_statistics_incr_thread_set_state(thread);
+    }
+
+out:
     thread_mtx_unlock(thread);
 
-    return (result);
+    return result;
+}
+
+/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
+kern_return_t
+thread_set_state(
+    thread_t thread,
+    int flavor,
+    thread_state_t state,
+    mach_msg_type_number_t state_count);
+
+kern_return_t
+thread_set_state(
+    thread_t thread,
+    int flavor,
+    thread_state_t state,
+    mach_msg_type_number_t state_count)
+{
+    return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
 }
-
-
+
+kern_return_t
+thread_set_state_from_user(
+    thread_t thread,
+    int flavor,
+    thread_state_t state,
+    mach_msg_type_number_t state_count)
+{
+    return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
+}
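A hedged user-space sketch of the thread_get_state() path above: suspend the target first so the snapshot is stable (mirroring the kernel's thread_hold()/thread_stop() dance), then fetch the machine state. Flavor and type names are per-architecture:

#include <mach/mach.h>

#if defined(__arm64__)
#define DEMO_FLAVOR  ARM_THREAD_STATE64
#define DEMO_COUNT   ARM_THREAD_STATE64_COUNT
typedef arm_thread_state64_t demo_state_t;
#elif defined(__x86_64__)
#define DEMO_FLAVOR  x86_THREAD_STATE64
#define DEMO_COUNT   x86_THREAD_STATE64_COUNT
typedef x86_thread_state64_t demo_state_t;
#else
#error "sketch covers arm64 and x86_64 only"
#endif

kern_return_t
snapshot_registers(thread_act_t target, demo_state_t *out)
{
    mach_msg_type_number_t count = DEMO_COUNT;
    kern_return_t kr = thread_suspend(target);

    if (kr != KERN_SUCCESS) {
        return kr;
    }

    kr = thread_get_state(target, DEMO_FLAVOR,
        (thread_state_t)out, &count);

    (void)thread_resume(target);
    return kr;
}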
+
+kern_return_t
+thread_convert_thread_state(
+    thread_t thread,
+    int direction,
+    thread_state_flavor_t flavor,
+    thread_state_t in_state,          /* pointer to IN array */
+    mach_msg_type_number_t in_state_count,
+    thread_state_t out_state,         /* pointer to OUT array */
+    mach_msg_type_number_t *out_state_count) /*IN/OUT*/
+{
+    kern_return_t kr;
+    thread_t to_thread = THREAD_NULL;
+    thread_t from_thread = THREAD_NULL;
+    mach_msg_type_number_t state_count = in_state_count;
+
+    if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
+        direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    if (thread == THREAD_NULL) {
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    if (state_count > *out_state_count) {
+        return KERN_INSUFFICIENT_BUFFER_SIZE;
+    }
+
+    if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
+        to_thread = thread;
+        from_thread = current_thread();
+    } else {
+        to_thread = current_thread();
+        from_thread = thread;
+    }
+
+    /* Authenticate and convert thread state to kernel representation */
+    kr = machine_thread_state_convert_from_user(from_thread, flavor,
+        in_state, state_count);
+
+    /* Return early if one of the threads was JOP-disabled while the other wasn't */
+    if (kr != KERN_SUCCESS) {
+        return kr;
+    }
+
+    /* Convert thread state to target thread user representation */
+    kr = machine_thread_state_convert_to_user(to_thread, flavor,
+        in_state, &state_count);
+
+    if (kr == KERN_SUCCESS) {
+        if (state_count <= *out_state_count) {
+            memcpy(out_state, in_state, state_count * sizeof(uint32_t));
+            *out_state_count = state_count;
+        } else {
+            kr = KERN_INSUFFICIENT_BUFFER_SIZE;
+        }
+    }
+
+    return kr;
+}
 
 /*
  * Kernel-internal "thread" interfaces used outside this file:
  */
 
@@ -491,12 +739,13 @@ thread_set_state(
  */
 kern_return_t
 thread_state_initialize(
-    register thread_t thread)
+    thread_t thread)
 {
-    kern_return_t result = KERN_SUCCESS;
+    kern_return_t result = KERN_SUCCESS;
 
-    if (thread == THREAD_NULL)
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == THREAD_NULL) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
     thread_mtx_lock(thread);
 
@@ -506,39 +755,38 @@ thread_state_initialize(
 
         thread_mtx_unlock(thread);
 
-        if (thread_stop(thread)) {
+        if (thread_stop(thread, TRUE)) {
             thread_mtx_lock(thread);
             result = machine_thread_state_initialize( thread );
             thread_unstop(thread);
-        }
-        else {
+        } else {
             thread_mtx_lock(thread);
             result = KERN_ABORTED;
         }
 
         thread_release(thread);
+    } else {
+        result = machine_thread_state_initialize( thread );
     }
-    else
-        result = machine_thread_state_initialize( thread );
-    }
-    else
+    } else {
         result = KERN_TERMINATED;
+    }
 
     thread_mtx_unlock(thread);
 
-    return (result);
+    return result;
 }
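Nearly every mutator in this file repeats one sequence. A condensed, editorial restatement (no new API, just the file's own pattern) of the stop-and-mutate discipline shared by thread_get_state, thread_set_state, thread_state_initialize, thread_dup and thread_set_tsd_base:

/*
 * The stop-and-mutate pattern, as used throughout thread_act.c:
 *
 *   thread_mtx_lock(thread);
 *   if (thread->active) {
 *       thread_hold(thread);             // pin suspend_count up
 *       thread_mtx_unlock(thread);
 *       if (thread_stop(thread, ...)) {  // wait until the target is off-CPU
 *           thread_mtx_lock(thread);
 *           ... machine_thread_*() ...   // safe: target cannot run
 *           thread_unstop(thread);
 *       } else {
 *           thread_mtx_lock(thread);
 *           result = KERN_ABORTED;       // the stop was interrupted
 *       }
 *       thread_release(thread);          // undo the hold
 *   } else {
 *       result = KERN_TERMINATED;
 *   }
 *   thread_mtx_unlock(thread);
 */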
 
-
 kern_return_t
 thread_dup(
-    register thread_t target)
+    thread_t target)
 {
-    thread_t self = current_thread();
-    kern_return_t result = KERN_SUCCESS;
+    thread_t self = current_thread();
+    kern_return_t result = KERN_SUCCESS;
 
-    if (target == THREAD_NULL || target == self)
-        return (KERN_INVALID_ARGUMENT);
+    if (target == THREAD_NULL || target == self) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
     thread_mtx_lock(target);
 
@@ -547,27 +795,79 @@ thread_dup(
 
         thread_mtx_unlock(target);
 
-        if (thread_stop(target)) {
+        if (thread_stop(target, TRUE)) {
             thread_mtx_lock(target);
-            result = machine_thread_dup(self, target);
+            result = machine_thread_dup(self, target, FALSE);
+
+            if (self->affinity_set != AFFINITY_SET_NULL) {
+                thread_affinity_dup(self, target);
+            }
             thread_unstop(target);
-        }
-        else {
+        } else {
             thread_mtx_lock(target);
             result = KERN_ABORTED;
         }
 
         thread_release(target);
-    }
-    else
+    } else {
         result = KERN_TERMINATED;
+    }
 
     thread_mtx_unlock(target);
 
-    return (result);
+    return result;
 }
 
+kern_return_t
+thread_dup2(
+    thread_t source,
+    thread_t target)
+{
+    kern_return_t result = KERN_SUCCESS;
+    uint32_t active = 0;
+
+    if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    thread_mtx_lock(source);
+    active = source->active;
+    thread_mtx_unlock(source);
+
+    if (!active) {
+        return KERN_TERMINATED;
+    }
+
+    thread_mtx_lock(target);
+
+    if (target->active || target->inspection) {
+        thread_hold(target);
+
+        thread_mtx_unlock(target);
+
+        if (thread_stop(target, TRUE)) {
+            thread_mtx_lock(target);
+            result = machine_thread_dup(source, target, TRUE);
+            if (source->affinity_set != AFFINITY_SET_NULL) {
+                thread_affinity_dup(source, target);
+            }
+            thread_unstop(target);
+        } else {
+            thread_mtx_lock(target);
+            result = KERN_ABORTED;
+        }
+
+        thread_release(target);
+    } else {
+        result = KERN_TERMINATED;
+    }
+
+    thread_mtx_unlock(target);
+
+    return result;
+}
+
 /*
  * thread_setstatus:
  *
@@ -576,13 +876,22 @@ thread_dup(
  * Set the status of the specified thread.
  * Called with (and returns with) no locks held.
  */
 kern_return_t
 thread_setstatus(
-    register thread_t thread,
-    int flavor,
-    thread_state_t tstate,
-    mach_msg_type_number_t count)
+    thread_t thread,
+    int flavor,
+    thread_state_t tstate,
+    mach_msg_type_number_t count)
 {
+    return thread_set_state(thread, flavor, tstate, count);
+}
 
-    return (thread_set_state(thread, flavor, tstate, count));
+kern_return_t
+thread_setstatus_from_user(
+    thread_t thread,
+    int flavor,
+    thread_state_t tstate,
+    mach_msg_type_number_t count)
+{
+    return thread_set_state_from_user(thread, flavor, tstate, count);
 }
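A hedged sketch of the classic debugger-style use of the get/set pair that thread_setstatus_from_user() ultimately serves: stop the target, edit one register, write the state back. The arm64 accessor shown is the SDK's arm_thread_state64_set_pc_fptr() macro; on arm64e the new PC must be a properly signed function pointer, which is what machine_thread_state_convert_from_user() verifies on the way back in:

#include <mach/mach.h>

#if defined(__arm64__)
kern_return_t
redirect_thread(thread_act_t target, void *new_pc_fptr)
{
    arm_thread_state64_t state;
    mach_msg_type_number_t count = ARM_THREAD_STATE64_COUNT;
    kern_return_t kr;

    if ((kr = thread_suspend(target)) != KERN_SUCCESS) {
        return kr;
    }

    kr = thread_get_state(target, ARM_THREAD_STATE64,
        (thread_state_t)&state, &count);
    if (kr == KERN_SUCCESS) {
        /* new_pc_fptr must carry a valid PAC signature on arm64e */
        arm_thread_state64_set_pc_fptr(state, new_pc_fptr);
        kr = thread_set_state(target, ARM_THREAD_STATE64,
            (thread_state_t)&state, count);
    }

    (void)thread_resume(target);
    return kr;
}
#endif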
 
 /*
@@ -592,150 +901,142 @@ thread_setstatus(
  */
 kern_return_t
 thread_getstatus(
-    register thread_t thread,
-    int flavor,
-    thread_state_t tstate,
-    mach_msg_type_number_t *count)
+    thread_t thread,
+    int flavor,
+    thread_state_t tstate,
+    mach_msg_type_number_t *count)
 {
-    return (thread_get_state(thread, flavor, tstate, count));
+    return thread_get_state(thread, flavor, tstate, count);
 }
 
-/*
- * install_special_handler:
- *
- * Install the special returnhandler that handles suspension and
- * termination, if it hasn't been installed already.
- *
- * Called with the thread mutex held.
- */
-void
-install_special_handler(
-    thread_t thread)
+kern_return_t
+thread_getstatus_to_user(
+    thread_t thread,
+    int flavor,
+    thread_state_t tstate,
+    mach_msg_type_number_t *count)
 {
-    spl_t s = splsched();
-
-    thread_lock(thread);
-    install_special_handler_locked(thread);
-    thread_unlock(thread);
-    splx(s);
+    return thread_get_state_to_user(thread, flavor, tstate, count);
 }
 
 /*
- * install_special_handler_locked:
- *
- * Do the work of installing the special_handler.
- *
- * Called with the thread mutex and scheduling lock held.
+ * Change thread's machine-dependent userspace TSD base.
+ * Called with nothing locked.  Returns same way.
  */
-void
-install_special_handler_locked(
-    thread_t thread)
+kern_return_t
+thread_set_tsd_base(
+    thread_t thread,
+    mach_vm_offset_t tsd_base)
 {
-    ReturnHandler **rh;
+    kern_return_t result = KERN_SUCCESS;
 
-    /* The work handler must always be the last ReturnHandler on the list,
-       because it can do tricky things like detach the thr_act. */
-    for (rh = &thread->handlers; *rh; rh = &(*rh)->next)
-        continue;
+    if (thread == THREAD_NULL) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
-    if (rh != &thread->special_handler.next)
-        *rh = &thread->special_handler;
+    thread_mtx_lock(thread);
 
-    /*
-     * Temporarily undepress, so target has
-     * a chance to do locking required to
-     * block itself in special_handler().
-     */
-    if (thread->sched_mode & TH_MODE_ISDEPRESSED)
-        compute_priority(thread, TRUE);
+    if (thread->active) {
+        if (thread != current_thread()) {
+            thread_hold(thread);
 
-    thread_ast_set(thread, AST_APC);
+            thread_mtx_unlock(thread);
 
-    if (thread == current_thread())
-        ast_propagate(thread->ast);
-    else {
-        processor_t processor = thread->last_processor;
+            if (thread_stop(thread, TRUE)) {
+                thread_mtx_lock(thread);
+                result = machine_thread_set_tsd_base(thread, tsd_base);
+                thread_unstop(thread);
+            } else {
+                thread_mtx_lock(thread);
+                result = KERN_ABORTED;
+            }
 
-        if ( processor != PROCESSOR_NULL &&
-             processor->state == PROCESSOR_RUNNING &&
-             processor->active_thread == thread )
-            cause_ast_check(processor);
+            thread_release(thread);
+        } else {
+            result = machine_thread_set_tsd_base(thread, tsd_base);
+        }
+    } else {
+        result = KERN_TERMINATED;
     }
+
+    thread_mtx_unlock(thread);
+
+    return result;
 }
 
 /*
- * Activation control support routines internal to this file:
+ * thread_set_apc_ast:
+ *
+ * Register the AST_APC callback that handles suspension and
+ * termination, if it hasn't been installed already.
+ *
+ * Called with the thread mutex held.
  */
-
-void
-act_execute_returnhandlers(void)
+static void
+thread_set_apc_ast(thread_t thread)
 {
-    thread_t thread = current_thread();
-
-    thread_ast_clear(thread, AST_APC);
-    spllo();
-
-    for (;;) {
-        ReturnHandler *rh;
-
-        thread_mtx_lock(thread);
+    spl_t s = splsched();
 
-        (void)splsched();
-        thread_lock(thread);
+    thread_lock(thread);
+    thread_set_apc_ast_locked(thread);
+    thread_unlock(thread);
 
-        rh = thread->handlers;
-        if (rh != NULL) {
-            thread->handlers = rh->next;
+    splx(s);
+}
 
-            thread_unlock(thread);
-            spllo();
+/*
+ * thread_set_apc_ast_locked:
+ *
+ * Do the work of registering for the AST_APC callback.
+ *
+ * Called with the thread mutex and scheduling lock held.
+ */
+static void
+thread_set_apc_ast_locked(thread_t thread)
+{
+    thread_ast_set(thread, AST_APC);
 
-            thread_mtx_unlock(thread);
+    if (thread == current_thread()) {
+        ast_propagate(thread);
+    } else {
+        processor_t processor = thread->last_processor;
 
-            /* Execute it */
-            (*rh->handler)(rh, thread);
+        if (processor != PROCESSOR_NULL &&
+            processor->state == PROCESSOR_RUNNING &&
+            processor->active_thread == thread) {
+            cause_ast_check(processor);
         }
-        else
-            break;
     }
-
-    thread_unlock(thread);
-    spllo();
-
-    thread_mtx_unlock(thread);
 }
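A hedged model, in plain C11 rather than XNU API, of what thread_set_apc_ast_locked() relies on: an AST is a per-thread flag checked at well-defined boundaries, and setting it on a remotely running thread must be followed by a nudge so that thread re-checks promptly. All names here are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

#define MODEL_AST_APC (1u << 0)

struct model_thread {
    _Atomic unsigned ast;   /* pending AST bits, checked at boundaries */
};

static void
model_set_apc(struct model_thread *t, bool is_self, bool running_remotely)
{
    atomic_fetch_or_explicit(&t->ast, MODEL_AST_APC, memory_order_relaxed);

    if (is_self) {
        /* ast_propagate(): noticed on this thread's next return to user */
    } else if (running_remotely) {
        /* cause_ast_check(): IPI the processor so the target traps and
         * re-examines t->ast promptly instead of at quantum end */
    }
}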
 
 /*
- * special_handler_continue
+ * Activation control support routines internal to this file:
+ *
+ */
+
+/*
+ * thread_suspended
  *
- * Continuation routine for the special handler blocks.  It checks
+ * Continuation routine for thread suspension.  It checks
  * to see whether there have been any new suspensions.  If so, it
- * installs the special handler again.  Otherwise, it checks to see
- * if the current depression needs to be re-instated (it may have
- * been temporarily removed in order to get to this point in a hurry).
+ * installs the AST_APC handler again.
  */
-void
-special_handler_continue(void)
+__attribute__((noreturn))
+static void
+thread_suspended(__unused void *parameter, wait_result_t result)
 {
-    thread_t thread = current_thread();
+    thread_t thread = current_thread();
 
     thread_mtx_lock(thread);
 
-    if (thread->suspend_count > 0)
-        install_special_handler(thread);
-    else {
-        spl_t s = splsched();
-
-        thread_lock(thread);
-        if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
-            processor_t myprocessor = thread->last_processor;
+    if (result == THREAD_INTERRUPTED) {
+        thread->suspend_parked = FALSE;
+    } else {
+        assert(thread->suspend_parked == FALSE);
+    }
 
-            thread->sched_pri = DEPRESSPRI;
-            myprocessor->current_pri = thread->sched_pri;
-            thread->sched_mode &= ~TH_MODE_PREEMPT;
-        }
-        thread_unlock(thread);
-        splx(s);
+    if (thread->suspend_count > 0) {
+        thread_set_apc_ast(thread);
     }
 
     thread_mtx_unlock(thread);
@@ -745,127 +1046,250 @@ special_handler_continue(void)
 }
 
 /*
- * special_handler - handles suspension, termination.  Called
- * with nothing locked.  Returns (if it returns) the same way.
+ * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
+ * Called with nothing locked.  Returns (if it returns) the same way.
  */
 void
-special_handler(
-    __unused ReturnHandler *rh,
-    thread_t thread)
+thread_apc_ast(thread_t thread)
 {
-    spl_t s;
-
     thread_mtx_lock(thread);
 
-    s = splsched();
+    assert(thread->suspend_parked == FALSE);
+
+    spl_t s = splsched();
     thread_lock(thread);
-    thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY); /* clear any aborts */
+
+    /* TH_SFLAG_POLLDEPRESS is OK to have here */
+    assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);
+
+    thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
     thread_unlock(thread);
     splx(s);
 
-    /*
-     * If we're suspended, go to sleep and wait for someone to wake us up.
-     */
-    if (thread->active) {
-        if (thread->suspend_count > 0) {
-            if (thread->handlers == NULL) {
-                assert_wait(&thread->suspend_count, THREAD_ABORTSAFE);
-                thread_mtx_unlock(thread);
-                thread_block((thread_continue_t)special_handler_continue);
-                /*NOTREACHED*/
-            }
-
-            thread_mtx_unlock(thread);
+    if (!thread->active) {
+        /* Thread is ready to terminate, time to tear it down */
+        thread_mtx_unlock(thread);
 
-            special_handler_continue();
-            /*NOTREACHED*/
-        }
+        thread_terminate_self();
+        /*NOTREACHED*/
     }
-    else {
+
+    /* If we're suspended, go to sleep and wait for someone to wake us up. */
+    if (thread->suspend_count > 0) {
+        thread->suspend_parked = TRUE;
+        assert_wait(&thread->suspend_count,
+            THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
         thread_mtx_unlock(thread);
 
-        thread_terminate_self();
+        thread_block(thread_suspended);
         /*NOTREACHED*/
     }
 
     thread_mtx_unlock(thread);
 }
 
+/* Prototype, see justification above */
+kern_return_t
+act_set_state(
+    thread_t thread,
+    int flavor,
+    thread_state_t state,
+    mach_msg_type_number_t count);
+
 kern_return_t
 act_set_state(
-    thread_t thread,
-    int flavor,
-    thread_state_t state,
-    mach_msg_type_number_t count)
+    thread_t thread,
+    int flavor,
+    thread_state_t state,
+    mach_msg_type_number_t count)
 {
-    if (thread == current_thread())
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == current_thread()) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
-    return (thread_set_state(thread, flavor, state, count));
-
+    return thread_set_state(thread, flavor, state, count);
 }
 
+kern_return_t
+act_set_state_from_user(
+    thread_t thread,
+    int flavor,
+    thread_state_t state,
+    mach_msg_type_number_t count)
+{
+    if (thread == current_thread()) {
+        return KERN_INVALID_ARGUMENT;
+    }
+
+    return thread_set_state_from_user(thread, flavor, state, count);
+}
+
+/* Prototype, see justification above */
+kern_return_t
+act_get_state(
+    thread_t thread,
+    int flavor,
+    thread_state_t state,
+    mach_msg_type_number_t *count);
+
 kern_return_t
 act_get_state(
-    thread_t thread,
-    int flavor,
-    thread_state_t state,
-    mach_msg_type_number_t *count)
+    thread_t thread,
+    int flavor,
+    thread_state_t state,
+    mach_msg_type_number_t *count)
 {
-    if (thread == current_thread())
-        return (KERN_INVALID_ARGUMENT);
+    if (thread == current_thread()) {
+        return KERN_INVALID_ARGUMENT;
+    }
 
-    return (thread_get_state(thread, flavor, state, count));
+    return thread_get_state(thread, flavor, state, count);
 }
 
-void
-act_set_astbsd(
-    thread_t thread)
+kern_return_t
+act_get_state_to_user(
+    thread_t thread,
+    int flavor,
+    thread_state_t state,
+    mach_msg_type_number_t *count)
 {
-    spl_t s = splsched();
-
     if (thread == current_thread()) {
-        thread_ast_set(thread, AST_BSD);
-        ast_propagate(thread->ast);
+        return KERN_INVALID_ARGUMENT;
     }
-    else {
-        processor_t processor;
+
+    return thread_get_state_to_user(thread, flavor, state, count);
+}
+
+static void
+act_set_ast(
+    thread_t thread,
+    ast_t ast)
+{
+    spl_t s = splsched();
+
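A hedged sketch of the park/unpark cycle handled by thread_suspended() above: a self-suspend takes effect at the next AST, where thread_apc_ast() puts the thread to sleep on its own suspend_count until a resume arrives. From the caller's point of view, thread_suspend() on oneself simply doesn't "return" until someone resumes it:

#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *
parker(void *arg)
{
    (void)arg;
    printf("parking...\n");
    thread_suspend(mach_thread_self());   /* parks in thread_apc_ast */
    printf("resumed\n");
    return NULL;
}

int
main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, parker, NULL);
    sleep(1);                             /* let it park */
    thread_resume(pthread_mach_thread_np(t));
    pthread_join(t, NULL);
    return 0;
}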
+    if (thread == current_thread()) {
+        thread_ast_set(thread, ast);
+        ast_propagate(thread);
+    } else {
+        processor_t processor;
 
         thread_lock(thread);
-        thread_ast_set(thread, AST_BSD);
+        thread_ast_set(thread, ast);
         processor = thread->last_processor;
-        if ( processor != PROCESSOR_NULL &&
-             processor->state == PROCESSOR_RUNNING &&
-             processor->active_thread == thread )
+        if (processor != PROCESSOR_NULL &&
+            processor->state == PROCESSOR_RUNNING &&
+            processor->active_thread == thread) {
             cause_ast_check(processor);
+        }
         thread_unlock(thread);
     }
-
+
     splx(s);
 }
 
-void
-act_set_apc(
-    thread_t thread)
+/*
+ * set AST on thread without causing an AST check
+ * and without taking the thread lock
+ *
+ * If the thread is not the current thread, it may not notice
+ * the AST until the next context switch or quantum expiration
+ * on that thread.
+ */
+static void
+act_set_ast_async(thread_t thread,
+    ast_t ast)
 {
-    spl_t s = splsched();
-
+    thread_ast_set(thread, ast);
+
     if (thread == current_thread()) {
-        thread_ast_set(thread, AST_APC);
-        ast_propagate(thread->ast);
+        spl_t s = splsched();
+        ast_propagate(thread);
+        splx(s);
     }
-    else {
-        processor_t processor;
+}
 
-        thread_lock(thread);
-        thread_ast_set(thread, AST_APC);
-        processor = thread->last_processor;
-        if ( processor != PROCESSOR_NULL &&
-             processor->state == PROCESSOR_RUNNING &&
-             processor->active_thread == thread )
-            cause_ast_check(processor);
-        thread_unlock(thread);
+void
+act_set_astbsd(
+    thread_t thread)
+{
+    act_set_ast( thread, AST_BSD );
+}
+
+void
+act_set_astkevent(thread_t thread, uint16_t bits)
+{
+    os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);
+
+    /* kevent AST shouldn't send immediate IPIs */
+    act_set_ast_async(thread, AST_KEVENT);
+}
+
+uint16_t
+act_clear_astkevent(thread_t thread, uint16_t bits)
+{
+    /*
+     * avoid the atomic operation if none of the bits is set,
+     * which will be the common case.
+     */
+    uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
+    if (cur & bits) {
+        cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
     }
-
-    splx(s);
+    return cur & bits;
+}
+
+void
+act_set_ast_reset_pcs(thread_t thread)
+{
+    act_set_ast(thread, AST_RESET_PCS);
+}
+
+void
+act_set_kperf(
+    thread_t thread)
+{
+    /* safety check */
+    if (thread != current_thread()) {
+        if (!ml_get_interrupts_enabled()) {
+            panic("unsafe act_set_kperf operation");
+        }
+    }
+
+    act_set_ast( thread, AST_KPERF );
+}
+
+#if CONFIG_MACF
+void
+act_set_astmacf(
+    thread_t thread)
+{
+    act_set_ast( thread, AST_MACF);
+}
+#endif
+
+void
+act_set_astledger(thread_t thread)
+{
+    act_set_ast(thread, AST_LEDGER);
+}
+
+/*
+ * The ledger AST may need to be set while already holding
+ * the thread lock. This routine skips sending the IPI,
+ * allowing us to avoid the lock hold.
+ *
+ * However, it means the targeted thread must context switch
+ * to recognize the ledger AST.
+ */
+void
+act_set_astledger_async(thread_t thread)
+{
+    act_set_ast_async(thread, AST_LEDGER);
+}
+
+void
+act_set_io_telemetry_ast(thread_t thread)
+{
+    act_set_ast(thread, AST_TELEMETRY_IO);
 }
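The os_atomic_* helpers used above map onto C11 atomics; in particular, os_atomic_andnot_orig() clears bits and returns the value seen before the clear, which is how act_clear_astkevent() reports exactly the bits it consumed. A hedged standalone model:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint16_t model_kevent_ast_bits;

static void
model_set(uint16_t bits)
{
    /* os_atomic_or(..., relaxed): a relaxed fetch-or */
    atomic_fetch_or_explicit(&model_kevent_ast_bits, bits,
        memory_order_relaxed);
}

static uint16_t
model_clear(uint16_t bits)
{
    uint16_t cur = atomic_load_explicit(&model_kevent_ast_bits,
        memory_order_relaxed);
    if (cur & bits) {
        /* fetch_and with ~bits returns the original value ("_orig") */
        cur = atomic_fetch_and_explicit(&model_kevent_ast_bits,
            (uint16_t)~bits, memory_order_relaxed);
    }
    return cur & bits;
}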