X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/de355530ae67247cbd0da700edb3a2a1dae884c2..e8c3f78193f1895ea514044358b93b1add9322f3:/osfmk/kern/thread_act.c?ds=sidebyside diff --git a/osfmk/kern/thread_act.c b/osfmk/kern/thread_act.c index f7e5000ec..4faa1e9b5 100644 --- a/osfmk/kern/thread_act.c +++ b/osfmk/kern/thread_act.c @@ -1,23 +1,29 @@ /* - * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. + * Copyright (c) 2000-2016 Apple Inc. All rights reserved. * - * @APPLE_LICENSE_HEADER_START@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * - * The contents of this file constitute Original Code as defined in and - * are subject to the Apple Public Source License Version 1.1 (the - * "License"). You may not use this file except in compliance with the - * License. Please obtain a copy of the License at - * http://www.apple.com/publicsource and read it before using this file. + * This file contains Original Code and/or Modifications of Original Code + * as defined in and that are subject to the Apple Public Source License + * Version 2.0 (the 'License'). You may not use this file except in + * compliance with the License. The rights granted to you under the License + * may not be used to create, or enable the creation or redistribution of, + * unlawful or unlicensed copies of an Apple operating system, or to + * circumvent, violate, or enable the circumvention or violation of, any + * terms of an Apple operating system software license agreement. * - * This Original Code and all software distributed under the License are - * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER + * Please obtain a copy of the License at + * http://www.opensource.apple.com/apsl/ and read it before using this file. + * + * The Original Code and all software distributed under the License are + * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the - * License for the specific language governing rights and limitations - * under the License. + * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. + * Please see the License for the specific language governing rights and + * limitations under the License. * - * @APPLE_LICENSE_HEADER_END@ + * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ */ /* * @OSF_FREE_COPYRIGHT@ @@ -41,79 +47,108 @@ * * Author: Bryan Ford, University of Utah CSS * - * Thread_Activation management routines + * Thread management routines */ -#include -#include +#include #include -#include -#include +#include + +#include +#include #include #include +#include #include -#include #include -#include -#include #include #include #include #include #include #include -#include #include #include #include #include -#include /*** ??? fix so this can be removed ***/ #include -#include -#include +#include +#include -/* - * Debugging printf control - */ -#if MACH_ASSERT -unsigned int watchacts = 0 /* WA_ALL */ - ; /* Do-it-yourself & patchable */ -#endif +#include + +#include + +static void act_abort(thread_t thread); + +static void thread_suspended(void *arg, wait_result_t result); +static void thread_set_apc_ast(thread_t thread); +static void thread_set_apc_ast_locked(thread_t thread); /* - * Track the number of times we need to swapin a thread to deallocate it. + * Internal routine to mark a thread as started. 
+ * Always called with the thread mutex locked. */ -int act_free_swapin = 0; -boolean_t first_act; +void +thread_start( + thread_t thread) +{ + clear_wait(thread, THREAD_AWAKENED); + thread->started = TRUE; +} /* - * Forward declarations for functions local to this file. + * Internal routine to mark a thread as waiting + * right after it has been created. The caller + * is responsible to call wakeup()/thread_wakeup() + * or thread_terminate() to get it going. + * + * Always called with the thread mutex locked. + * + * Task and task_threads mutexes also held + * (so nobody can set the thread running before + * this point) + * + * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE + * to allow termination from this point forward. */ -kern_return_t act_abort( thread_act_t, boolean_t); -void special_handler(ReturnHandler *, thread_act_t); -kern_return_t act_set_state_locked(thread_act_t, int, - thread_state_t, - mach_msg_type_number_t); -kern_return_t act_get_state_locked(thread_act_t, int, - thread_state_t, - mach_msg_type_number_t *); -void act_set_astbsd(thread_act_t); -void act_set_apc(thread_act_t); -void act_user_to_kernel(thread_act_t); -void act_ulock_release_all(thread_act_t thr_act); - -void install_special_handler_locked(thread_act_t); +void +thread_start_in_assert_wait( + thread_t thread, + event_t event, + wait_interrupt_t interruptible) +{ + struct waitq *waitq = assert_wait_queue(event); + wait_result_t wait_result; + spl_t spl; -static void act_disable(thread_act_t); + spl = splsched(); + waitq_lock(waitq); -struct thread_activation pageout_act; + /* clear out startup condition (safe because thread not started yet) */ + thread_lock(thread); + assert(!thread->started); + assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT)); + thread->state &= ~(TH_WAIT | TH_UNINT); + thread_unlock(thread); -static zone_t thr_act_zone; + /* assert wait interruptibly forever */ + wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event), + interruptible, + TIMEOUT_URGENCY_SYS_NORMAL, + TIMEOUT_WAIT_FOREVER, + TIMEOUT_NO_LEEWAY, + thread); + assert (wait_result == THREAD_WAITING); -/* - * Thread interfaces accessed via a thread_activation: - */ + /* mark thread started while we still hold the waitq lock */ + thread_lock(thread); + thread->started = TRUE; + thread_unlock(thread); + waitq_unlock(waitq); + splx(spl); +} /* * Internal routine to terminate a thread. @@ -121,39 +156,33 @@ static zone_t thr_act_zone; */ kern_return_t thread_terminate_internal( - register thread_act_t act) + thread_t thread) { - kern_return_t result; - thread_t thread; + kern_return_t result = KERN_SUCCESS; - thread = act_lock_thread(act); + thread_mtx_lock(thread); - if (!act->active) { - act_unlock_thread(act); - return (KERN_TERMINATED); - } + if (thread->active) { + thread->active = FALSE; - act_disable(act); - result = act_abort(act, FALSE); + act_abort(thread); - /* - * Make sure this thread enters the kernel - * Must unlock the act, but leave the shuttle - * captured in this act. - */ - if (thread != current_thread()) { - act_unlock(act); + if (thread->started) + clear_wait(thread, THREAD_INTERRUPTED); + else { + thread_start(thread); + } + } + else + result = KERN_TERMINATED; - if (thread_stop(thread)) - thread_unstop(thread); - else - result = KERN_ABORTED; + if (thread->affinity_set != NULL) + thread_affinity_terminate(thread); - act_lock(act); - } + thread_mtx_unlock(thread); - clear_wait(thread, act->inited? 
THREAD_INTERRUPTED: THREAD_AWAKENED); - act_unlock_thread(act); + if (thread != current_thread() && result == KERN_SUCCESS) + thread_wait(thread, FALSE); return (result); } @@ -163,31 +192,29 @@ thread_terminate_internal( */ kern_return_t thread_terminate( - register thread_act_t act) + thread_t thread) { - kern_return_t result; - - if (act == THR_ACT_NULL) + if (thread == THREAD_NULL) return (KERN_INVALID_ARGUMENT); - if ( (act->task == kernel_task || - act->kernel_loaded ) && - act != current_act() ) + /* Kernel threads can't be terminated without their own cooperation */ + if (thread->task == kernel_task && thread != current_thread()) return (KERN_FAILURE); - result = thread_terminate_internal(act); + kern_return_t result = thread_terminate_internal(thread); /* - * If a kernel thread is terminating itself, force an AST here. - * Kernel threads don't normally pass through the AST checking - * code - and all threads finish their own termination in the - * special handler APC. + * If a kernel thread is terminating itself, force handle the APC_AST here. + * Kernel threads don't pass through the return-to-user AST checking code, + * but all threads must finish their own termination in thread_apc_ast. */ - if ( act->task == kernel_task || - act->kernel_loaded ) { - assert(act == current_act()); - ast_taken(AST_APC, FALSE); + if (thread->task == kernel_task) { + assert(thread->active == FALSE); + thread_ast_clear(thread, AST_APC); + thread_apc_ast(thread); + panic("thread_terminate"); + /* NOTREACHED */ } return (result); @@ -198,591 +225,546 @@ thread_terminate( * This is a recursive-style suspension of the thread, a count of * suspends is maintained. * - * Called with act_lock held. + * Called with thread mutex held. */ void -thread_hold( - register thread_act_t act) +thread_hold(thread_t thread) { - thread_t thread = act->thread; - - if (act->suspend_count++ == 0) { - install_special_handler(act); - if ( act->inited && - thread != THREAD_NULL && - thread->top_act == act ) - thread_wakeup_one(&act->suspend_count); + if (thread->suspend_count++ == 0) { + thread_set_apc_ast(thread); + assert(thread->suspend_parked == FALSE); } } /* - * Decrement internal suspension count for thr_act, setting thread + * Decrement internal suspension count, setting thread * runnable when count falls to zero. * - * Called with act_lock held. + * Because the wait is abortsafe, we can't be guaranteed that the thread + * is currently actually waiting even if suspend_parked is set. + * + * Called with thread mutex held. 
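For context, thread_terminate() above is reachable from user space through the Mach thread_act interface (kernel threads excepted, per the check above). A minimal user-space sketch follows, assuming macOS; pthread_mach_thread_np() is the only piece not shown in the diff. Hard-terminating a pthread this way skips pthread_exit(), so its locks and bookkeeping leak; it is shown only to illustrate the kernel path, not as recommended practice.

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *spin(void *arg) { for (;;) pause(); return NULL; }

int main(void)
{
	pthread_t pt;
	pthread_create(&pt, NULL, spin, NULL);
	sleep(1);	/* let the victim start and block */

	/* pthread_mach_thread_np() maps a pthread to its Mach thread port */
	kern_return_t kr = thread_terminate(pthread_mach_thread_np(pt));
	printf("thread_terminate: %s\n", mach_error_string(kr));
	return 0;	/* no pthread_join(): the victim never exits cleanly */
}
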
*/ void -thread_release( - register thread_act_t act) +thread_release(thread_t thread) { - thread_t thread = act->thread; - - if ( act->suspend_count > 0 && - --act->suspend_count == 0 && - thread != THREAD_NULL && - thread->top_act == act ) { - if (!act->inited) { - clear_wait(thread, THREAD_AWAKENED); - act->inited = TRUE; + assertf(thread->suspend_count > 0, "thread %p over-resumed", thread); + + /* fail-safe on non-assert builds */ + if (thread->suspend_count == 0) + return; + + if (--thread->suspend_count == 0) { + if (!thread->started) { + thread_start(thread); + } else if (thread->suspend_parked) { + thread->suspend_parked = FALSE; + thread_wakeup_thread(&thread->suspend_count, thread); } - else - thread_wakeup_one(&act->suspend_count); } } kern_return_t -thread_suspend( - register thread_act_t act) +thread_suspend(thread_t thread) { - thread_t thread; + kern_return_t result = KERN_SUCCESS; - if (act == THR_ACT_NULL) + if (thread == THREAD_NULL || thread->task == kernel_task) return (KERN_INVALID_ARGUMENT); - thread = act_lock_thread(act); + thread_mtx_lock(thread); - if (!act->active) { - act_unlock_thread(act); - return (KERN_TERMINATED); + if (thread->active) { + if (thread->user_stop_count++ == 0) + thread_hold(thread); + } else { + result = KERN_TERMINATED; } - if ( act->user_stop_count++ == 0 && - act->suspend_count++ == 0 ) { - install_special_handler(act); - if ( thread != current_thread() && - thread != THREAD_NULL && - thread->top_act == act ) { - assert(act->inited); - thread_wakeup_one(&act->suspend_count); - act_unlock_thread(act); - - thread_wait(thread); - } - else - act_unlock_thread(act); - } - else - act_unlock_thread(act); + thread_mtx_unlock(thread); + + if (thread != current_thread() && result == KERN_SUCCESS) + thread_wait(thread, FALSE); - return (KERN_SUCCESS); + return (result); } kern_return_t -thread_resume( - register thread_act_t act) +thread_resume(thread_t thread) { - kern_return_t result = KERN_SUCCESS; - thread_t thread; + kern_return_t result = KERN_SUCCESS; - if (act == THR_ACT_NULL) + if (thread == THREAD_NULL || thread->task == kernel_task) return (KERN_INVALID_ARGUMENT); - thread = act_lock_thread(act); - - if (act->active) { - if (act->user_stop_count > 0) { - if ( --act->user_stop_count == 0 && - --act->suspend_count == 0 && - thread != THREAD_NULL && - thread->top_act == act ) { - if (!act->inited) { - clear_wait(thread, THREAD_AWAKENED); - act->inited = TRUE; - } - else - thread_wakeup_one(&act->suspend_count); - } - } - else + thread_mtx_lock(thread); + + if (thread->active) { + if (thread->user_stop_count > 0) { + if (--thread->user_stop_count == 0) + thread_release(thread); + } else { result = KERN_FAILURE; - } - else + } + } else { result = KERN_TERMINATED; + } - act_unlock_thread(act); + thread_mtx_unlock(thread); return (result); } -/* - * This routine walks toward the head of an RPC chain starting at - * a specified thread activation. An alert bit is set and a special - * handler is installed for each thread it encounters. - * - * The target thread act and thread shuttle are already locked. - */ -kern_return_t -post_alert( - register thread_act_t act, - unsigned alert_bits) -{ - panic("post_alert"); -} - /* - * thread_depress_abort: + * thread_depress_abort_from_user: * * Prematurely abort priority depression if there is one. 
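The thread_suspend()/thread_resume() pair above keeps user_stop_count as a recursive counter: suspends and resumes must balance, and an unbalanced resume fails with KERN_FAILURE. A small user-space sketch of that contract (macOS assumed, error handling elided):

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg) { for (;;) sleep(1); return NULL; }

int main(void)
{
	pthread_t pt;
	pthread_create(&pt, NULL, worker, NULL);
	sleep(1);

	thread_act_t t = pthread_mach_thread_np(pt);

	thread_suspend(t);	/* user_stop_count 0 -> 1: thread_hold() runs */
	thread_suspend(t);	/* 1 -> 2: already held, just counts */
	thread_resume(t);	/* 2 -> 1: still suspended */
	thread_resume(t);	/* 1 -> 0: thread_release() lets it run */

	kern_return_t kr = thread_resume(t);	/* unbalanced resume */
	printf("extra resume: %s\n", mach_error_string(kr));	/* KERN_FAILURE */
	return 0;
}
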
*/ kern_return_t -thread_depress_abort( - register thread_act_t thr_act) +thread_depress_abort_from_user(thread_t thread) { - register thread_t thread; - kern_return_t result; + kern_return_t result; - if (thr_act == THR_ACT_NULL) + if (thread == THREAD_NULL) return (KERN_INVALID_ARGUMENT); - thread = act_lock_thread(thr_act); - /* if activation is terminating, this operation is not meaningful */ - if (!thr_act->active) { - act_unlock_thread(thr_act); + thread_mtx_lock(thread); - return (KERN_TERMINATED); - } - - result = _mk_sp_thread_depress_abort(thread, FALSE); + if (thread->active) + result = thread_depress_abort(thread); + else + result = KERN_TERMINATED; - act_unlock_thread(thr_act); + thread_mtx_unlock(thread); return (result); } /* - * Indicate that the activation should run its - * special handler to detect the condition. + * Indicate that the thread should run the AST_APC callback + * to detect an abort condition. * - * Called with act_lock held. + * Called with thread mutex held. */ -kern_return_t +static void act_abort( - thread_act_t act, - boolean_t chain_break ) + thread_t thread) { - thread_t thread = act->thread; spl_t s = splsched(); - assert(thread->top_act == act); - thread_lock(thread); - if (!(thread->state & TH_ABORT)) { - thread->state |= TH_ABORT; - install_special_handler_locked(act); + + if (!(thread->sched_flags & TH_SFLAG_ABORT)) { + thread->sched_flags |= TH_SFLAG_ABORT; + thread_set_apc_ast_locked(thread); + thread_depress_abort_locked(thread); } else { - thread->state &= ~TH_ABORT_SAFELY; + thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY; } + thread_unlock(thread); splx(s); - - return (KERN_SUCCESS); } - + kern_return_t thread_abort( - register thread_act_t act) + thread_t thread) { - kern_return_t result; - thread_t thread; + kern_return_t result = KERN_SUCCESS; - if (act == THR_ACT_NULL) + if (thread == THREAD_NULL) return (KERN_INVALID_ARGUMENT); - thread = act_lock_thread(act); + thread_mtx_lock(thread); - if (!act->active) { - act_unlock_thread(act); - return (KERN_TERMINATED); + if (thread->active) { + act_abort(thread); + clear_wait(thread, THREAD_INTERRUPTED); } + else + result = KERN_TERMINATED; - result = act_abort(act, FALSE); - clear_wait(thread, THREAD_INTERRUPTED); - act_unlock_thread(act); + thread_mtx_unlock(thread); return (result); } kern_return_t thread_abort_safely( - thread_act_t act) + thread_t thread) { - thread_t thread; - kern_return_t ret; - spl_t s; + kern_return_t result = KERN_SUCCESS; - if ( act == THR_ACT_NULL ) + if (thread == THREAD_NULL) return (KERN_INVALID_ARGUMENT); - thread = act_lock_thread(act); + thread_mtx_lock(thread); - if (!act->active) { - act_unlock_thread(act); - return (KERN_TERMINATED); - } + if (thread->active) { + spl_t s = splsched(); - s = splsched(); - thread_lock(thread); - if (!thread->at_safe_point || - clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) { - if (!(thread->state & TH_ABORT)) { - thread->state |= (TH_ABORT|TH_ABORT_SAFELY); - install_special_handler_locked(act); + thread_lock(thread); + if (!thread->at_safe_point || + clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) { + if (!(thread->sched_flags & TH_SFLAG_ABORT)) { + thread->sched_flags |= TH_SFLAG_ABORTED_MASK; + thread_set_apc_ast_locked(thread); + thread_depress_abort_locked(thread); + } } + thread_unlock(thread); + splx(s); + } else { + result = KERN_TERMINATED; } - thread_unlock(thread); - splx(s); - - act_unlock_thread(act); - return (KERN_SUCCESS); + thread_mtx_unlock(thread); + + return (result); } 
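The abort path above is what knocks a target out of an interruptible wait with THREAD_INTERRUPTED. From user space that surfaces as an aborted Mach wait; in the sketch below (macOS assumed) the victim's semaphore_wait() returns KERN_ABORTED. thread_abort_safely() has the same effect but, as the at_safe_point check shows, defers the abort until the target reaches a safe point.

#include <mach/mach.h>
#include <mach/mach_error.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static semaphore_t sem;

static void *victim(void *arg)
{
	kern_return_t kr = semaphore_wait(sem);	/* interruptible wait */
	printf("semaphore_wait: %s\n", mach_error_string(kr));	/* KERN_ABORTED */
	return NULL;
}

int main(void)
{
	semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);

	pthread_t pt;
	pthread_create(&pt, NULL, victim, NULL);
	sleep(1);	/* let it block */

	thread_abort(pthread_mach_thread_np(pt));	/* clear_wait(THREAD_INTERRUPTED) */
	pthread_join(pt, NULL);
	return 0;
}
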
/*** backward compatibility hacks ***/ #include #include #include -#include kern_return_t thread_info( - thread_act_t thr_act, + thread_t thread, thread_flavor_t flavor, thread_info_t thread_info_out, mach_msg_type_number_t *thread_info_count) { - register thread_t thread; kern_return_t result; - if (thr_act == THR_ACT_NULL) + if (thread == THREAD_NULL) return (KERN_INVALID_ARGUMENT); - thread = act_lock_thread(thr_act); - if (!thr_act->active) { - act_unlock_thread(thr_act); - - return (KERN_TERMINATED); - } + thread_mtx_lock(thread); - result = thread_info_shuttle(thr_act, flavor, - thread_info_out, thread_info_count); + if (thread->active || thread->inspection) + result = thread_info_internal( + thread, flavor, thread_info_out, thread_info_count); + else + result = KERN_TERMINATED; - act_unlock_thread(thr_act); + thread_mtx_unlock(thread); return (result); } -/* - * Routine: thread_get_special_port [kernel call] - * Purpose: - * Clones a send right for one of the thread's - * special ports. - * Conditions: - * Nothing locked. - * Returns: - * KERN_SUCCESS Extracted a send right. - * KERN_INVALID_ARGUMENT The thread is null. - * KERN_FAILURE The thread is dead. - * KERN_INVALID_ARGUMENT Invalid special port. - */ - -kern_return_t -thread_get_special_port( - thread_act_t thr_act, - int which, - ipc_port_t *portp) +static inline kern_return_t +thread_get_state_internal( + thread_t thread, + int flavor, + thread_state_t state, /* pointer to OUT array */ + mach_msg_type_number_t *state_count, /*IN/OUT*/ + boolean_t to_user) { - ipc_port_t *whichp; - ipc_port_t port; - thread_t thread; - -#if MACH_ASSERT - if (watchacts & WA_PORT) - printf("thread_get_special_port(thr_act=%x, which=%x port@%x=%x\n", - thr_act, which, portp, (portp ? *portp : 0)); -#endif /* MACH_ASSERT */ - - if (!thr_act) - return KERN_INVALID_ARGUMENT; - thread = act_lock_thread(thr_act); - switch (which) { - case THREAD_KERNEL_PORT: - whichp = &thr_act->ith_sself; - break; - - default: - act_unlock_thread(thr_act); - return KERN_INVALID_ARGUMENT; - } + kern_return_t result = KERN_SUCCESS; - if (!thr_act->active) { - act_unlock_thread(thr_act); - return KERN_FAILURE; - } + if (thread == THREAD_NULL) + return (KERN_INVALID_ARGUMENT); - port = ipc_port_copy_send(*whichp); - act_unlock_thread(thr_act); + thread_mtx_lock(thread); - *portp = port; - return KERN_SUCCESS; -} + if (thread->active) { + if (thread != current_thread()) { + thread_hold(thread); -/* - * Routine: thread_set_special_port [kernel call] - * Purpose: - * Changes one of the thread's special ports, - * setting it to the supplied send right. - * Conditions: - * Nothing locked. If successful, consumes - * the supplied send right. - * Returns: - * KERN_SUCCESS Changed the special port. - * KERN_INVALID_ARGUMENT The thread is null. - * KERN_FAILURE The thread is dead. - * KERN_INVALID_ARGUMENT Invalid special port. 
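thread_info() above simply gates on thread->active (or inspection) and delegates to thread_info_internal(). For reference, the matching user-space call (macOS assumed); note that mach_thread_self(), unlike pthread_mach_thread_np(), returns a new send right that must be deallocated:

#include <mach/mach.h>
#include <stdio.h>

int main(void)
{
	thread_basic_info_data_t info;
	mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
	thread_act_t self = mach_thread_self();

	kern_return_t kr = thread_info(self, THREAD_BASIC_INFO,
	    (thread_info_t)&info, &count);
	if (kr == KERN_SUCCESS)
		printf("user %d.%06ds, suspend_count %d\n",
		    info.user_time.seconds, info.user_time.microseconds,
		    info.suspend_count);

	mach_port_deallocate(mach_task_self(), self);	/* drop the extra ref */
	return 0;
}
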
- */ + thread_mtx_unlock(thread); -kern_return_t -thread_set_special_port( - thread_act_t thr_act, - int which, - ipc_port_t port) -{ - ipc_port_t *whichp; - ipc_port_t old; - thread_t thread; - -#if MACH_ASSERT - if (watchacts & WA_PORT) - printf("thread_set_special_port(thr_act=%x,which=%x,port=%x\n", - thr_act, which, port); -#endif /* MACH_ASSERT */ - - if (thr_act == 0) - return KERN_INVALID_ARGUMENT; - - thread = act_lock_thread(thr_act); - switch (which) { - case THREAD_KERNEL_PORT: - whichp = &thr_act->ith_self; - break; - - default: - act_unlock_thread(thr_act); - return KERN_INVALID_ARGUMENT; + if (thread_stop(thread, FALSE)) { + thread_mtx_lock(thread); + result = machine_thread_get_state( + thread, flavor, state, state_count); + thread_unstop(thread); + } + else { + thread_mtx_lock(thread); + result = KERN_ABORTED; + } + + thread_release(thread); + } + else + result = machine_thread_get_state( + thread, flavor, state, state_count); + } + else if (thread->inspection) + { + result = machine_thread_get_state( + thread, flavor, state, state_count); } + else + result = KERN_TERMINATED; - if (!thr_act->active) { - act_unlock_thread(thr_act); - return KERN_FAILURE; + if (to_user && result == KERN_SUCCESS) { + result = machine_thread_state_convert_to_user(thread, flavor, state, + state_count); } - old = *whichp; - *whichp = port; - act_unlock_thread(thr_act); + thread_mtx_unlock(thread); - if (IP_VALID(old)) - ipc_port_release_send(old); - return KERN_SUCCESS; + return (result); } -/* - * thread state should always be accessible by locking the thread - * and copying it. The activation messes things up so for right - * now if it's not the top of the chain, use a special handler to - * get the information when the shuttle returns to the activation. - */ +/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */ + +kern_return_t +thread_get_state( + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t *state_count); + kern_return_t thread_get_state( - register thread_act_t act, + thread_t thread, + int flavor, + thread_state_t state, /* pointer to OUT array */ + mach_msg_type_number_t *state_count) /*IN/OUT*/ +{ + return thread_get_state_internal(thread, flavor, state, state_count, FALSE); +} + +kern_return_t +thread_get_state_to_user( + thread_t thread, int flavor, thread_state_t state, /* pointer to OUT array */ mach_msg_type_number_t *state_count) /*IN/OUT*/ +{ + return thread_get_state_internal(thread, flavor, state, state_count, TRUE); +} + +/* + * Change thread's machine-dependent state. Called with nothing + * locked. Returns same way. 
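thread_get_state_internal() above holds and stops a remote target itself before calling machine_thread_get_state(), so a user-space caller does not strictly need to suspend first; samplers and debuggers usually do anyway so the snapshot stays meaningful after the call returns. A sketch (macOS assumed; arm64 and x86_64 flavors shown):

#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg) { for (;;) sleep(1); return NULL; }

int main(void)
{
	pthread_t pt;
	pthread_create(&pt, NULL, worker, NULL);
	sleep(1);

	thread_act_t t = pthread_mach_thread_np(pt);
	thread_suspend(t);	/* keep the snapshot stable */

	kern_return_t kr;
#if defined(__arm64__)
	arm_thread_state64_t st;
	mach_msg_type_number_t cnt = ARM_THREAD_STATE64_COUNT;
	kr = thread_get_state(t, ARM_THREAD_STATE64, (thread_state_t)&st, &cnt);
	if (kr == KERN_SUCCESS)
		printf("pc  = 0x%llx\n",
		    (unsigned long long)arm_thread_state64_get_pc(st));
#else
	x86_thread_state64_t st;
	mach_msg_type_number_t cnt = x86_THREAD_STATE64_COUNT;
	kr = thread_get_state(t, x86_THREAD_STATE64, (thread_state_t)&st, &cnt);
	if (kr == KERN_SUCCESS)
		printf("rip = 0x%llx\n", (unsigned long long)st.__rip);
#endif

	thread_resume(t);
	return 0;
}
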
+ */ +static inline kern_return_t +thread_set_state_internal( + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count, + boolean_t from_user) { kern_return_t result = KERN_SUCCESS; - thread_t thread; - if (act == THR_ACT_NULL || act == current_act()) + if (thread == THREAD_NULL) return (KERN_INVALID_ARGUMENT); - thread = act_lock_thread(act); + thread_mtx_lock(thread); - if (!act->active) { - act_unlock_thread(act); - return (KERN_TERMINATED); - } - - thread_hold(act); + if (thread->active) { + if (from_user) { + result = machine_thread_state_convert_from_user(thread, flavor, + state, state_count); + if (result != KERN_SUCCESS) { + goto out; + } + } + if (thread != current_thread()) { + thread_hold(thread); - for (;;) { - thread_t thread1; + thread_mtx_unlock(thread); - if ( thread == THREAD_NULL || - thread->top_act != act ) - break; - act_unlock_thread(act); + if (thread_stop(thread, TRUE)) { + thread_mtx_lock(thread); + result = machine_thread_set_state( + thread, flavor, state, state_count); + thread_unstop(thread); + } + else { + thread_mtx_lock(thread); + result = KERN_ABORTED; + } - if (!thread_stop(thread)) { - result = KERN_ABORTED; - (void)act_lock_thread(act); - thread = THREAD_NULL; - break; + thread_release(thread); } - - thread1 = act_lock_thread(act); - if (thread1 == thread) - break; - - thread_unstop(thread); - thread = thread1; + else + result = machine_thread_set_state( + thread, flavor, state, state_count); } + else + result = KERN_TERMINATED; - if (result == KERN_SUCCESS) - result = act_machine_get_state(act, flavor, state, state_count); - - if ( thread != THREAD_NULL && - thread->top_act == act ) - thread_unstop(thread); + if ((result == KERN_SUCCESS) && from_user) + extmod_statistics_incr_thread_set_state(thread); - thread_release(act); - act_unlock_thread(act); +out: + thread_mtx_unlock(thread); return (result); } -/* - * Change thread's machine-dependent state. Called with nothing - * locked. Returns same way. - */ +/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */ +kern_return_t +thread_set_state( + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count); + kern_return_t thread_set_state( - register thread_act_t act, + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t state_count) +{ + return thread_set_state_internal(thread, flavor, state, state_count, FALSE); +} + +kern_return_t +thread_set_state_from_user( + thread_t thread, int flavor, thread_state_t state, mach_msg_type_number_t state_count) +{ + return thread_set_state_internal(thread, flavor, state, state_count, TRUE); +} + +/* + * Kernel-internal "thread" interfaces used outside this file: + */ + +/* Initialize (or re-initialize) a thread state. Called from execve + * with nothing locked, returns same way. 
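The _from_user variant above additionally runs machine_thread_state_convert_from_user() and, on success, bumps the task's external-modification statistics: setting another thread's registers is treated as external modification. The usual debugger idiom on top of this is a get/modify/set round-trip, sketched below as a hypothetical helper (not part of the diff); `target` is assumed to come from pthread_mach_thread_np() on a suspended thread as in the earlier sketch, arm64 flavor shown.

#include <mach/mach.h>

static kern_return_t
roundtrip_state(thread_act_t target)
{
#if defined(__arm64__)
	arm_thread_state64_t st;
	mach_msg_type_number_t cnt = ARM_THREAD_STATE64_COUNT;
	kern_return_t kr = thread_get_state(target, ARM_THREAD_STATE64,
	    (thread_state_t)&st, &cnt);
	if (kr != KERN_SUCCESS)
		return kr;
	/* ...a debugger would edit registers here before writing back... */
	return thread_set_state(target, ARM_THREAD_STATE64,
	    (thread_state_t)&st, cnt);
#else
	(void)target;
	return KERN_NOT_SUPPORTED;	/* other flavors are analogous */
#endif
}
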
+ */ +kern_return_t +thread_state_initialize( + thread_t thread) { kern_return_t result = KERN_SUCCESS; - thread_t thread; - if (act == THR_ACT_NULL || act == current_act()) + if (thread == THREAD_NULL) return (KERN_INVALID_ARGUMENT); - thread = act_lock_thread(act); + thread_mtx_lock(thread); + + if (thread->active) { + if (thread != current_thread()) { + thread_hold(thread); - if (!act->active) { - act_unlock_thread(act); - return (KERN_TERMINATED); + thread_mtx_unlock(thread); + + if (thread_stop(thread, TRUE)) { + thread_mtx_lock(thread); + result = machine_thread_state_initialize( thread ); + thread_unstop(thread); + } + else { + thread_mtx_lock(thread); + result = KERN_ABORTED; + } + + thread_release(thread); + } + else + result = machine_thread_state_initialize( thread ); } + else + result = KERN_TERMINATED; - thread_hold(act); + thread_mtx_unlock(thread); - for (;;) { - thread_t thread1; + return (result); +} - if ( thread == THREAD_NULL || - thread->top_act != act ) - break; - act_unlock_thread(act); - if (!thread_stop(thread)) { - result = KERN_ABORTED; - (void)act_lock_thread(act); - thread = THREAD_NULL; - break; - } +kern_return_t +thread_dup( + thread_t target) +{ + thread_t self = current_thread(); + kern_return_t result = KERN_SUCCESS; - thread1 = act_lock_thread(act); - if (thread1 == thread) - break; + if (target == THREAD_NULL || target == self) + return (KERN_INVALID_ARGUMENT); - thread_unstop(thread); - thread = thread1; - } + thread_mtx_lock(target); + + if (target->active) { + thread_hold(target); + + thread_mtx_unlock(target); - if (result == KERN_SUCCESS) - result = act_machine_set_state(act, flavor, state, state_count); + if (thread_stop(target, TRUE)) { + thread_mtx_lock(target); + result = machine_thread_dup(self, target, FALSE); + + if (self->affinity_set != AFFINITY_SET_NULL) + thread_affinity_dup(self, target); + thread_unstop(target); + } + else { + thread_mtx_lock(target); + result = KERN_ABORTED; + } - if ( thread != THREAD_NULL && - thread->top_act == act ) - thread_unstop(thread); + thread_release(target); + } + else + result = KERN_TERMINATED; - thread_release(act); - act_unlock_thread(act); + thread_mtx_unlock(target); return (result); } -/* - * Kernel-internal "thread" interfaces used outside this file: - */ kern_return_t -thread_dup( - register thread_act_t target) +thread_dup2( + thread_t source, + thread_t target) { kern_return_t result = KERN_SUCCESS; - thread_act_t self = current_act(); - thread_t thread; + uint32_t active = 0; - if (target == THR_ACT_NULL || target == self) + if (source == THREAD_NULL || target == THREAD_NULL || target == source) return (KERN_INVALID_ARGUMENT); - thread = act_lock_thread(target); + thread_mtx_lock(source); + active = source->active; + thread_mtx_unlock(source); - if (!target->active) { - act_unlock_thread(target); - return (KERN_TERMINATED); + if (!active) { + return KERN_TERMINATED; } - thread_hold(target); + thread_mtx_lock(target); - for (;;) { - thread_t thread1; + if (target->active || target->inspection) { + thread_hold(target); - if ( thread == THREAD_NULL || - thread->top_act != target ) - break; - act_unlock_thread(target); + thread_mtx_unlock(target); - if (!thread_stop(thread)) { + if (thread_stop(target, TRUE)) { + thread_mtx_lock(target); + result = machine_thread_dup(source, target, TRUE); + if (source->affinity_set != AFFINITY_SET_NULL) + thread_affinity_dup(source, target); + thread_unstop(target); + } + else { + thread_mtx_lock(target); result = KERN_ABORTED; - (void)act_lock_thread(target); - 
thread = THREAD_NULL; - break; } - thread1 = act_lock_thread(target); - if (thread1 == thread) - break; - - thread_unstop(thread); - thread = thread1; + thread_release(target); } + else + result = KERN_TERMINATED; - if (result == KERN_SUCCESS) - result = act_thread_dup(self, target); - - if ( thread != THREAD_NULL && - thread->top_act == target ) - thread_unstop(thread); - - thread_release(target); - act_unlock_thread(target); + thread_mtx_unlock(target); return (result); } - /* * thread_setstatus: * @@ -791,29 +773,24 @@ thread_dup( */ kern_return_t thread_setstatus( - register thread_act_t act, + thread_t thread, int flavor, thread_state_t tstate, mach_msg_type_number_t count) { - kern_return_t result = KERN_SUCCESS; - thread_t thread; - - thread = act_lock_thread(act); - if ( act != current_act() && - (act->suspend_count == 0 || - thread == THREAD_NULL || - (thread->state & TH_RUN) || - thread->top_act != act) ) - result = KERN_FAILURE; - - if (result == KERN_SUCCESS) - result = act_machine_set_state(act, flavor, tstate, count); + return (thread_set_state(thread, flavor, tstate, count)); +} - act_unlock_thread(act); +kern_return_t +thread_setstatus_from_user( + thread_t thread, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t count) +{ - return (result); + return (thread_set_state_from_user(thread, flavor, tstate, count)); } /* @@ -823,1064 +800,368 @@ thread_setstatus( */ kern_return_t thread_getstatus( - register thread_act_t act, + thread_t thread, int flavor, thread_state_t tstate, mach_msg_type_number_t *count) { - kern_return_t result = KERN_SUCCESS; - thread_t thread; - - thread = act_lock_thread(act); - - if ( act != current_act() && - (act->suspend_count == 0 || - thread == THREAD_NULL || - (thread->state & TH_RUN) || - thread->top_act != act) ) - result = KERN_FAILURE; - - if (result == KERN_SUCCESS) - result = act_machine_get_state(act, flavor, tstate, count); - - act_unlock_thread(act); - - return (result); + return (thread_get_state(thread, flavor, tstate, count)); } -/* - * Kernel-internal thread_activation interfaces used outside this file: - */ - -/* - * act_init() - Initialize activation handling code - */ -void -act_init() +kern_return_t +thread_getstatus_to_user( + thread_t thread, + int flavor, + thread_state_t tstate, + mach_msg_type_number_t *count) { - thr_act_zone = zinit( - sizeof(struct thread_activation), - ACT_MAX * sizeof(struct thread_activation), /* XXX */ - ACT_CHUNK * sizeof(struct thread_activation), - "activations"); - first_act = TRUE; - act_machine_init(); + return (thread_get_state_to_user(thread, flavor, tstate, count)); } - /* - * act_create - Create a new activation in a specific task. + * Change thread's machine-dependent userspace TSD base. + * Called with nothing locked. Returns same way. 
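thread_get_state_internal(), thread_set_state_internal(), thread_state_initialize(), thread_dup(), thread_dup2(), and thread_set_tsd_base() (which follows) all repeat the same hold/stop/operate/unstop/release dance. Purely as an illustration of the shared shape, a hypothetical helper (this does not exist in XNU) might factor it out; it is entered and exited with the thread mutex held and the target already checked active, mirroring the bodies above.

typedef kern_return_t (*stopped_op_t)(thread_t thread, void *arg);

static kern_return_t
thread_with_stopped_target(thread_t thread, boolean_t until_not_runnable,
    stopped_op_t op, void *arg)
{
	kern_return_t result;

	if (thread == current_thread())
		return (*op)(thread, arg);	/* no need to stop ourselves */

	thread_hold(thread);			/* pin the suspend count */
	thread_mtx_unlock(thread);		/* can't hold it across thread_stop() */

	if (thread_stop(thread, until_not_runnable)) {
		thread_mtx_lock(thread);
		result = (*op)(thread, arg);	/* e.g. machine_thread_set_state() */
		thread_unstop(thread);
	}
	else {
		thread_mtx_lock(thread);
		result = KERN_ABORTED;		/* the stop was interrupted */
	}

	thread_release(thread);
	return (result);
}
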
*/ kern_return_t -act_create(task_t task, - thread_act_t *new_act) +thread_set_tsd_base( + thread_t thread, + mach_vm_offset_t tsd_base) { - thread_act_t thr_act; - int rc; - vm_map_t map; - - if (first_act) { - thr_act = &pageout_act; - first_act = FALSE; - } else - thr_act = (thread_act_t)zalloc(thr_act_zone); - if (thr_act == 0) - return(KERN_RESOURCE_SHORTAGE); - -#if MACH_ASSERT - if (watchacts & WA_ACT_LNK) - printf("act_create(task=%x,thr_act@%x=%x)\n", - task, new_act, thr_act); -#endif /* MACH_ASSERT */ - - /* Start by zeroing everything; then init non-zero items only */ - bzero((char *)thr_act, sizeof(*thr_act)); - - if (thr_act == &pageout_act) - thr_act->thread = &pageout_thread; - -#ifdef MACH_BSD - { - /* - * Take care of the uthread allocation - * do it early in order to make KERN_RESOURCE_SHORTAGE - * handling trivial - * uthread_alloc() will bzero the storage allocated. - */ - extern void *uthread_alloc(task_t, thread_act_t); - - thr_act->uthread = uthread_alloc(task, thr_act); - if(thr_act->uthread == 0) { - /* Put the thr_act back on the thr_act zone */ - zfree(thr_act_zone, (vm_offset_t)thr_act); - return(KERN_RESOURCE_SHORTAGE); - } - } -#endif /* MACH_BSD */ + kern_return_t result = KERN_SUCCESS; - /* - * Start with one reference for the caller and one for the - * act being alive. - */ - act_lock_init(thr_act); - thr_act->ref_count = 2; - - /* Latch onto the task. */ - thr_act->task = task; - task_reference(task); - - /* special_handler will always be last on the returnhandlers list. */ - thr_act->special_handler.next = 0; - thr_act->special_handler.handler = special_handler; - -#if MACH_PROF - thr_act->act_profiled = FALSE; - thr_act->act_profiled_own = FALSE; - thr_act->profil_buffer = NULLPROFDATA; -#endif - - /* Initialize the held_ulocks queue as empty */ - queue_init(&thr_act->held_ulocks); - - /* Inherit the profiling status of the parent task */ - act_prof_init(thr_act, task); - - ipc_thr_act_init(task, thr_act); - act_machine_create(task, thr_act); - - /* - * If thr_act created in kernel-loaded task, alter its saved - * state to so indicate - */ - if (task->kernel_loaded) { - act_user_to_kernel(thr_act); - } - - /* Cache the task's map and take a reference to it */ - map = task->map; - thr_act->map = map; - - /* Inline vm_map_reference cause we don't want to increment res_count */ - mutex_lock(&map->s_lock); - map->ref_count++; - mutex_unlock(&map->s_lock); - - *new_act = thr_act; - return KERN_SUCCESS; -} - -/* - * act_free - called when an thr_act's ref_count drops to zero. - * - * This can only happen after the activation has been reaped, and - * all other references to it have gone away. We can now release - * the last critical resources, unlink the activation from the - * task, and release the reference on the thread shuttle itself. - * - * Called with activation locked. - */ -#if MACH_ASSERT -int dangerous_bzero = 1; /* paranoia & safety */ -#endif - -void -act_free(thread_act_t thr_act) -{ - task_t task; - thread_t thr; - vm_map_t map; - unsigned int ref; - void * task_proc; - -#if MACH_ASSERT - if (watchacts & WA_EXIT) - printf("act_free(%x(%d)) thr=%x tsk=%x(%d) %sactive\n", - thr_act, thr_act->ref_count, thr_act->thread, - thr_act->task, - thr_act->task ? thr_act->task->ref_count : 0, - thr_act->active ? 
" " : " !"); -#endif /* MACH_ASSERT */ - - assert(!thr_act->active); - - task = thr_act->task; - task_lock(task); - - task_proc = task->bsd_info; - if (thr = thr_act->thread) { - time_value_t user_time, system_time; - - thread_read_times(thr, &user_time, &system_time); - time_value_add(&task->total_user_time, &user_time); - time_value_add(&task->total_system_time, &system_time); - - /* Unlink the thr_act from the task's thr_act list, - * so it doesn't appear in calls to task_threads and such. - * The thr_act still keeps its ref on the task, however. - */ - queue_remove(&task->thr_acts, thr_act, thread_act_t, thr_acts); - thr_act->thr_acts.next = NULL; - task->thr_act_count--; - task->res_act_count--; - task_unlock(task); - task_deallocate(task); - thread_deallocate(thr); - act_machine_destroy(thr_act); - } else { - /* - * Must have never really gotten started - * no unlinking from the task and no need - * to free the shuttle. - */ - task_unlock(task); - task_deallocate(task); - } - - act_prof_deallocate(thr_act); - ipc_thr_act_terminate(thr_act); - - /* - * Drop the cached map reference. - * Inline version of vm_map_deallocate() because we - * don't want to decrement the map's residence count here. - */ - map = thr_act->map; - mutex_lock(&map->s_lock); - ref = --map->ref_count; - mutex_unlock(&map->s_lock); - if (ref == 0) - vm_map_destroy(map); - -#ifdef MACH_BSD - { - /* - * Free uthread BEFORE the bzero. - * Not doing so will result in a leak. - */ - extern void uthread_free(task_t, void *, void *); - - void *ut = thr_act->uthread; - thr_act->uthread = 0; - uthread_free(task, ut, task_proc); - } -#endif /* MACH_BSD */ - -#if MACH_ASSERT - if (dangerous_bzero) /* dangerous if we're still using it! */ - bzero((char *)thr_act, sizeof(*thr_act)); -#endif /* MACH_ASSERT */ - /* Put the thr_act back on the thr_act zone */ - zfree(thr_act_zone, (vm_offset_t)thr_act); -} - - -/* - * act_attach - Attach an thr_act to the top of a thread ("push the stack"). - * - * The thread_shuttle must be either the current one or a brand-new one. - * Assumes the thr_act is active but not in use. - * - * Already locked: thr_act plus "appropriate" thread-related locks - * (see act_lock_thread()). - */ -void -act_attach( - thread_act_t thr_act, - thread_t thread, - unsigned init_alert_mask) -{ - thread_act_t lower; - -#if MACH_ASSERT - assert(thread == current_thread() || thread->top_act == THR_ACT_NULL); - if (watchacts & WA_ACT_LNK) - printf("act_attach(thr_act %x(%d) thread %x(%d) mask %d)\n", - thr_act, thr_act->ref_count, thread, thread->ref_count, - init_alert_mask); -#endif /* MACH_ASSERT */ - - /* - * Chain the thr_act onto the thread's thr_act stack. - * Set mask and auto-propagate alerts from below. - */ - thr_act->ref_count++; - thr_act->thread = thread; - thr_act->higher = THR_ACT_NULL; /*safety*/ - thr_act->alerts = 0; - thr_act->alert_mask = init_alert_mask; - lower = thr_act->lower = thread->top_act; - - if (lower != THR_ACT_NULL) { - lower->higher = thr_act; - thr_act->alerts = (lower->alerts & init_alert_mask); - } - - thread->top_act = thr_act; -} - -/* - * act_detach - * - * Remove the current thr_act from the top of the current thread, i.e. - * "pop the stack". Assumes already locked: thr_act plus "appropriate" - * thread-related locks (see act_lock_thread). 
- */ -void -act_detach( - thread_act_t cur_act) -{ - thread_t cur_thread = cur_act->thread; - -#if MACH_ASSERT - if (watchacts & (WA_EXIT|WA_ACT_LNK)) - printf("act_detach: thr_act %x(%d), thrd %x(%d) task=%x(%d)\n", - cur_act, cur_act->ref_count, - cur_thread, cur_thread->ref_count, - cur_act->task, - cur_act->task ? cur_act->task->ref_count : 0); -#endif /* MACH_ASSERT */ - - /* Unlink the thr_act from the thread's thr_act stack */ - cur_thread->top_act = cur_act->lower; - cur_act->thread = 0; - cur_act->ref_count--; - assert(cur_act->ref_count > 0); - -#if MACH_ASSERT - cur_act->lower = cur_act->higher = THR_ACT_NULL; - if (cur_thread->top_act) - cur_thread->top_act->higher = THR_ACT_NULL; -#endif /* MACH_ASSERT */ - - return; -} + if (thread == THREAD_NULL) + return (KERN_INVALID_ARGUMENT); + thread_mtx_lock(thread); -/* - * Synchronize a thread operation with migration. - * Called with nothing locked. - * Returns with thr_act locked. - */ -thread_t -act_lock_thread( - thread_act_t thr_act) -{ + if (thread->active) { + if (thread != current_thread()) { + thread_hold(thread); - /* - * JMM - We have moved away from explicit RPC locks - * and towards a generic migration approach. The wait - * queue lock will be the point of synchronization for - * the shuttle linkage when this is rolled out. Until - * then, just lock the act. - */ - act_lock(thr_act); - return (thr_act->thread); -} + thread_mtx_unlock(thread); -/* - * Unsynchronize with migration (i.e., undo an act_lock_thread() call). - * Called with thr_act locked, plus thread locks held that are - * "correct" for thr_act's state. Returns with nothing locked. - */ -void -act_unlock_thread(thread_act_t thr_act) -{ - act_unlock(thr_act); -} + if (thread_stop(thread, TRUE)) { + thread_mtx_lock(thread); + result = machine_thread_set_tsd_base(thread, tsd_base); + thread_unstop(thread); + } + else { + thread_mtx_lock(thread); + result = KERN_ABORTED; + } -/* - * Synchronize with migration given a pointer to a shuttle (instead of an - * activation). Called with nothing locked; returns with all - * "appropriate" thread-related locks held (see act_lock_thread()). - */ -thread_act_t -thread_lock_act( - thread_t thread) -{ - thread_act_t thr_act; - - while (1) { - thr_act = thread->top_act; - if (!thr_act) - break; - if (!act_lock_try(thr_act)) { - mutex_pause(); - continue; + thread_release(thread); } - break; + else + result = machine_thread_set_tsd_base(thread, tsd_base); } - return (thr_act); -} + else + result = KERN_TERMINATED; -/* - * Unsynchronize with an activation starting from a pointer to - * a shuttle. - */ -void -thread_unlock_act( - thread_t thread) -{ - thread_act_t thr_act; + thread_mtx_unlock(thread); - if (thr_act = thread->top_act) { - act_unlock(thr_act); - } + return (result); } /* - * switch_act + * thread_set_apc_ast: * - * If a new activation is given, switch to it. If not, - * switch to the lower activation (pop). Returns the old - * activation. This is for migration support. - */ -thread_act_t -switch_act( - thread_act_t act) -{ - thread_t thread; - thread_act_t old, new; - unsigned cpu; - spl_t spl; - - - disable_preemption(); - - cpu = cpu_number(); - thread = current_thread(); - - /* - * Find the old and new activation for switch. - */ - old = thread->top_act; - - if (act) { - new = act; - new->thread = thread; - } - else { - new = old->lower; - } - - assert(new != THR_ACT_NULL); - assert(cpu_to_processor(cpu)->cpu_data->active_thread == thread); - active_kloaded[cpu] = (new->kernel_loaded) ? 
new : 0; - - /* This is where all the work happens */ - machine_switch_act(thread, old, new, cpu); - - /* - * Push or pop an activation on the chain. - */ - if (act) { - act_attach(new, thread, 0); - } - else { - act_detach(old); - } - - enable_preemption(); - - return(old); -} - -/* - * install_special_handler - * Install the special returnhandler that handles suspension and - * termination, if it hasn't been installed already. + * Register the AST_APC callback that handles suspension and + * termination, if it hasn't been installed already. * - * Already locked: RPC-related locks for thr_act, but not - * scheduling lock (thread_lock()) of the associated thread. + * Called with the thread mutex held. */ -void -install_special_handler( - thread_act_t thr_act) +static void +thread_set_apc_ast(thread_t thread) { - spl_t spl; - thread_t thread = thr_act->thread; - -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: install_special_hdlr(%x)\n",current_act(),thr_act); -#endif /* MACH_ASSERT */ + spl_t s = splsched(); - spl = splsched(); thread_lock(thread); - install_special_handler_locked(thr_act); + thread_set_apc_ast_locked(thread); thread_unlock(thread); - splx(spl); + + splx(s); } /* - * install_special_handler_locked - * Do the work of installing the special_handler. + * thread_set_apc_ast_locked: + * + * Do the work of registering for the AST_APC callback. * - * Already locked: RPC-related locks for thr_act, plus the - * scheduling lock (thread_lock()) of the associated thread. + * Called with the thread mutex and scheduling lock held. */ -void -install_special_handler_locked( - thread_act_t act) +static void +thread_set_apc_ast_locked(thread_t thread) { - thread_t thread = act->thread; - ReturnHandler **rh; - - /* The work handler must always be the last ReturnHandler on the list, - because it can do tricky things like detach the thr_act. */ - for (rh = &act->handlers; *rh; rh = &(*rh)->next) - continue; - if (rh != &act->special_handler.next) - *rh = &act->special_handler; - - if (act == thread->top_act) { - /* - * Temporarily undepress, so target has - * a chance to do locking required to - * block itself in special_handler(). - */ - if (thread->sched_mode & TH_MODE_ISDEPRESSED) - compute_priority(thread, TRUE); - } + thread_ast_set(thread, AST_APC); - thread_ast_set(act, AST_APC); - if (act == current_act()) - ast_propagate(act->ast); - else { - processor_t processor = thread->last_processor; + if (thread == current_thread()) { + ast_propagate(thread); + } else { + processor_t processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->cpu_data->active_thread == thread ) + if (processor != PROCESSOR_NULL && + processor->state == PROCESSOR_RUNNING && + processor->active_thread == thread) { cause_ast_check(processor); + } } } -kern_return_t -thread_apc_set( - thread_act_t act, - thread_apc_handler_t apc) -{ - extern thread_apc_handler_t bsd_ast; - - assert(apc == bsd_ast); - return (KERN_FAILURE); -} - -kern_return_t -thread_apc_clear( - thread_act_t act, - thread_apc_handler_t apc) -{ - extern thread_apc_handler_t bsd_ast; - - assert(apc == bsd_ast); - return (KERN_FAILURE); -} - /* * Activation control support routines internal to this file: - */ - -/* - * act_execute_returnhandlers() - does just what the name says * - * This is called by system-dependent code when it detects that - * thr_act->handlers is non-null while returning into user mode. 
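thread_set_apc_ast_locked() above encodes the general AST pattern: post the bit, then either propagate locally (when the target is the current thread) or interrupt the processor the target is running on via cause_ast_check(). A toy user-space model of that shape, for illustration only (these are not the kernel's types, and the "IPI" is just a diagnostic):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define AST_APC 0x01u
#define AST_BSD 0x02u

struct model_thread {
	_Atomic unsigned ast;		/* pending AST bits */
	_Atomic bool on_cpu;		/* running right now? */
};

static void
model_cause_ast_check(struct model_thread *t)
{
	/* stands in for the cross-CPU interrupt */
	printf("IPI: force an AST check\n");
}

static void
model_set_ast(struct model_thread *t, unsigned bits)
{
	atomic_fetch_or(&t->ast, bits);		/* post the bit atomically */
	if (atomic_load(&t->on_cpu))
		model_cause_ast_check(t);	/* running: kick it now */
	/* otherwise it notices at its next dispatch */
}

int main(void)
{
	struct model_thread t = { 0, true };
	model_set_ast(&t, AST_APC);
	printf("pending: 0x%x\n", atomic_load(&t.ast));
	return 0;
}
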
*/ -void -act_execute_returnhandlers(void) -{ - thread_act_t act = current_act(); - -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("execute_rtn_hdlrs: act=%x\n", act); -#endif /* MACH_ASSERT */ - - thread_ast_clear(act, AST_APC); - spllo(); - - for (;;) { - ReturnHandler *rh; - thread_t thread = act_lock_thread(act); - - (void)splsched(); - thread_lock(thread); - rh = act->handlers; - if (!rh) { - thread_unlock(thread); - spllo(); - act_unlock_thread(act); - return; - } - act->handlers = rh->next; - thread_unlock(thread); - spllo(); - act_unlock_thread(act); - -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf( (rh == &act->special_handler) ? - "\tspecial_handler\n" : "\thandler=%x\n", rh->handler); -#endif /* MACH_ASSERT */ - - /* Execute it */ - (*rh->handler)(rh, act); - } -} /* - * special_handler_continue + * thread_suspended * - * Continuation routine for the special handler blocks. It checks + * Continuation routine for thread suspension. It checks * to see whether there has been any new suspensions. If so, it - * installs the special handler again. Otherwise, it checks to see - * if the current depression needs to be re-instated (it may have - * been temporarily removed in order to get to this point in a hurry). + * installs the AST_APC handler again. */ -void -special_handler_continue(void) +__attribute__((noreturn)) +static void +thread_suspended(__unused void *parameter, wait_result_t result) { - thread_act_t self = current_act(); + thread_t thread = current_thread(); - if (self->suspend_count > 0) - install_special_handler(self); - else { - thread_t thread = self->thread; - spl_t s = splsched(); + thread_mtx_lock(thread); - thread_lock(thread); - if (thread->sched_mode & TH_MODE_ISDEPRESSED) { - processor_t myprocessor = thread->last_processor; + if (result == THREAD_INTERRUPTED) + thread->suspend_parked = FALSE; + else + assert(thread->suspend_parked == FALSE); - thread->sched_pri = DEPRESSPRI; - myprocessor->current_pri = thread->sched_pri; - thread->sched_mode &= ~TH_MODE_PREEMPT; - } - thread_unlock(thread); - splx(s); - } + if (thread->suspend_count > 0) + thread_set_apc_ast(thread); + + thread_mtx_unlock(thread); thread_exception_return(); /*NOTREACHED*/ } /* - * special_handler - handles suspension, termination. Called - * with nothing locked. Returns (if it returns) the same way. + * thread_apc_ast - handles AST_APC and drives thread suspension and termination. + * Called with nothing locked. Returns (if it returns) the same way. */ void -special_handler( - ReturnHandler *rh, - thread_act_t self) +thread_apc_ast(thread_t thread) { - thread_t thread = act_lock_thread(self); - spl_t s; + thread_mtx_lock(thread); - assert(thread != THREAD_NULL); + assert(thread->suspend_parked == FALSE); - s = splsched(); + spl_t s = splsched(); thread_lock(thread); - thread->state &= ~(TH_ABORT|TH_ABORT_SAFELY); /* clear any aborts */ - thread_unlock(thread); - splx(s); - /* - * If someone has killed this invocation, - * invoke the return path with a terminated exception. - */ - if (!self->active) { - act_unlock_thread(self); - act_machine_return(KERN_TERMINATED); - } + /* TH_SFLAG_POLLDEPRESS is OK to have here */ + assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0); - /* - * If we're suspended, go to sleep and wait for someone to wake us up. 
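The new scheme replaces the special-handler dance being removed here with an explicit park: thread_apc_ast() sets suspend_parked and blocks on &thread->suspend_count (just below), and thread_release() wakes the thread only if it actually parked. A user-space condition-variable model of that handshake, for illustration only; the kernel wait is additionally abort-safe, which is why thread_suspended() must tolerate THREAD_INTERRUPTED:

#include <pthread.h>
#include <stdbool.h>

struct park_state {
	pthread_mutex_t lock;		/* plays the thread mutex */
	pthread_cond_t wake;
	int suspend_count;
	bool parked;			/* plays suspend_parked */
};

static void
park_if_suspended(struct park_state *ps)	/* target side */
{
	pthread_mutex_lock(&ps->lock);
	while (ps->suspend_count > 0) {
		ps->parked = true;
		pthread_cond_wait(&ps->wake, &ps->lock);	/* "thread_block" */
	}
	ps->parked = false;
	pthread_mutex_unlock(&ps->lock);
}

static void
release_one(struct park_state *ps)		/* controller side */
{
	pthread_mutex_lock(&ps->lock);
	if (ps->suspend_count > 0 && --ps->suspend_count == 0 && ps->parked) {
		ps->parked = false;
		pthread_cond_signal(&ps->wake);	/* "thread_wakeup_thread" */
	}
	pthread_mutex_unlock(&ps->lock);
}
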
- */ - if (self->suspend_count > 0) { - if (self->handlers == NULL) { - assert_wait(&self->suspend_count, THREAD_ABORTSAFE); - act_unlock_thread(self); - thread_block(special_handler_continue); - /* NOTREACHED */ - } + thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK; + thread_unlock(thread); + splx(s); - act_unlock_thread(self); + if (!thread->active) { + /* Thread is ready to terminate, time to tear it down */ + thread_mtx_unlock(thread); - special_handler_continue(); + thread_terminate_self(); /*NOTREACHED*/ } - act_unlock_thread(self); -} - -/* - * Update activation that belongs to a task created via kernel_task_create(). - */ -void -act_user_to_kernel( - thread_act_t thr_act) -{ - pcb_user_to_kernel(thr_act); - thr_act->kernel_loading = TRUE; -} - -/* - * Already locked: activation (shuttle frozen within) - * - * Mark an activation inactive, and prepare it to terminate - * itself. - */ -static void -act_disable( - thread_act_t thr_act) -{ + /* If we're suspended, go to sleep and wait for someone to wake us up. */ + if (thread->suspend_count > 0) { + thread->suspend_parked = TRUE; + assert_wait(&thread->suspend_count, + THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER); + thread_mtx_unlock(thread); -#if MACH_ASSERT - if (watchacts & WA_EXIT) { - printf("act_%x: act_disable_tl(thr_act=%x(%d))%sactive", - current_act(), thr_act, thr_act->ref_count, - (thr_act->active ? " " : " !")); - printf("\n"); - (void) dump_act(thr_act); + thread_block(thread_suspended); + /*NOTREACHED*/ } -#endif /* MACH_ASSERT */ - - thr_act->active = 0; - /* Drop the thr_act reference taken for being active. - * (There is still at least one reference left: - * the one we were passed.) - * Inline the deallocate because thr_act is locked. - */ - act_locked_act_deallocate(thr_act); + thread_mtx_unlock(thread); } -/* - * act_alert - Register an alert from this activation. - * - * Each set bit is propagated upward from (but not including) this activation, - * until the top of the chain is reached or the bit is masked. - */ +/* Prototype, see justification above */ kern_return_t -act_alert(thread_act_t thr_act, unsigned alerts) -{ - thread_t thread = act_lock_thread(thr_act); - -#if MACH_ASSERT - if (watchacts & WA_ACT_LNK) - printf("act_alert %x: %x\n", thr_act, alerts); -#endif /* MACH_ASSERT */ - - if (thread) { - thread_act_t act_up = thr_act; - while ((alerts) && (act_up != thread->top_act)) { - act_up = act_up->higher; - alerts &= act_up->alert_mask; - act_up->alerts |= alerts; - } - /* - * XXXX If we reach the top, and it is blocked in glue - * code, do something to kick it. XXXX - */ - } - act_unlock_thread(thr_act); - - return KERN_SUCCESS; -} - -kern_return_t act_alert_mask(thread_act_t thr_act, unsigned alert_mask) -{ - panic("act_alert_mask NOT YET IMPLEMENTED\n"); - return KERN_SUCCESS; -} - -typedef struct GetSetState { - struct ReturnHandler rh; - int flavor; - void *state; - int *pcount; - int result; -} GetSetState; - -/* Local Forward decls */ -kern_return_t get_set_state( - thread_act_t thr_act, int flavor, - thread_state_t state, int *pcount, - void (*handler)(ReturnHandler *rh, thread_act_t thr_act)); -void get_state_handler(ReturnHandler *rh, thread_act_t thr_act); -void set_state_handler(ReturnHandler *rh, thread_act_t thr_act); +act_set_state( + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t count); -/* - * get_set_state(thr_act ...) - * - * General code to install g/set_state handler. - * Called with thr_act's act_lock() and "appropriate" - * thread-related locks held. 
(See act_lock_thread().) - */ kern_return_t -get_set_state( - thread_act_t act, - int flavor, - thread_state_t state, - int *pcount, - void (*handler)( - ReturnHandler *rh, - thread_act_t act)) +act_set_state( + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t count) { - GetSetState gss; - - /* Initialize a small parameter structure */ - gss.rh.handler = handler; - gss.flavor = flavor; - gss.state = state; - gss.pcount = pcount; - gss.result = KERN_ABORTED; /* iff wait below is interrupted */ - - /* Add it to the thr_act's return handler list */ - gss.rh.next = act->handlers; - act->handlers = &gss.rh; - - act_set_apc(act); - -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) { - printf("act_%x: get_set_state(act=%x flv=%x state=%x ptr@%x=%x)", - current_act(), act, flavor, state, - pcount, (pcount ? *pcount : 0)); - printf((handler == get_state_handler ? "get_state_hdlr\n" : - (handler == set_state_handler ? "set_state_hdlr\n" : - "hndler=%x\n")), handler); - } -#endif /* MACH_ASSERT */ - - assert(act->thread); - assert(act != current_act()); - - for (;;) { - wait_result_t result; - - if ( act->inited && - act->thread->top_act == act ) - thread_wakeup_one(&act->suspend_count); - - /* - * Wait must be interruptible to avoid deadlock (e.g.) with - * task_suspend() when caller and target of get_set_state() - * are in same task. - */ - result = assert_wait(&gss, THREAD_ABORTSAFE); - act_unlock_thread(act); - - if (result == THREAD_WAITING) - result = thread_block(THREAD_CONTINUE_NULL); - - assert(result != THREAD_WAITING); - - if (gss.result != KERN_ABORTED) { - assert(result != THREAD_INTERRUPTED); - break; - } - - /* JMM - What about other aborts (like BSD signals)? */ - if (current_act()->handlers) - act_execute_returnhandlers(); - - act_lock_thread(act); - } - -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: get_set_state returns %x\n", - current_act(), gss.result); -#endif /* MACH_ASSERT */ + if (thread == current_thread()) + return (KERN_INVALID_ARGUMENT); - return (gss.result); + return (thread_set_state(thread, flavor, state, count)); + } -void -set_state_handler(ReturnHandler *rh, thread_act_t thr_act) +kern_return_t +act_set_state_from_user( + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t count) { - GetSetState *gss = (GetSetState*)rh; - -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: set_state_handler(rh=%x,thr_act=%x)\n", - current_act(), rh, thr_act); -#endif /* MACH_ASSERT */ + if (thread == current_thread()) + return (KERN_INVALID_ARGUMENT); - gss->result = act_machine_set_state(thr_act, gss->flavor, - gss->state, *gss->pcount); - thread_wakeup((event_t)gss); -} - -void -get_state_handler(ReturnHandler *rh, thread_act_t thr_act) -{ - GetSetState *gss = (GetSetState*)rh; - -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: get_state_handler(rh=%x,thr_act=%x)\n", - current_act(), rh, thr_act); -#endif /* MACH_ASSERT */ - - gss->result = act_machine_get_state(thr_act, gss->flavor, - gss->state, - (mach_msg_type_number_t *) gss->pcount); - thread_wakeup((event_t)gss); + return (thread_set_state_from_user(thread, flavor, state, count)); + } +/* Prototype, see justification above */ kern_return_t -act_get_state_locked(thread_act_t thr_act, int flavor, thread_state_t state, - mach_msg_type_number_t *pcount) -{ -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: act_get_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n", - current_act(), thr_act, flavor, state, 
pcount, - (pcount? *pcount : 0)); -#endif /* MACH_ASSERT */ - - return(get_set_state(thr_act, flavor, state, (int*)pcount, get_state_handler)); -} +act_get_state( + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t *count); kern_return_t -act_set_state_locked(thread_act_t thr_act, int flavor, thread_state_t state, - mach_msg_type_number_t count) +act_get_state( + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t *count) { -#if MACH_ASSERT - if (watchacts & WA_ACT_HDLR) - printf("act_%x: act_set_state_L(thr_act=%x,flav=%x,st=%x,pcnt@%x=%x)\n", - current_act(), thr_act, flavor, state, count, count); -#endif /* MACH_ASSERT */ + if (thread == current_thread()) + return (KERN_INVALID_ARGUMENT); - return(get_set_state(thr_act, flavor, state, (int*)&count, set_state_handler)); + return (thread_get_state(thread, flavor, state, count)); } kern_return_t -act_set_state(thread_act_t thr_act, int flavor, thread_state_t state, - mach_msg_type_number_t count) +act_get_state_to_user( + thread_t thread, + int flavor, + thread_state_t state, + mach_msg_type_number_t *count) { - if (thr_act == THR_ACT_NULL || thr_act == current_act()) - return(KERN_INVALID_ARGUMENT); + if (thread == current_thread()) + return (KERN_INVALID_ARGUMENT); - act_lock_thread(thr_act); - return(act_set_state_locked(thr_act, flavor, state, count)); - + return (thread_get_state_to_user(thread, flavor, state, count)); } -kern_return_t -act_get_state(thread_act_t thr_act, int flavor, thread_state_t state, - mach_msg_type_number_t *pcount) +static void +act_set_ast( + thread_t thread, + ast_t ast) { - if (thr_act == THR_ACT_NULL || thr_act == current_act()) - return(KERN_INVALID_ARGUMENT); - - act_lock_thread(thr_act); - return(act_get_state_locked(thr_act, flavor, state, pcount)); -} + spl_t s = splsched(); -void -act_set_astbsd( - thread_act_t act) -{ - spl_t s = splsched(); - - if (act == current_act()) { - thread_ast_set(act, AST_BSD); - ast_propagate(act->ast); - } - else { - thread_t thread = act->thread; - processor_t processor; + if (thread == current_thread()) { + thread_ast_set(thread, ast); + ast_propagate(thread); + } else { + processor_t processor; thread_lock(thread); - thread_ast_set(act, AST_BSD); + thread_ast_set(thread, ast); processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->cpu_data->active_thread == thread ) + if ( processor != PROCESSOR_NULL && + processor->state == PROCESSOR_RUNNING && + processor->active_thread == thread ) cause_ast_check(processor); thread_unlock(thread); } - + splx(s); } -void -act_set_apc( - thread_act_t act) +/* + * set AST on thread without causing an AST check + * and without taking the thread lock + * + * If thread is not the current thread, then it may take + * up until the next context switch or quantum expiration + * on that thread for it to notice the AST. 
+ */ +static void +act_set_ast_async(thread_t thread, + ast_t ast) { - spl_t s = splsched(); - - if (act == current_act()) { - thread_ast_set(act, AST_APC); - ast_propagate(act->ast); - } - else { - thread_t thread = act->thread; - processor_t processor; + thread_ast_set(thread, ast); - thread_lock(thread); - thread_ast_set(act, AST_APC); - processor = thread->last_processor; - if ( processor != PROCESSOR_NULL && - processor->state == PROCESSOR_RUNNING && - processor->cpu_data->active_thread == thread ) - cause_ast_check(processor); - thread_unlock(thread); + if (thread == current_thread()) { + spl_t s = splsched(); + ast_propagate(thread); + splx(s); } - - splx(s); } void -act_ulock_release_all(thread_act_t thr_act) +act_set_astbsd( + thread_t thread) { - ulock_t ulock; + act_set_ast( thread, AST_BSD ); +} - while (!queue_empty(&thr_act->held_ulocks)) { - ulock = (ulock_t) queue_first(&thr_act->held_ulocks); - (void) lock_make_unstable(ulock, thr_act); - (void) lock_release_internal(ulock, thr_act); - } +void +act_set_astkevent(thread_t thread, uint16_t bits) +{ + atomic_fetch_or(&thread->kevent_ast_bits, bits); + + /* kevent AST shouldn't send immediate IPIs */ + act_set_ast_async(thread, AST_KEVENT); } -/* - * Provide routines (for export to other components) of things that - * are implemented as macros insternally. - */ -thread_act_t -thread_self(void) +void +act_set_kperf( + thread_t thread) { - thread_act_t self = current_act_fast(); + /* safety check */ + if (thread != current_thread()) + if( !ml_get_interrupts_enabled() ) + panic("unsafe act_set_kperf operation"); - act_reference(self); - return self; + act_set_ast( thread, AST_KPERF ); } -thread_act_t -mach_thread_self(void) +#if CONFIG_MACF +void +act_set_astmacf( + thread_t thread) { - thread_act_t self = current_act_fast(); + act_set_ast( thread, AST_MACF); +} +#endif - act_reference(self); - return self; +void +act_set_astledger(thread_t thread) +{ + act_set_ast(thread, AST_LEDGER); } -#undef act_reference +/* + * The ledger AST may need to be set while already holding + * the thread lock. This routine skips sending the IPI, + * allowing us to avoid the lock hold. + * + * However, it means the targeted thread must context switch + * to recognize the ledger AST. + */ void -act_reference( - thread_act_t thr_act) +act_set_astledger_async(thread_t thread) { - act_reference_fast(thr_act); + act_set_ast_async(thread, AST_LEDGER); } -#undef act_deallocate void -act_deallocate( - thread_act_t thr_act) +act_set_io_telemetry_ast(thread_t thread) { - act_deallocate_fast(thr_act); + act_set_ast(thread, AST_TELEMETRY_IO); } +
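Taken together, the suspend/resume surface above supports the classic stop-the-world idiom. A closing user-space sketch (macOS assumed) that suspends every sibling thread in the calling task, including the port bookkeeping that task_threads() requires:

#include <mach/mach.h>

static kern_return_t
suspend_siblings(void)
{
	thread_act_array_t threads;
	mach_msg_type_number_t count;
	thread_act_t self = mach_thread_self();

	kern_return_t kr = task_threads(mach_task_self(), &threads, &count);
	if (kr != KERN_SUCCESS)
		return kr;

	for (mach_msg_type_number_t i = 0; i < count; i++) {
		if (threads[i] != self)
			thread_suspend(threads[i]);
		/* each entry is a send right we now own */
		mach_port_deallocate(mach_task_self(), threads[i]);
	}

	/* the array itself is out-of-line memory in our address space */
	vm_deallocate(mach_task_self(), (vm_address_t)threads,
	    count * sizeof(thread_act_t));
	mach_port_deallocate(mach_task_self(), self);
	return KERN_SUCCESS;
}

int main(void) { return suspend_siblings() == KERN_SUCCESS ? 0 : 1; }
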