X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..c7d2c2c6ee645e10cbccdd01c6191873ec77239d:/osfmk/kern/ast.c

diff --git a/osfmk/kern/ast.c b/osfmk/kern/ast.c
index f454fe9a3..f2ceba343 100644
--- a/osfmk/kern/ast.c
+++ b/osfmk/kern/ast.c
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
 *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
 /*
 * @OSF_COPYRIGHT@
 */
 /*
 *
@@ -59,243 +65,248 @@
 *
 */
 
-#include <cputypes.h>
-#include <cpus.h>
-#include <platforms.h>
-#include <task_swapper.h>
-
 #include <kern/ast.h>
 #include <kern/counters.h>
 #include <kern/cpu_number.h>
 #include <kern/misc_protos.h>
 #include <kern/queue.h>
-#include <kern/sched.h>
 #include <kern/sched_prim.h>
 #include <kern/thread.h>
-#include <kern/thread_act.h>
-#include <kern/thread_swap.h>
 #include <kern/processor.h>
 #include <kern/spl.h>
+#include <kern/sfi.h>
+#if CONFIG_TELEMETRY
+#include <kern/telemetry.h>
+#endif
+#include <kern/waitq.h>
+#include <kern/ledger.h>
 #include <mach/policy.h>
-#if TASK_SWAPPER
-#include <kern/task_swap.h>
-#endif /* TASK_SWAPPER */
+#include <machine/trap.h> // for CHUD AST hook
+#include <machine/pal_routines.h>
+#include <security/mac_mach_internal.h> // for MACF AST hook
+
+volatile perfASTCallback perfASTHook;
 
-volatile ast_t need_ast[NCPUS];
 
 void
 ast_init(void)
 {
-#ifndef MACHINE_AST
-	register int i;
-
-	for (i=0; i<NCPUS; i++)
-		need_ast[i] = AST_NONE;
-#endif /* MACHINE_AST */
 }
 
+extern void chudxnu_thread_ast(thread_t);
+
+/*
+ * Called at splsched.
+ */
 void
 ast_taken(
-	boolean_t	preemption,
-	ast_t		mask,
-	boolean_t	interrupt
+	ast_t		reasons,
+	boolean_t	enable
 )
 {
-	register thread_t	self = current_thread();
-	register processor_t	mypr;
-	register ast_t		reasons;
-	register int		mycpu;
-	thread_act_t		act = self->top_act;
-#ifdef MACH_BSD
-	extern void bsd_ast(thread_act_t);
-	extern void bsdinit_task(void);
-#endif
-
-	mp_disable_preemption();
-	mycpu = cpu_number();
-	reasons = need_ast[mycpu] & mask;
-	need_ast[mycpu] &= ~reasons;
-	mp_enable_preemption();
-
-	ml_set_interrupts_enabled(interrupt);
+	boolean_t	preempt_trap = (reasons == AST_PREEMPTION);
+	ast_t		*myast = ast_pending();
+	thread_t	thread = current_thread();
+	perfASTCallback	perf_hook = perfASTHook;
 
 	/*
-	 * No ast for an idle thread
+	 * CHUD hook - all threads including idle processor threads
 	 */
-	if (self->state & TH_IDLE)
-		return;
+	if (perf_hook) {
+		if (*myast & AST_CHUD_ALL) {
+			(*perf_hook)(reasons, myast);
+
+			if (*myast == AST_NONE)
+				return;
+		}
+	}
+	else
+		*myast &= ~AST_CHUD_ALL;
+
+	reasons &= *myast;
+	*myast &= ~reasons;
 
 	/*
-	 * Check for preemption
+	 * Handle ASTs for all threads
+	 * except idle processor threads.
	 */
-	if ((reasons & AST_URGENT) && (wait_queue_assert_possible(self))) {
-		reasons &= ~AST_URGENT;
-		if ((reasons & (AST_BLOCK|AST_QUANTUM)) == 0) {
-			mp_disable_preemption();
-			mypr = current_processor();
-			if (csw_needed(self, mypr)) {
-				reasons |= (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
+	if (!(thread->state & TH_IDLE)) {
+		/*
+		 * Check for urgent preemption.
+		 */
+		if ( (reasons & AST_URGENT) &&
+		     waitq_wait_possible(thread) ) {
+			if (reasons & AST_PREEMPT) {
+				counter(c_ast_taken_block++);
+				thread_block_reason(THREAD_CONTINUE_NULL, NULL,
						reasons & AST_PREEMPTION);
 			}
-			mp_enable_preemption();
-		}
-		if (reasons & (AST_BLOCK | AST_QUANTUM)) {
-			counter(c_ast_taken_block++);
-			thread_block_reason((void (*)(void))0,
-					(reasons & (AST_BLOCK | AST_QUANTUM)));
+
+			reasons &= ~AST_PREEMPTION;
 		}
-		if (reasons == 0)
-			return;
-	}
+
+		/*
+		 * The kernel preempt traps
+		 * skip all other ASTs.
+		 */
+		if (!preempt_trap) {
+			ml_set_interrupts_enabled(enable);
 
 #ifdef MACH_BSD
-	/*
-	 * Check for BSD hardcoded hooks
-	 */
-	if (reasons & AST_BSD) {
-		thread_ast_clear(act,AST_BSD);
-		bsd_ast(act);
-	}
-	if (reasons & AST_BSD_INIT) {
-		thread_ast_clear(act,AST_BSD_INIT);
-		bsdinit_task();
-	}
+			/*
+			 * Handle BSD hook.
+			 */
+			if (reasons & AST_BSD) {
+				thread_ast_clear(thread, AST_BSD);
+				bsd_ast(thread);
+			}
 #endif
+#if CONFIG_MACF
+			/*
+			 * Handle MACF hook.
+			 */
+			if (reasons & AST_MACF) {
+				thread_ast_clear(thread, AST_MACF);
+				mac_thread_userret(thread);
+			}
+#endif
+			/*
+			 * Thread APC hook.
+			 */
+			if (reasons & AST_APC) {
+				thread_ast_clear(thread, AST_APC);
+				special_handler(thread);
+			}
+
+			if (reasons & AST_GUARD) {
+				thread_ast_clear(thread, AST_GUARD);
+				guard_ast(thread);
+			}
+
+			if (reasons & AST_LEDGER) {
+				thread_ast_clear(thread, AST_LEDGER);
+				ledger_ast(thread);
+			}
 
-#if TASK_SWAPPER
-	/* must be before AST_APC */
-	if (reasons & AST_SWAPOUT) {
-		spl_t s;
-		swapout_ast();
-		s = splsched();
-		mp_disable_preemption();
-		mycpu = cpu_number();
-		if (need_ast[mycpu] & AST_APC) {
-			/* generated in swapout_ast() to get suspended */
-			reasons |= AST_APC;		/* process now ... */
-			need_ast[mycpu] &= ~AST_APC;	/* ... and not later */
-		}
-		mp_enable_preemption();
-		splx(s);
-	}
-#endif /* TASK_SWAPPER */
+			/*
+			 * Kernel Profiling Hook
+			 */
+			if (reasons & AST_KPERF) {
+				thread_ast_clear(thread, AST_KPERF);
+				chudxnu_thread_ast(thread);
+			}
 
-	/*
-	 * migration APC hook
-	 */
-	if (reasons & AST_APC) {
-		act_execute_returnhandlers();
-	}
+#if CONFIG_TELEMETRY
+			if (reasons & AST_TELEMETRY_ALL) {
+				boolean_t interrupted_userspace = FALSE;
+				boolean_t is_windowed = FALSE;
 
-	/*
-	 * thread_block needs to know if the thread's quantum
-	 * expired so the thread can be put on the tail of
-	 * run queue. One of the previous actions might well
-	 * have woken a high-priority thread, so we also use
-	 * csw_needed check.
-	 */
-	reasons &= (AST_BLOCK | AST_QUANTUM);
-	if (reasons == 0) {
-		mp_disable_preemption();
-		mypr = current_processor();
-		if (csw_needed(self, mypr)) {
-			reasons = (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
+				assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
+				interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
+				is_windowed = ((reasons & AST_TELEMETRY_WINDOWED) ? TRUE : FALSE);
+				thread_ast_clear(thread, AST_TELEMETRY_ALL);
+				telemetry_ast(thread, interrupted_userspace, is_windowed);
+			}
+#endif
+
+			ml_set_interrupts_enabled(FALSE);
+
+#if CONFIG_SCHED_SFI
+			if (reasons & AST_SFI) {
+				sfi_ast(thread);
+			}
+#endif
+
+			/*
+			 * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set.
+			 */
+			thread_lock(thread);
+			if (reasons & AST_PREEMPT)
+				reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
+			thread_unlock(thread);
+
+			assert(waitq_wait_possible(thread));
+
+			if (reasons & AST_PREEMPT) {
+				counter(c_ast_taken_block++);
+				thread_block_reason((thread_continue_t)thread_exception_return, NULL, reasons & AST_PREEMPTION);
+			}
 		}
-		mp_enable_preemption();
-	}
-	if ((reasons & (AST_BLOCK | AST_QUANTUM)) &&
-	    (wait_queue_assert_possible(self))) {
-		counter(c_ast_taken_block++);
-		/*
-		 * JMM - SMP machines don't like blocking at a continuation
-		 * here - why not? Could be a combination of set_state and
-		 * suspension on the thread_create_running API?
-		 *
-		 * thread_block_reason(thread_exception_return, reasons);
-		 */
-		thread_block_reason((void (*)(void))0, reasons);
 	}
+
+	ml_set_interrupts_enabled(enable);
 }
 
+/*
+ * Called at splsched.
+ */
 void
-ast_check(void)
+ast_check(
+	processor_t	processor)
 {
-	register int		mycpu;
-	register processor_t	myprocessor;
-	register thread_t	thread = current_thread();
-	spl_t			s = splsched();
+	thread_t	thread = processor->active_thread;
 
-	mp_disable_preemption();
-	mycpu = cpu_number();
+	if (processor->state == PROCESSOR_RUNNING ||
+	    processor->state == PROCESSOR_SHUTDOWN) {
+		ast_t		preempt;
 
-	/*
-	 * Check processor state for ast conditions.
-	 */
-	myprocessor = cpu_to_processor(mycpu);
-	switch(myprocessor->state) {
-	case PROCESSOR_OFF_LINE:
-	case PROCESSOR_IDLE:
-	case PROCESSOR_DISPATCHING:
-		/*
-		 * No ast.
-		 */
-		break;
-
-#if NCPUS > 1
-	case PROCESSOR_ASSIGN:
 		/*
-		 * Need ast to force action thread onto processor.
-		 *
-		 * XXX Should check if action thread is already there.
+		 * Propagate thread ast to processor.
 		 */
-		ast_on(AST_BLOCK);
-		break;
-#endif /* NCPUS > 1 */
+		pal_ast_check(thread);
 
-	case PROCESSOR_RUNNING:
-	case PROCESSOR_SHUTDOWN:
-		/*
-		 * Propagate thread ast to processor. If we already
-		 * need an ast, don't look for more reasons.
-		 */
-		ast_propagate(current_act()->ast);
-		if (ast_needed(mycpu))
-			break;
+		ast_propagate(thread->ast);
 
 		/*
 		 * Context switch check.
 		 */
-		if (csw_needed(thread, myprocessor)) {
-			ast_on((myprocessor->first_quantum ?
-			       AST_BLOCK : AST_QUANTUM));
-		}
-		break;
+		thread_lock(thread);
 
-	default:
-		panic("ast_check: Bad processor state");
+		processor->current_pri = thread->sched_pri;
+		processor->current_thmode = thread->sched_mode;
+		processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
+
+		if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
+			ast_on(preempt);
+
+		thread_unlock(thread);
 	}
-	mp_enable_preemption();
-	splx(s);
 }
 
 /*
- * JMM - Temporary exports to other components
+ * Set AST flags on current processor
+ * Called at splsched
 */
-#undef ast_on
-#undef ast_off
+void
+ast_on(ast_t reasons)
+{
+	ast_t *pending_ast = ast_pending();
+
+	*pending_ast |= reasons;
+}
+
+/*
+ * Clear AST flags on current processor
+ * Called at splsched
+ */
 void
-ast_on(ast_t reason)
+ast_off(ast_t reasons)
 {
-	boolean_t	intr;
+	ast_t *pending_ast = ast_pending();
 
-	intr = ml_set_interrupts_enabled(FALSE);
-	ast_on_fast(reason);
-	(void *)ml_set_interrupts_enabled(intr);
+	*pending_ast &= ~reasons;
 }
 
+/*
+ * Re-set current processor's per-thread AST flags to those set on thread
+ * Called at splsched
+ */
 void
-ast_off(ast_t reason)
+ast_context(thread_t thread)
 {
-	ast_off_fast(reason);
+	ast_t *pending_ast = ast_pending();
+
+	*pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
 }
+
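
A note for readers tracing the new control flow above: ast_taken() no longer indexes a global need_ast[NCPUS] array by cpu_number(); it consumes a processor-local pending mask via ast_pending(), services urgent preemption first, runs the slower per-thread hooks (BSD, MACF, APC, guard, ledger, telemetry) with interrupts re-enabled, and then re-runs csw_check() because conditions may have changed in the meantime. The following is a minimal user-space sketch of that consume pattern, not xnu code; pending_ast, set_interrupts, and ast_taken_sketch are illustrative names.

/*
 * Sketch of the ast_taken() consume pattern. Illustrative only:
 * these names are made up for the example, not xnu's.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t ast_t;

#define AST_URGENT  0x01u   /* must context-switch immediately */
#define AST_PREEMPT 0x02u   /* preemption requested */
#define AST_BSD     0x04u   /* slow per-thread hook */

static ast_t pending_ast;          /* one per processor in the real kernel */
static bool  interrupts_enabled;   /* stand-in for ml_set_interrupts_enabled() */

static bool set_interrupts(bool enable)
{
    bool old = interrupts_enabled;
    interrupts_enabled = enable;
    return old;
}

static void ast_taken_sketch(ast_t requested, bool enable)
{
    /* snapshot and clear: take ownership of the reasons we will handle */
    ast_t reasons = requested & pending_ast;
    pending_ast &= ~reasons;

    /* urgent preemption is serviced before any slow hooks */
    if (reasons & AST_URGENT) {
        printf("urgent: would thread_block_reason() here\n");
        reasons &= ~(AST_URGENT | AST_PREEMPT);
    }

    /* slow per-thread hooks run with interrupts enabled */
    set_interrupts(enable);
    if (reasons & AST_BSD)
        printf("would run the BSD hook here\n");
    set_interrupts(false);

    /* the real code re-runs csw_check() here: conditions may have changed */
    set_interrupts(enable);
}

int main(void)
{
    pending_ast = AST_URGENT | AST_PREEMPT | AST_BSD;
    ast_taken_sketch(AST_URGENT | AST_PREEMPT | AST_BSD, true);
    printf("pending after: 0x%x\n", (unsigned)pending_ast);
    return 0;
}

The point of the snapshot-and-clear step is that once the reasons are captured, an interrupt arriving later can post new ASTs without losing them; they simply stay pending for the next trip through ast_taken().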
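Similarly, the small accessors at the bottom of the diff replace the old ast_on_fast()/ast_off_fast() macros and the temporary #undef exports: each processor keeps one pending-AST word, ast_on()/ast_off() set and clear bits in it at splsched, and ast_context() swaps in the bits that travel with the thread (AST_PER_THREAD) when a new thread goes on core. Below is a self-contained sketch of that bitmask discipline, again with illustrative names rather than xnu's.

/*
 * Sketch of the pending-AST bitmask API (ast_on/ast_off/ast_context).
 * Illustrative only: names and flag values are made up for the example.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t ast_t;

#define AST_PREEMPT    0x01u
#define AST_URGENT     0x02u
#define AST_BSD        0x04u
#define AST_PER_THREAD AST_BSD   /* bits that travel with the thread */

static ast_t cpu_pending_ast;    /* one word per processor in the real kernel */

static ast_t *ast_pending(void)      { return &cpu_pending_ast; }
static void   ast_on(ast_t reasons)  { *ast_pending() |= reasons; }
static void   ast_off(ast_t reasons) { *ast_pending() &= ~reasons; }

/* on context switch, replace the per-thread bits with the new thread's */
static void ast_context(ast_t thread_ast)
{
    ast_t *pending = ast_pending();

    *pending = (*pending & ~AST_PER_THREAD) | thread_ast;
}

int main(void)
{
    ast_on(AST_PREEMPT | AST_URGENT);   /* processor-owned reasons */
    ast_context(AST_BSD);               /* "switch to" a thread with AST_BSD set */
    assert(*ast_pending() == (AST_PREEMPT | AST_URGENT | AST_BSD));

    ast_off(AST_URGENT);
    printf("pending: 0x%x\n", (unsigned)*ast_pending());
    return 0;
}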