X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/1c79356b52d46aa6b508fb032f5ae709b1f2897b..6601e61aa18bf4f09af135ff61fc7f4771d23b06:/osfmk/kern/ast.c

diff --git a/osfmk/kern/ast.c b/osfmk/kern/ast.c
index f454fe9a3..ed95755ec 100644
--- a/osfmk/kern/ast.c
+++ b/osfmk/kern/ast.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
  *
  * @APPLE_LICENSE_HEADER_START@
  *
@@ -60,242 +60,151 @@
  */
 
 #include <cputypes.h>
-#include <cpus.h>
 #include <platforms.h>
-#include <task_swapper.h>
 
 #include <kern/ast.h>
 #include <kern/counters.h>
 #include <kern/cpu_number.h>
 #include <kern/misc_protos.h>
 #include <kern/queue.h>
-#include <kern/sched.h>
 #include <kern/sched_prim.h>
 #include <kern/thread.h>
-#include <kern/thread_act.h>
-#include <kern/thread_swap.h>
 #include <kern/processor.h>
 #include <kern/spl.h>
+#include <kern/wait_queue.h>
 #include <mach/policy.h>
-#if     TASK_SWAPPER
-#include <kern/task_swap.h>
-#endif  /* TASK_SWAPPER */
 
-volatile ast_t need_ast[NCPUS];
+#ifdef __ppc__
+#include <ppc/chud/chud_xnu.h>  // for CHUD AST hook
+#endif
 
 void
 ast_init(void)
 {
-#ifndef MACHINE_AST
-    register int    i;
-
-    for (i=0; i<NCPUS; i++)
-        need_ast[i] = AST_NONE;
-#endif  /* MACHINE_AST */
 }
 
+/*
+ * Called at splsched.
+ */
 void
 ast_taken(
-    boolean_t       preemption,
-    ast_t           mask,
-    boolean_t       interrupt
-)
+    ast_t           reasons,
+    boolean_t       enable)
 {
-    register thread_t       self = current_thread();
-    register processor_t    mypr;
-    register ast_t          reasons;
-    register int            mycpu;
-    thread_act_t            act = self->top_act;
-#ifdef  MACH_BSD
-    extern void     bsd_ast(thread_act_t);
-    extern void     bsdinit_task(void);
-#endif
-
-    mp_disable_preemption();
-    mycpu = cpu_number();
-    reasons = need_ast[mycpu] & mask;
-    need_ast[mycpu] &= ~reasons;
-    mp_enable_preemption();
-
-    ml_set_interrupts_enabled(interrupt);
-
-    /*
-     * No ast for an idle thread
-     */
-    if (self->state & TH_IDLE)
-        return;
+    boolean_t       preempt_trap = (reasons == AST_PREEMPTION);
+    ast_t           *myast = ast_pending();
+    thread_t        thread = current_thread();
 
+#ifdef  __ppc__
     /*
-     * Check for preemption
+     * CHUD hook - all threads including idle processor threads
      */
-    if ((reasons & AST_URGENT) && (wait_queue_assert_possible(self))) {
-        reasons &= ~AST_URGENT;
-        if ((reasons & (AST_BLOCK|AST_QUANTUM)) == 0) {
-            mp_disable_preemption();
-            mypr = current_processor();
-            if (csw_needed(self, mypr)) {
-                reasons |= (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
+    if(perfASTHook) {
+        if(*myast & AST_PPC_CHUD_ALL) {
+            perfASTHook(0, NULL, 0, 0);
+
+            if(*myast == AST_NONE) {
+                return; // nothing left to do
             }
-            mp_enable_preemption();
         }
-        if (reasons & (AST_BLOCK | AST_QUANTUM)) {
-            counter(c_ast_taken_block++);
-            thread_block_reason((void (*)(void))0,
-                        (reasons & (AST_BLOCK | AST_QUANTUM)));
-        }
-        if (reasons == 0)
-            return;
-    }
-
-#ifdef  MACH_BSD
-    /*
-     * Check for BSD hardcoded hooks
-     */
-    if (reasons & AST_BSD) {
-        thread_ast_clear(act,AST_BSD);
-        bsd_ast(act);
-    }
-    if (reasons & AST_BSD_INIT) {
-        thread_ast_clear(act,AST_BSD_INIT);
-        bsdinit_task();
+    } else {
+        *myast &= ~AST_PPC_CHUD_ALL;
     }
 #endif
 
-#if     TASK_SWAPPER
-    /* must be before AST_APC */
-    if (reasons & AST_SWAPOUT) {
-        spl_t s;
-        swapout_ast();
-        s = splsched();
-        mp_disable_preemption();
-        mycpu = cpu_number();
-        if (need_ast[mycpu] & AST_APC) {
-            /* generated in swapout_ast() to get suspended */
-            reasons |= AST_APC;             /* process now ... */
-            need_ast[mycpu] &= ~AST_APC;    /* ... and not later */
-        }
-        mp_enable_preemption();
-        splx(s);
-    }
-#endif  /* TASK_SWAPPER */
-
-    /*
-     *  migration APC hook
-     */
-    if (reasons & AST_APC) {
-        act_execute_returnhandlers();
-    }
+    reasons &= *myast;
+    *myast &= ~reasons;
 
-    /*
-     *  thread_block needs to know if the thread's quantum
-     *  expired so the thread can be put on the tail of
-     *  run queue. One of the previous actions might well
-     *  have woken a high-priority thread, so we also use
-     *  csw_needed check.
+    /*
+     * Handle ASTs for all threads
+     * except idle processor threads.
      */
-    reasons &= (AST_BLOCK | AST_QUANTUM);
-    if (reasons == 0) {
-        mp_disable_preemption();
-        mypr = current_processor();
-        if (csw_needed(self, mypr)) {
-            reasons = (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
-        }
-        mp_enable_preemption();
-    }
-    if ((reasons & (AST_BLOCK | AST_QUANTUM)) &&
-        (wait_queue_assert_possible(self))) {
-        counter(c_ast_taken_block++);
+    if (!(thread->state & TH_IDLE)) {
        /*
-         * JMM - SMP machines don't like blocking at a continuation
-         * here - why not?  Could be a combination of set_state and
-         * suspension on the thread_create_running API?
-         *
-         * thread_block_reason(thread_exception_return, reasons);
+         * Check for urgent preemption.
         */
-        thread_block_reason((void (*)(void))0, reasons);
-    }
-}
-
-void
-ast_check(void)
-{
-    register int            mycpu;
-    register processor_t    myprocessor;
-    register thread_t       thread = current_thread();
-    spl_t                   s = splsched();
+        if (    (reasons & AST_URGENT)              &&
+                wait_queue_assert_possible(thread)      ) {
+            if (reasons & AST_PREEMPT) {
+                counter(c_ast_taken_block++);
+                thread_block_reason(THREAD_CONTINUE_NULL, NULL,
+                                        AST_PREEMPT | AST_URGENT);
+            }
 
-    mp_disable_preemption();
-    mycpu = cpu_number();
+            reasons &= ~AST_PREEMPTION;
+        }
 
-    /*
-     *  Check processor state for ast conditions.
-     */
-    myprocessor = cpu_to_processor(mycpu);
-    switch(myprocessor->state) {
-        case PROCESSOR_OFF_LINE:
-        case PROCESSOR_IDLE:
-        case PROCESSOR_DISPATCHING:
        /*
-         *  No ast.
+         * The kernel preempt traps
+         * skip all other ASTs.
         */
-            break;
+        if (!preempt_trap) {
+            ml_set_interrupts_enabled(enable);
 
-#if     NCPUS > 1
-        case PROCESSOR_ASSIGN:
-        /*
-         *  Need ast to force action thread onto processor.
-         *
-         * XXX  Should check if action thread is already there.
-         */
-        ast_on(AST_BLOCK);
-        break;
-#endif  /* NCPUS > 1 */
+#ifdef  MACH_BSD
+            /*
+             * Handle BSD hook.
+             */
+            if (reasons & AST_BSD) {
+                thread_ast_clear(thread, AST_BSD);
+                bsd_ast(thread);
+            }
+#endif
 
-        case PROCESSOR_RUNNING:
-        case PROCESSOR_SHUTDOWN:
-        /*
-         *  Propagate thread ast to processor.  If we already
-         *  need an ast, don't look for more reasons.
-         */
-        ast_propagate(current_act()->ast);
-        if (ast_needed(mycpu))
-            break;
+            /*
+             * Thread APC hook.
+             */
+            if (reasons & AST_APC)
+                act_execute_returnhandlers();
 
-        /*
-         *  Context switch check.
-         */
-        if (csw_needed(thread, myprocessor)) {
-            ast_on((myprocessor->first_quantum ?
-                   AST_BLOCK : AST_QUANTUM));
-        }
-        break;
+            ml_set_interrupts_enabled(FALSE);
+
+            /*
+             * Check for preemption.
+             */
+            if (reasons & AST_PREEMPT) {
+                processor_t     myprocessor = current_processor();
 
-        default:
-            panic("ast_check: Bad processor state");
+                if (csw_needed(thread, myprocessor))
+                    reasons = AST_PREEMPT;
+                else
+                    reasons = AST_NONE;
+            }
+            if (    (reasons & AST_PREEMPT)             &&
+                    wait_queue_assert_possible(thread)      ) {
+                counter(c_ast_taken_block++);
+                thread_block_reason((thread_continue_t)thread_exception_return, NULL, AST_PREEMPT);
+            }
+        }
     }
-    mp_enable_preemption();
-    splx(s);
+
+    ml_set_interrupts_enabled(enable);
 }
 
 /*
- * JMM - Temporary exports to other components
+ * Called at splsched.
  */
-#undef ast_on
-#undef ast_off
-
 void
-ast_on(ast_t reason)
+ast_check(
+    processor_t     processor)
 {
-    boolean_t   intr;
+    register thread_t       thread = processor->active_thread;
 
-    intr = ml_set_interrupts_enabled(FALSE);
-    ast_on_fast(reason);
-    (void *)ml_set_interrupts_enabled(intr);
-}
+    processor->current_pri = thread->sched_pri;
+    if (    processor->state == PROCESSOR_RUNNING   ||
+            processor->state == PROCESSOR_SHUTDOWN  ) {
+        register ast_t      preempt;
 
-void
-ast_off(ast_t reason)
-{
-    ast_off_fast(reason);
+        /*
+         *  Propagate thread ast to processor.
+         */
+        ast_propagate(thread->ast);
+
+        /*
+         *  Context switch check.
+         */
+        if ((preempt = csw_check(thread, processor)) != AST_NONE)
+            ast_on(preempt);
+    }
 }