X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3903760236c30e3b5ace7a4eefac3a269d68957c..a39ff7e25e19b3a8c3020042a3872ca9ec9659f1:/osfmk/kern/ast.c

diff --git a/osfmk/kern/ast.c b/osfmk/kern/ast.c
index 62f060d65..8f282ce58 100644
--- a/osfmk/kern/ast.c
+++ b/osfmk/kern/ast.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -53,21 +53,9 @@
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
-/*
- */
-
-/*
- *
- *	This file contains routines to check whether an ast is needed.
- *
- *	ast_check() - check whether ast is needed for interrupt or context
- *	switch.  Usually called by clock interrupt handler.
- *
- */
 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -80,205 +68,287 @@
 #endif
 #include 
 #include 
+#include 
 #include 
 #include 
-#include 	// for CHUD AST hook
-#include 
 #include 	// for MACF AST hook
+#include 
 
-volatile perfASTCallback perfASTHook;
-
+static void __attribute__((noinline, noreturn, disable_tail_calls))
+thread_preempted(__unused void* parameter, __unused wait_result_t result)
+{
+	/*
+	 * We've been scheduled again after a userspace preemption,
+	 * try again to return to userspace.
+	 */
+	thread_exception_return();
+}
 
+/*
+ * AST_URGENT was detected while in kernel mode
+ * Called with interrupts disabled, returns the same way
+ * Must return to caller
+ */
 void
-ast_init(void)
+ast_taken_kernel(void)
 {
-}
+	assert(ml_get_interrupts_enabled() == FALSE);
 
-#ifdef CONFIG_DTRACE
-extern void dtrace_ast(void);
-#endif
+	thread_t thread = current_thread();
+
+	/* Idle threads handle preemption themselves */
+	if ((thread->state & TH_IDLE)) {
+		ast_off(AST_PREEMPTION);
+		return;
+	}
+
+	/*
+	 * It's possible for this to be called after AST_URGENT
+	 * has already been handled, due to races in enable_preemption
+	 */
+	if (ast_peek(AST_URGENT) != AST_URGENT)
+		return;
+
+	/*
+	 * Don't preempt if the thread is already preparing to block.
+	 * TODO: the thread can cheese this with clear_wait()
+	 */
+	if (waitq_wait_possible(thread) == FALSE) {
+		/* Consume AST_URGENT or the interrupt will call us again */
+		ast_consume(AST_URGENT);
+		return;
+	}
+
+	/* TODO: Should we csw_check again to notice if conditions have changed? */
+
+	ast_t urgent_reason = ast_consume(AST_PREEMPTION);
+
+	assert(urgent_reason & AST_PREEMPT);
+
+	counter(c_ast_taken_block++);
+
+	thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);
+
+	assert(ml_get_interrupts_enabled() == FALSE);
+}
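The peek-then-consume protocol is the heart of the new ast_taken_kernel(). Below is a minimal user-space model of it, for illustration only: the single `pending` word and the bit values are stand-ins for the real per-processor AST state, not xnu definitions.

    /*
     * Illustrative sketch only (not xnu code).  Models the
     * ast_peek()/ast_consume() protocol with one pending word.
     */
    #include <assert.h>
    #include <stdio.h>

    typedef unsigned int ast_t;
    #define AST_PREEMPT    0x01
    #define AST_QUANTUM    0x02
    #define AST_URGENT     0x04
    #define AST_PREEMPTION (AST_PREEMPT | AST_QUANTUM | AST_URGENT)

    static ast_t pending;               /* per-processor pending word */

    static ast_t ast_peek(ast_t reasons)    { return pending & reasons; }

    static ast_t ast_consume(ast_t reasons)
    {
        reasons &= pending;
        pending &= ~reasons;            /* cleared bits cannot re-fire */
        return reasons;
    }

    int main(void)
    {
        pending = AST_URGENT | AST_PREEMPT;

        /* Peek first: if AST_URGENT was already handled, this is a no-op. */
        if (ast_peek(AST_URGENT) != AST_URGENT)
            return 0;

        /* Otherwise consume every preemption bit in one shot. */
        ast_t urgent_reason = ast_consume(AST_PREEMPTION);
        assert(urgent_reason & AST_PREEMPT);
        printf("would block with reasons 0x%x\n", urgent_reason);
        assert(pending == 0);           /* nothing left to re-trigger */
        return 0;
    }

Peeking first keeps the common already-handled path cheap, while consuming before blocking, or when blocking is impossible, is what stops the urgent-preemption interrupt from firing again.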
 /*
- * Called at splsched.
+ * An AST flag was set while returning to user mode
+ * Called with interrupts disabled, returns with interrupts enabled
+ * May call continuation instead of returning
  */
 void
-ast_taken(
-	ast_t		reasons,
-	boolean_t	enable
-)
+ast_taken_user(void)
 {
-	boolean_t		preempt_trap = (reasons == AST_PREEMPTION);
-	ast_t			*myast = ast_pending();
-	thread_t		thread = current_thread();
-	perfASTCallback	perf_hook = perfASTHook;
+	assert(ml_get_interrupts_enabled() == FALSE);
+
+	thread_t thread = current_thread();
+
+	/* We are about to return to userspace, there must not be a pending wait */
+	assert(waitq_wait_possible(thread));
+	assert((thread->state & TH_IDLE) == 0);
 
 	/*
-	 * CHUD hook - all threads including idle processor threads
+	 * If this thread was urgently preempted in userspace,
+	 * take the preemption before processing the ASTs.
+	 * The trap handler will call us again if we have more ASTs, so it's
+	 * safe to block in a continuation here.
 	 */
-	if (perf_hook) {
-		if (*myast & AST_CHUD_ALL) {
-			(*perf_hook)(reasons, myast);
-
-			if (*myast == AST_NONE)
-				return;
-		}
+	if (ast_peek(AST_URGENT) == AST_URGENT) {
+		ast_t urgent_reason = ast_consume(AST_PREEMPTION);
+
+		assert(urgent_reason & AST_PREEMPT);
+
+		/* TODO: Should we csw_check again to notice if conditions have changed? */
+
+		thread_block_reason(thread_preempted, NULL, urgent_reason);
+		/* NOTREACHED */
 	}
-	else
-		*myast &= ~AST_CHUD_ALL;
 
-	reasons &= *myast;
-	*myast &= ~reasons;
+	/*
+	 * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
+	 * on a different processor. Only the ast bit on the thread will be set.
+	 *
+	 * Force a propagate for concurrent updates without an IPI.
+	 */
+	ast_propagate(thread);
 
 	/*
-	 * Handle ASTs for all threads
-	 * except idle processor threads.
+	 * Consume all non-preemption processor ASTs matching reasons
+	 * because we're handling them here.
+	 *
+	 * If one of the AST handlers blocks in a continuation,
+	 * we'll reinstate the unserviced thread-level AST flags
+	 * from the thread to the processor on context switch.
+	 * If one of the AST handlers sets another AST,
+	 * the trap handler will call ast_taken_user again.
+	 *
+	 * We expect the AST handlers not to thread_exception_return
+	 * without an ast_propagate or context switch to reinstate
+	 * the per-processor ASTs.
+	 *
+	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
 	 */
-	if (!(thread->state & TH_IDLE)) {
-		/*
-		 * Check for urgent preemption.
-		 */
-		if (	(reasons & AST_URGENT)		&&
-			waitq_wait_possible(thread)	) {
-			if (reasons & AST_PREEMPT) {
-				counter(c_ast_taken_block++);
-				thread_block_reason(THREAD_CONTINUE_NULL, NULL,
-							reasons & AST_PREEMPTION);
-			}
-
-			reasons &= ~AST_PREEMPTION;
-		}
+	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);
 
-		/*
-		 * The kernel preempt traps
-		 * skip all other ASTs.
-		 */
-		if (!preempt_trap) {
-			ml_set_interrupts_enabled(enable);
+	ml_set_interrupts_enabled(TRUE);
 
 #if CONFIG_DTRACE
-			if (reasons & AST_DTRACE) {
-				dtrace_ast();
-			}
+	if (reasons & AST_DTRACE) {
+		dtrace_ast();
+	}
#endif
 
-#ifdef	MACH_BSD
-			/*
-			 * Handle BSD hook.
-			 */
-			if (reasons & AST_BSD) {
-				thread_ast_clear(thread, AST_BSD);
-				bsd_ast(thread);
-			}
+#ifdef MACH_BSD
+	if (reasons & AST_BSD) {
+		thread_ast_clear(thread, AST_BSD);
+		bsd_ast(thread);
+	}
 #endif
+
 #if CONFIG_MACF
-			/*
-			 * Handle MACF hook.
-			 */
-			if (reasons & AST_MACF) {
-				thread_ast_clear(thread, AST_MACF);
-				mac_thread_userret(thread);
-			}
+	if (reasons & AST_MACF) {
+		thread_ast_clear(thread, AST_MACF);
+		mac_thread_userret(thread);
+	}
 #endif
 
-			/*
-			 * Thread APC hook.
-			 */
-			if (reasons & AST_APC) {
-				thread_ast_clear(thread, AST_APC);
-				thread_apc_ast(thread);
-			}
-
-			if (reasons & AST_GUARD) {
-				thread_ast_clear(thread, AST_GUARD);
-				guard_ast(thread);
-			}
-
-			if (reasons & AST_LEDGER) {
-				thread_ast_clear(thread, AST_LEDGER);
-				ledger_ast(thread);
-			}
-
-			/*
-			 * Kernel Profiling Hook
-			 */
-			if (reasons & AST_KPERF) {
-				thread_ast_clear(thread, AST_KPERF);
-				kperf_kpc_thread_ast(thread);
-			}
+
+	if (reasons & AST_APC) {
+		thread_ast_clear(thread, AST_APC);
+		thread_apc_ast(thread);
+	}
+
+	if (reasons & AST_GUARD) {
+		thread_ast_clear(thread, AST_GUARD);
+		guard_ast(thread);
+	}
+
+	if (reasons & AST_LEDGER) {
+		thread_ast_clear(thread, AST_LEDGER);
+		ledger_ast(thread);
+	}
+
+	if (reasons & AST_KPERF) {
+		thread_ast_clear(thread, AST_KPERF);
+		kperf_kpc_thread_ast(thread);
+	}
+
+	if (reasons & AST_KEVENT) {
+		thread_ast_clear(thread, AST_KEVENT);
+		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
+		if (bits) kevent_ast(thread, bits);
+	}
 
 #if CONFIG_TELEMETRY
-		if (reasons & AST_TELEMETRY_ALL) {
-			boolean_t interrupted_userspace = FALSE;
-			boolean_t io_telemetry = FALSE;
-
-			assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
-			interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
-			io_telemetry = ((reasons & AST_TELEMETRY_IO) ? TRUE : FALSE);
-			thread_ast_clear(thread, AST_TELEMETRY_ALL);
-			telemetry_ast(thread, interrupted_userspace, io_telemetry);
-		}
+	if (reasons & AST_TELEMETRY_ALL) {
+		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
+		thread_ast_clear(thread, AST_TELEMETRY_ALL);
+		telemetry_ast(thread, telemetry_reasons);
+	}
 #endif
 
-	ml_set_interrupts_enabled(FALSE);
+	spl_t s = splsched();
 
 #if CONFIG_SCHED_SFI
-	if (reasons & AST_SFI) {
-		sfi_ast(thread);
-	}
+	/*
+	 * SFI is currently a per-processor AST, not a per-thread AST
+	 * TODO: SFI should be a per-thread AST
+	 */
+	if (ast_consume(AST_SFI) == AST_SFI) {
+		sfi_ast(thread);
+	}
 #endif
 
-	/*
-	 * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set.
-	 */
-	thread_lock(thread);
-	if (reasons & AST_PREEMPT)
-		reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
-	thread_unlock(thread);
+	/* We are about to return to userspace, there must not be a pending wait */
+	assert(waitq_wait_possible(thread));
+
+	/*
+	 * We've handled all per-thread ASTs, time to handle non-urgent preemption.
+	 *
+	 * We delay reading the preemption bits until now in case the thread
+	 * blocks while handling per-thread ASTs.
+	 *
+	 * If one of the AST handlers had managed to set a new AST bit,
+	 * thread_exception_return will call ast_taken again.
+	 */
+	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);
+
+	if (preemption_reasons & AST_PREEMPT) {
+		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */
+
+		thread_lock(thread);
+		preemption_reasons = csw_check(current_processor(), (preemption_reasons & AST_QUANTUM));
+		thread_unlock(thread);
 
-	assert(waitq_wait_possible(thread));
+#if CONFIG_SCHED_SFI
+		/* csw_check might tell us that SFI is needed */
+		if (preemption_reasons & AST_SFI) {
+			sfi_ast(thread);
+		}
+#endif
 
-		if (reasons & AST_PREEMPT) {
-			counter(c_ast_taken_block++);
-			thread_block_reason((thread_continue_t)thread_exception_return, NULL, reasons & AST_PREEMPTION);
-		}
+		if (preemption_reasons & AST_PREEMPT) {
+			counter(c_ast_taken_block++);
+			/* switching to a continuation implicitly re-enables interrupts */
+			thread_block_reason(thread_preempted, NULL, preemption_reasons);
+			/* NOTREACHED */
 		}
 	}
 
-	ml_set_interrupts_enabled(enable);
+	splx(s);
 }
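Note that ast_taken_user() never unwinds out of a preemption: it hands thread_preempted() to thread_block_reason() as a continuation, so the thread resumes on a fresh kernel stack and retries the whole return-to-user path from the top. A rough user-space sketch of that control flow follows; every fake_* name is a stand-in, not a kernel API.

    /*
     * Illustrative sketch only (not xnu code).  Models why a blocked
     * "return to user" is restarted through a continuation rather
     * than by unwinding the kernel stack.
     */
    #include <stdio.h>

    typedef void (*thread_continue_t)(void *param, int wait_result);

    /* Stand-in for thread_block_reason(): pretend the thread was
     * switched out and later rescheduled, then enter the continuation
     * instead of returning to the caller.  (The real call never
     * returns when a continuation is supplied.) */
    static void fake_thread_block(thread_continue_t continuation)
    {
        printf("thread switched out; kernel stack can be recycled\n");
        /* ...scheduler runs other threads here... */
        continuation(NULL, 0);      /* resumed: start over */
    }

    static void fake_thread_exception_return(void)
    {
        printf("re-checking ASTs, then returning to userspace\n");
    }

    /* Mirrors thread_preempted(): resumed after a userspace
     * preemption, simply retry the return-to-user path. */
    static void thread_preempted_sketch(void *param, int wait_result)
    {
        (void)param; (void)wait_result;
        fake_thread_exception_return();
    }

    int main(void)
    {
        fake_thread_block(thread_preempted_sketch);
        return 0;
    }

Because the continuation re-enters the trap-return path, any AST bits set while the thread was blocked are picked up on the retry rather than needing special-case handling at the block site.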
- */ - if (reasons & AST_MACF) { - thread_ast_clear(thread, AST_MACF); - mac_thread_userret(thread); - } + if (reasons & AST_MACF) { + thread_ast_clear(thread, AST_MACF); + mac_thread_userret(thread); + } #endif - /* - * Thread APC hook. - */ - if (reasons & AST_APC) { - thread_ast_clear(thread, AST_APC); - thread_apc_ast(thread); - } - - if (reasons & AST_GUARD) { - thread_ast_clear(thread, AST_GUARD); - guard_ast(thread); - } - - if (reasons & AST_LEDGER) { - thread_ast_clear(thread, AST_LEDGER); - ledger_ast(thread); - } - - /* - * Kernel Profiling Hook - */ - if (reasons & AST_KPERF) { - thread_ast_clear(thread, AST_KPERF); - kperf_kpc_thread_ast(thread); - } + + if (reasons & AST_APC) { + thread_ast_clear(thread, AST_APC); + thread_apc_ast(thread); + } + + if (reasons & AST_GUARD) { + thread_ast_clear(thread, AST_GUARD); + guard_ast(thread); + } + + if (reasons & AST_LEDGER) { + thread_ast_clear(thread, AST_LEDGER); + ledger_ast(thread); + } + + if (reasons & AST_KPERF) { + thread_ast_clear(thread, AST_KPERF); + kperf_kpc_thread_ast(thread); + } + + if (reasons & AST_KEVENT) { + thread_ast_clear(thread, AST_KEVENT); + uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0); + if (bits) kevent_ast(thread, bits); + } #if CONFIG_TELEMETRY - if (reasons & AST_TELEMETRY_ALL) { - boolean_t interrupted_userspace = FALSE; - boolean_t io_telemetry = FALSE; - - assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */ - interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE; - io_telemetry = ((reasons & AST_TELEMETRY_IO) ? TRUE : FALSE); - thread_ast_clear(thread, AST_TELEMETRY_ALL); - telemetry_ast(thread, interrupted_userspace, io_telemetry); - } + if (reasons & AST_TELEMETRY_ALL) { + ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL; + thread_ast_clear(thread, AST_TELEMETRY_ALL); + telemetry_ast(thread, telemetry_reasons); + } #endif - ml_set_interrupts_enabled(FALSE); + spl_t s = splsched(); #if CONFIG_SCHED_SFI - if (reasons & AST_SFI) { - sfi_ast(thread); - } + /* + * SFI is currently a per-processor AST, not a per-thread AST + * TODO: SFI should be a per-thread AST + */ + if (ast_consume(AST_SFI) == AST_SFI) { + sfi_ast(thread); + } #endif - /* - * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set. - */ - thread_lock(thread); - if (reasons & AST_PREEMPT) - reasons = csw_check(current_processor(), reasons & AST_QUANTUM); - thread_unlock(thread); + /* We are about to return to userspace, there must not be a pending wait */ + assert(waitq_wait_possible(thread)); + + /* + * We've handled all per-thread ASTs, time to handle non-urgent preemption. + * + * We delay reading the preemption bits until now in case the thread + * blocks while handling per-thread ASTs. + * + * If one of the AST handlers had managed to set a new AST bit, + * thread_exception_return will call ast_taken again. + */ + ast_t preemption_reasons = ast_consume(AST_PREEMPTION); + + if (preemption_reasons & AST_PREEMPT) { + /* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. 
@@ -306,6 +376,37 @@ ast_off(ast_t reasons)
 	*pending_ast &= ~reasons;
 }
 
+/*
+ * Consume the requested subset of the AST flags set on the processor
+ * Return the bits that were set
+ * Called at splsched
+ */
+ast_t
+ast_consume(ast_t reasons)
+{
+	ast_t *pending_ast = ast_pending();
+
+	reasons &= *pending_ast;
+	*pending_ast &= ~reasons;
+
+	return reasons;
+}
+
+/*
+ * Read the requested subset of the AST flags set on the processor
+ * Return the bits that were set, don't modify the processor
+ * Called at splsched
+ */
+ast_t
+ast_peek(ast_t reasons)
+{
+	ast_t *pending_ast = ast_pending();
+
+	reasons &= *pending_ast;
+
+	return reasons;
+}
+
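Both helpers carry the comment "Called at splsched" because the pending word is updated with a plain read-modify-write rather than an atomic operation. The sketch below shows the lost-update race that interrupt disablement prevents; it is illustrative only, with invented bit values.

    /*
     * Illustrative sketch only (not xnu code).  The &= in ast_consume()
     * is a separate load and store; an interrupt that sets a bit in
     * between would be clobbered by the stale write-back.  Running at
     * splsched (interrupts disabled) rules that out.
     */
    #include <stdio.h>

    typedef unsigned int ast_t;

    static volatile ast_t pending;      /* per-processor word, stand-in */

    static ast_t ast_consume_sketch(ast_t reasons)
    {
        reasons &= pending;             /* which requested bits are set? */
        ast_t old = pending;            /* load ...                      */
        /* an interrupt here could do: pending |= 0x8;                   */
        pending = old & ~reasons;       /* ... stale store would lose it */
        return reasons;
    }

    int main(void)
    {
        pending = 0x3;
        ast_t got = ast_consume_sketch(0x1);
        printf("consumed 0x%x, pending now 0x%x\n", got, pending);
        return 0;
    }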
- */ - thread_lock(thread); + ast_t preempt; - processor->current_pri = thread->sched_pri; - processor->current_thmode = thread->sched_mode; - processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread); + if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE) + ast_on(preempt); - if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE) - ast_on(preempt); + thread_unlock(thread); - thread_unlock(thread); + if (needs_callout) { + machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE, + mach_approximate_time(), 0, thread); } } @@ -306,6 +376,37 @@ ast_off(ast_t reasons) *pending_ast &= ~reasons; } +/* + * Consume the requested subset of the AST flags set on the processor + * Return the bits that were set + * Called at splsched + */ +ast_t +ast_consume(ast_t reasons) +{ + ast_t *pending_ast = ast_pending(); + + reasons &= *pending_ast; + *pending_ast &= ~reasons; + + return reasons; +} + +/* + * Read the requested subset of the AST flags set on the processor + * Return the bits that were set, don't modify the processor + * Called at splsched + */ +ast_t +ast_peek(ast_t reasons) +{ + ast_t *pending_ast = ast_pending(); + + reasons &= *pending_ast; + + return reasons; +} + /* * Re-set current processor's per-thread AST flags to those set on thread * Called at splsched @@ -318,9 +419,20 @@ ast_context(thread_t thread) *pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast); } +/* + * Propagate ASTs set on a thread to the current processor + * Called at splsched + */ +void +ast_propagate(thread_t thread) +{ + ast_on(thread->ast); +} + void ast_dtrace_on(void) { ast_on(AST_DTRACE); } +