X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/d7e50217d7adf6e52786a38bcaa4cd698cb9a79e..a39ff7e25e19b3a8c3020042a3872ca9ec9659f1:/osfmk/kern/ast.c

diff --git a/osfmk/kern/ast.c b/osfmk/kern/ast.c
index 0764da904..8f282ce58 100644
--- a/osfmk/kern/ast.c
+++ b/osfmk/kern/ast.c
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
 /*
 * @OSF_COPYRIGHT@
@@ -50,171 +53,386 @@
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
-/*
- */
-
-/*
- *
- * This file contains routines to check whether an ast is needed.
- *
- * ast_check() - check whether ast is needed for interrupt or context
- * switch. Usually called by clock interrupt handler.
- *
- */
-
-#include <cputypes.h>
-#include <cpus.h>
-#include <platforms.h>
 #include <kern/ast.h>
 #include <kern/counters.h>
-#include <kern/cpu_number.h>
 #include <kern/misc_protos.h>
 #include <kern/queue.h>
 #include <kern/sched_prim.h>
 #include <kern/thread.h>
-#include <kern/thread_act.h>
-#include <kern/thread_swap.h>
 #include <kern/processor.h>
 #include <kern/spl.h>
+#include <kern/sfi.h>
+#if CONFIG_TELEMETRY
+#include <kern/telemetry.h>
+#endif
+#include <kern/waitq.h>
+#include <kern/ledger.h>
+#include <kern/machine.h>
+#include <kperf/kperf_kpc.h>
 #include <mach/policy.h>
+#include <security/mac_mach_internal.h> // for MACF AST hook
+#include <stdatomic.h>
 
-volatile ast_t need_ast[NCPUS];
+static void __attribute__((noinline, noreturn, disable_tail_calls))
+thread_preempted(__unused void* parameter, __unused wait_result_t result)
+{
+	/*
+	 * We've been scheduled again after a userspace preemption,
+	 * try again to return to userspace.
+	 */
+	thread_exception_return();
+}
 
+/*
+ * AST_URGENT was detected while in kernel mode
+ * Called with interrupts disabled, returns the same way
+ * Must return to caller
+ */
 void
-ast_init(void)
+ast_taken_kernel(void)
 {
-#ifndef MACHINE_AST
-	register int i;
+	assert(ml_get_interrupts_enabled() == FALSE);
 
-	for (i=0; i<NCPUS; i++) {
-		need_ast[i] = AST_NONE;
+	thread_t thread = current_thread();
+
+	/* Idle threads handle preemption themselves */
+	if ((thread->state & TH_IDLE)) {
+		ast_off(AST_PREEMPTION);
+		return;
+	}
+
+	/*
+	 * It's possible for this to be called after AST_URGENT
+	 * has already been handled, due to races in enable_preemption
+	 */
+	if (ast_peek(AST_URGENT) != AST_URGENT)
+		return;
+
+	/*
+	 * Don't preempt if the thread is already preparing to block.
+	 * TODO: the thread can cheese this with clear_wait()
+	 */
+	if (waitq_wait_possible(thread) == FALSE) {
+		/* Consume AST_URGENT or the interrupt will call us again */
+		ast_consume(AST_URGENT);
+		return;
 	}
-#endif /* MACHINE_AST */
+
+	/* TODO: Should we csw_check again to notice if conditions have changed? */
+
+	ast_t urgent_reason = ast_consume(AST_PREEMPTION);
+
+	assert(urgent_reason & AST_PREEMPT);
+
+	counter(c_ast_taken_block++);
+
+	thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);
+
+	assert(ml_get_interrupts_enabled() == FALSE);
 }
 
+/*
+ * An AST flag was set while returning to user mode
+ * Called with interrupts disabled, returns with interrupts enabled
+ * May call continuation instead of returning
+ */
 void
-ast_taken(
-	ast_t reasons,
-	boolean_t enable
-)
+ast_taken_user(void)
 {
-	register int mycpu;
-	register processor_t myprocessor;
-	register thread_t self = current_thread();
-	boolean_t preempt_trap = (reasons == AST_PREEMPT);
+	assert(ml_get_interrupts_enabled() == FALSE);
 
-	disable_preemption();
-	mycpu = cpu_number();
-	reasons &= need_ast[mycpu];
-	need_ast[mycpu] &= ~reasons;
-	enable_preemption();
+	thread_t thread = current_thread();
 
-	/*
-	 * No ast for an idle thread
-	 */
-	if (self->state & TH_IDLE)
-		goto enable_and_return;
+	/* We are about to return to userspace, there must not be a pending wait */
+	assert(waitq_wait_possible(thread));
+	assert((thread->state & TH_IDLE) == 0);
+
+	/* TODO: Add more 'return to userspace' assertions here */
 
 	/*
-	 * Check for urgent preemption
+	 * If this thread was urgently preempted in userspace,
+	 * take the preemption before processing the ASTs.
+	 * The trap handler will call us again if we have more ASTs, so it's
+	 * safe to block in a continuation here.
 	 */
-	if ((reasons & AST_URGENT) && wait_queue_assert_possible(self)) {
-		if (reasons & AST_BLOCK) {
-			counter(c_ast_taken_block++);
-			thread_block_reason((void (*)(void))0, AST_BLOCK);
-		}
+	if (ast_peek(AST_URGENT) == AST_URGENT) {
+		ast_t urgent_reason = ast_consume(AST_PREEMPTION);
 
-		reasons &= ~AST_PREEMPT;
-		if (reasons == 0)
-			goto enable_and_return;
-	}
+		assert(urgent_reason & AST_PREEMPT);
 
-	if (preempt_trap)
-		goto enable_and_return;
+		/* TODO: Should we csw_check again to notice if conditions have changed? */
+
+		thread_block_reason(thread_preempted, NULL, urgent_reason);
+		/* NOTREACHED */
+	}
 
-	ml_set_interrupts_enabled(enable);
+	/*
+	 * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
+	 * on a different processor. Only the ast bit on the thread will be set.
+	 *
+	 * Force a propagate for concurrent updates without an IPI.
+	 */
+	ast_propagate(thread);
 
-#ifdef MACH_BSD
 	/*
-	 * Check for BSD hook
+	 * Consume all non-preemption processor ASTs matching reasons
+	 * because we're handling them here.
+	 *
+	 * If one of the AST handlers blocks in a continuation,
+	 * we'll reinstate the unserviced thread-level AST flags
+	 * from the thread to the processor on context switch.
+	 * If one of the AST handlers sets another AST,
+	 * the trap handler will call ast_taken_user again.
+	 *
+	 * We expect the AST handlers not to thread_exception_return
+	 * without an ast_propagate or context switch to reinstate
+	 * the per-processor ASTs.
+	 *
+	 * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
	 */
+	ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);
+
+	ml_set_interrupts_enabled(TRUE);
+
+#if CONFIG_DTRACE
+	if (reasons & AST_DTRACE) {
+		dtrace_ast();
+	}
+#endif
+
+#ifdef MACH_BSD
 	if (reasons & AST_BSD) {
-		extern void bsd_ast(thread_act_t act);
-		thread_act_t act = self->top_act;
+		thread_ast_clear(thread, AST_BSD);
+		bsd_ast(thread);
+	}
+#endif
 
-		thread_ast_clear(act, AST_BSD);
-		bsd_ast(act);
+#if CONFIG_MACF
+	if (reasons & AST_MACF) {
+		thread_ast_clear(thread, AST_MACF);
+		mac_thread_userret(thread);
 	}
 #endif
 
-	/*
-	 * migration APC hook
-	 */
 	if (reasons & AST_APC) {
-		act_execute_returnhandlers();
+		thread_ast_clear(thread, AST_APC);
+		thread_apc_ast(thread);
+	}
+
+	if (reasons & AST_GUARD) {
+		thread_ast_clear(thread, AST_GUARD);
+		guard_ast(thread);
+	}
+
+	if (reasons & AST_LEDGER) {
+		thread_ast_clear(thread, AST_LEDGER);
+		ledger_ast(thread);
+	}
+
+	if (reasons & AST_KPERF) {
+		thread_ast_clear(thread, AST_KPERF);
+		kperf_kpc_thread_ast(thread);
+	}
+
+	if (reasons & AST_KEVENT) {
+		thread_ast_clear(thread, AST_KEVENT);
+		uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
+		if (bits) kevent_ast(thread, bits);
 	}
 
-	/*
-	 * Check for normal preemption
+#if CONFIG_TELEMETRY
+	if (reasons & AST_TELEMETRY_ALL) {
+		ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
+		thread_ast_clear(thread, AST_TELEMETRY_ALL);
+		telemetry_ast(thread, telemetry_reasons);
+	}
+#endif
+
+	spl_t s = splsched();
+
+#if CONFIG_SCHED_SFI
+	/*
+	 * SFI is currently a per-processor AST, not a per-thread AST
+	 * TODO: SFI should be a per-thread AST
 	 */
-	reasons &= AST_BLOCK;
-	if (reasons == 0) {
-		disable_preemption();
-		myprocessor = current_processor();
-		if (csw_needed(self, myprocessor))
-			reasons = AST_BLOCK;
-		enable_preemption();
-	}
-	if ( (reasons & AST_BLOCK) &&
-		wait_queue_assert_possible(self) ) {
-		counter(c_ast_taken_block++);
-		thread_block_reason(thread_exception_return, AST_BLOCK);
-	}
-
-	goto just_return;
-
-enable_and_return:
-	ml_set_interrupts_enabled(enable);
-
-just_return:
-	return;
+	if (ast_consume(AST_SFI) == AST_SFI) {
+		sfi_ast(thread);
+	}
+#endif
+
+	/* We are about to return to userspace, there must not be a pending wait */
+	assert(waitq_wait_possible(thread));
+
+	/*
+	 * We've handled all per-thread ASTs, time to handle non-urgent preemption.
+	 *
+	 * We delay reading the preemption bits until now in case the thread
+	 * blocks while handling per-thread ASTs.
+	 *
+	 * If one of the AST handlers had managed to set a new AST bit,
+	 * thread_exception_return will call ast_taken again.
+	 */
+	ast_t preemption_reasons = ast_consume(AST_PREEMPTION);
+
+	if (preemption_reasons & AST_PREEMPT) {
+		/* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */
+
+		thread_lock(thread);
+		preemption_reasons = csw_check(current_processor(), (preemption_reasons & AST_QUANTUM));
+		thread_unlock(thread);
+
+#if CONFIG_SCHED_SFI
+		/* csw_check might tell us that SFI is needed */
+		if (preemption_reasons & AST_SFI) {
+			sfi_ast(thread);
+		}
+#endif
+
+		if (preemption_reasons & AST_PREEMPT) {
+			counter(c_ast_taken_block++);
+			/* switching to a continuation implicitly re-enables interrupts */
+			thread_block_reason(thread_preempted, NULL, preemption_reasons);
+			/* NOTREACHED */
+		}
+	}
+
+	splx(s);
 }
 
 /*
- * Called at splsched.
+ * Handle preemption IPI or IPI in response to setting an AST flag
+ * Triggered by cause_ast_check
+ * Called at splsched
  */
 void
-ast_check(
-	processor_t processor)
+ast_check(processor_t processor)
 {
-	register thread_t self = processor->cpu_data->active_thread;
-
-	processor->current_pri = self->sched_pri;
-	if (processor->state == PROCESSOR_RUNNING) {
-		register ast_t preempt;
-processor_running:
-
-		/*
-		 * Propagate thread ast to processor.
-		 */
-		ast_propagate(self->top_act->ast);
-
-		/*
-		 * Context switch check.
-		 */
-		if ((preempt = csw_check(self, processor)) != AST_NONE)
-			ast_on(preempt);
-	}
-	else
-	if ( processor->state == PROCESSOR_DISPATCHING ||
-	     processor->state == PROCESSOR_IDLE ) {
+	if (processor->state != PROCESSOR_RUNNING &&
+	    processor->state != PROCESSOR_SHUTDOWN)
 		return;
+
+	thread_t thread = processor->active_thread;
+
+	assert(thread == current_thread());
+
+	thread_lock(thread);
+
+	/*
+	 * Propagate thread ast to processor.
+	 * (handles IPI in response to setting AST flag)
+	 */
+	ast_propagate(thread);
+
+	boolean_t needs_callout = false;
+	processor->current_pri = thread->sched_pri;
+	processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
+	processor->current_recommended_pset_type = recommended_pset_type(thread);
+	perfcontrol_class_t thread_class = thread_get_perfcontrol_class(thread);
+	if (thread_class != processor->current_perfctl_class) {
+		/* We updated the perfctl class of this thread from another core.
+		 * Since we don't do CLPC callouts from another core, do a callout
+		 * here to let CLPC know that the currently running thread has a new
+		 * class.
+		 */
+		needs_callout = true;
 	}
-	else
-	if (processor->state == PROCESSOR_SHUTDOWN)
-		goto processor_running;
-	else
-	if (processor->state == PROCESSOR_ASSIGN)
-		ast_on(AST_BLOCK);
+	processor->current_perfctl_class = thread_class;
+
+	ast_t preempt;
+
+	if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
+		ast_on(preempt);
+
+	thread_unlock(thread);
+
+	if (needs_callout) {
+		machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
+			mach_approximate_time(), 0, thread);
+	}
+}
+
+/*
+ * Set AST flags on current processor
+ * Called at splsched
+ */
+void
+ast_on(ast_t reasons)
+{
+	ast_t *pending_ast = ast_pending();
+
+	*pending_ast |= reasons;
 }
+
+/*
+ * Clear AST flags on current processor
+ * Called at splsched
+ */
+void
+ast_off(ast_t reasons)
+{
+	ast_t *pending_ast = ast_pending();
+
+	*pending_ast &= ~reasons;
+}
+
+/*
+ * Consume the requested subset of the AST flags set on the processor
+ * Return the bits that were set
+ * Called at splsched
+ */
+ast_t
+ast_consume(ast_t reasons)
+{
+	ast_t *pending_ast = ast_pending();
+
+	reasons &= *pending_ast;
+	*pending_ast &= ~reasons;
+
+	return reasons;
+}
+
+/*
+ * Read the requested subset of the AST flags set on the processor
+ * Return the bits that were set, don't modify the processor
+ * Called at splsched
+ */
+ast_t
+ast_peek(ast_t reasons)
+{
+	ast_t *pending_ast = ast_pending();
+
+	reasons &= *pending_ast;
+
+	return reasons;
+}
+
+/*
+ * Re-set current processor's per-thread AST flags to those set on thread
+ * Called at splsched
+ */
+void
+ast_context(thread_t thread)
+{
+	ast_t *pending_ast = ast_pending();
+
+	*pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
+}
+
+/*
+ * Propagate ASTs set on a thread to the current processor
+ * Called at splsched
+ */
+void
+ast_propagate(thread_t thread)
+{
+	ast_on(thread->ast);
+}
+
+void
+ast_dtrace_on(void)
+{
+	ast_on(AST_DTRACE);
+}
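
The core of this rewrite is the pending-AST interface the diff introduces: ast_on() and ast_off() set and clear bits in the per-processor mask, ast_peek() reads a requested subset without modifying it, and ast_consume() claims a subset, clearing it and returning the bits that were actually set. The user-space sketch below models just those semantics so the peek/consume pattern in ast_taken_kernel() is easy to trace. The AST_* bit values and the AST_PREEMPTION composition follow osfmk/kern/ast.h at this revision; the plain static 'pending' variable is only a stand-in for the mask returned by ast_pending(), which the real code touches with interrupts disabled at splsched.

/* ast_sketch.c: user-space model of the pending-AST primitives.
 * Build: cc -std=c11 -o ast_sketch ast_sketch.c */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t ast_t;

#define AST_PREEMPT	0x01u
#define AST_QUANTUM	0x02u
#define AST_URGENT	0x04u
#define AST_PREEMPTION	(AST_PREEMPT | AST_QUANTUM | AST_URGENT)

static ast_t pending;	/* stand-in for *ast_pending() */

static void ast_on(ast_t reasons)  { pending |= reasons; }
static void ast_off(ast_t reasons) { pending &= ~reasons; }

/* Read the requested bits, leaving the mask untouched */
static ast_t ast_peek(ast_t reasons) { return pending & reasons; }

/* Clear the requested bits and return the ones that were set */
static ast_t ast_consume(ast_t reasons)
{
	reasons &= pending;
	pending &= ~reasons;
	return reasons;
}

int main(void)
{
	ast_on(AST_URGENT | AST_PREEMPT);

	/* peek is side-effect free... */
	assert(ast_peek(AST_URGENT) == AST_URGENT);
	assert(pending == (AST_URGENT | AST_PREEMPT));

	/* ...consume clears exactly what it returns */
	ast_t got = ast_consume(AST_PREEMPTION);
	assert(got == (AST_URGENT | AST_PREEMPT));
	assert(pending == 0);

	printf("consumed 0x%x, pending now 0x%x\n", (unsigned)got, (unsigned)pending);
	return 0;
}

Returning the claimed bits is what lets ast_taken_kernel() hand the exact preemption reasons straight to thread_block_reason(), and it is also why the lone ast_consume(AST_URGENT) in the waitq_wait_possible() == FALSE path is enough to stop the interrupt from re-driving the handler.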
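One subtle piece of ast_taken_user() is the AST_KEVENT hand-off: because no IPI is sent when the flag is set from another processor, the pending bits live in thread->kevent_ast_bits and are drained with a single atomic_exchange (the reason for the new stdatomic.h include). Bits posted concurrently are either claimed by that exchange or remain set for the next pass; none are lost and none are handled twice. The stand-alone model below illustrates the idiom; post_kevent_bit() and handle_kevent_bits() are hypothetical stand-ins for the kernel's setter and kevent_ast(), not XNU interfaces.

/* kevent_bits_sketch.c: claim-by-exchange hand-off.
 * Build: cc -std=c11 -o kevent_bits_sketch kevent_bits_sketch.c */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint16_t kevent_ast_bits;

/* Setter side (e.g. another core posting work): OR a bit in atomically */
static void post_kevent_bit(uint16_t bit)
{
	atomic_fetch_or(&kevent_ast_bits, bit);
}

/* AST side: claim every pending bit exactly once */
static void handle_kevent_bits(void)
{
	uint16_t bits = atomic_exchange(&kevent_ast_bits, 0);
	if (bits)
		printf("handling kevent bits 0x%x\n", (unsigned)bits);
}

int main(void)
{
	post_kevent_bit(0x1);
	post_kevent_bit(0x4);
	handle_kevent_bits();	/* claims 0x5 */
	handle_kevent_bits();	/* exchange returns 0: nothing left to do */
	return 0;
}

The exchange-to-zero pattern gives the handler a lock-free snapshot: a setter racing with the exchange lands its bit either before it (claimed now) or after it (claimed on the next trip through the AST path), which is why this step can safely run with interrupts enabled.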