diff --git a/osfmk/kern/ast.c b/osfmk/kern/ast.c
index ee197f7f65effeb5f636c5ff46a86f2751ca7cbd..8f282ce580f316b6efb6dbfc893a38328dbf96c8 100644
--- a/osfmk/kern/ast.c
+++ b/osfmk/kern/ast.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
  * any improvements or extensions that they make and grant Carnegie Mellon
  * the rights to redistribute these changes.
  */
-/* 
- */
-
-/*
- *
- *     This file contains routines to check whether an ast is needed.
- *
- *     ast_check() - check whether ast is needed for interrupt or context
- *     switch.  Usually called by clock interrupt handler.
- *
- */
-
-#include <cputypes.h>
-#include <platforms.h>
 
 #include <kern/ast.h>
 #include <kern/counters.h>
-#include <kern/cpu_number.h>
 #include <kern/misc_protos.h>
 #include <kern/queue.h>
 #include <kern/sched_prim.h>
 #include <kern/thread.h>
 #include <kern/processor.h>
 #include <kern/spl.h>
+#include <kern/sfi.h>
 #if CONFIG_TELEMETRY
 #include <kern/telemetry.h>
 #endif
-#include <kern/wait_queue.h>
+#include <kern/waitq.h>
 #include <kern/ledger.h>
+#include <kern/machine.h>
+#include <kperf/kperf_kpc.h>
 #include <mach/policy.h>
-#include <machine/trap.h> // for CHUD AST hook
-#include <machine/pal_routines.h>
 #include <security/mac_mach_internal.h> // for MACF AST hook
+#include <stdatomic.h>
 
-volatile perfASTCallback perfASTHook;
-
+static void __attribute__((noinline, noreturn, disable_tail_calls))
+thread_preempted(__unused void* parameter, __unused wait_result_t result)
+{
+       /*
+        * We've been scheduled again after a userspace preemption,
+        * try again to return to userspace.
+        */
+       thread_exception_return();
+}
 
+/*
+ * AST_URGENT was detected while in kernel mode
+ * Called with interrupts disabled, returns the same way
+ * Must return to caller
+ */
 void
-ast_init(void)
+ast_taken_kernel(void)
 {
-}
+       assert(ml_get_interrupts_enabled() == FALSE);
 
-extern void chudxnu_thread_ast(thread_t); // XXX this should probably be in a header...
+       thread_t thread = current_thread();
+
+       /* Idle threads handle preemption themselves */
+       if ((thread->state & TH_IDLE)) {
+               ast_off(AST_PREEMPTION);
+               return;
+       }
+
+       /*
+        * It's possible for this to be called after AST_URGENT
+        * has already been handled, due to races in enable_preemption
+        */
+       if (ast_peek(AST_URGENT) != AST_URGENT)
+               return;
+
+       /*
+        * Don't preempt if the thread is already preparing to block.
+        * TODO: the thread can cheese this with clear_wait()
+        */
+       if (waitq_wait_possible(thread) == FALSE) {
+               /* Consume AST_URGENT or the interrupt will call us again */
+               ast_consume(AST_URGENT);
+               return;
+       }
+
+       /* TODO: Should we csw_check again to notice if conditions have changed? */
+
+       ast_t urgent_reason = ast_consume(AST_PREEMPTION);
+
+       assert(urgent_reason & AST_PREEMPT);
+
+       counter(c_ast_taken_block++);
+
+       thread_block_reason(THREAD_CONTINUE_NULL, NULL, urgent_reason);
+
+       assert(ml_get_interrupts_enabled() == FALSE);
+}
 
 /*
- * Called at splsched.
+ * An AST flag was set while returning to user mode
+ * Called with interrupts disabled, returns with interrupts enabled
+ * May call continuation instead of returning
  */
 void
-ast_taken(
-       ast_t           reasons,
-       boolean_t       enable
-)
+ast_taken_user(void)
 {
-       boolean_t               preempt_trap = (reasons == AST_PREEMPTION);
-       ast_t                   *myast = ast_pending();
-       thread_t                thread = current_thread();
-       perfASTCallback perf_hook = perfASTHook;
+       assert(ml_get_interrupts_enabled() == FALSE);
+
+       thread_t thread = current_thread();
+
+       /* We are about to return to userspace, there must not be a pending wait */
+       assert(waitq_wait_possible(thread));
+       assert((thread->state & TH_IDLE) == 0);
+
+       /* TODO: Add more 'return to userspace' assertions here */
 
        /*
-        * CHUD hook - all threads including idle processor threads
+        * If this thread was urgently preempted in userspace,
+        * take the preemption before processing the ASTs.
+        * The trap handler will call us again if we have more ASTs, so it's
+        * safe to block in a continuation here.
         */
-       if (perf_hook) {
-               if (*myast & AST_CHUD_ALL) {
-                       (*perf_hook)(reasons, myast);
-                       
-                       if (*myast == AST_NONE)
-                               return;
-               }
+       if (ast_peek(AST_URGENT) == AST_URGENT) {
+               ast_t urgent_reason = ast_consume(AST_PREEMPTION);
+
+               assert(urgent_reason & AST_PREEMPT);
+
+               /* TODO: Should we csw_check again to notice if conditions have changed? */
+
+               thread_block_reason(thread_preempted, NULL, urgent_reason);
+               /* NOTREACHED */
        }
-       else
-               *myast &= ~AST_CHUD_ALL;
 
-       reasons &= *myast;
-       *myast &= ~reasons;
+       /*
+        * AST_KEVENT does not send an IPI when setting the ast for a thread running in parallel
+        * on a different processor. Only the ast bit on the thread will be set.
+        *
+        * Force a propagate for concurrent updates without an IPI.
+        */
+       ast_propagate(thread);
 
        /*
-        * Handle ASTs for all threads
-        * except idle processor threads.
+        * Consume all non-preemption processor ASTs matching reasons
+        * because we're handling them here.
+        *
+        * If one of the AST handlers blocks in a continuation,
+        * we'll reinstate the unserviced thread-level AST flags
+        * from the thread to the processor on context switch.
+        * If one of the AST handlers sets another AST,
+        * the trap handler will call ast_taken_user again.
+        *
+        * We expect the AST handlers not to thread_exception_return
+        * without an ast_propagate or context switch to reinstate
+        * the per-processor ASTs.
+        *
+        * TODO: Why are AST_DTRACE and AST_KPERF not per-thread ASTs?
         */
-       if (!(thread->state & TH_IDLE)) {
-               /*
-                * Check for urgent preemption.
-                */
-               if (    (reasons & AST_URGENT)                          &&
-                               wait_queue_assert_possible(thread)              ) {
-                       if (reasons & AST_PREEMPT) {
-                               counter(c_ast_taken_block++);
-                               thread_block_reason(THREAD_CONTINUE_NULL, NULL,
-                                                                               AST_PREEMPT | AST_URGENT);
-                       }
-
-                       reasons &= ~AST_PREEMPTION;
-               }
+       ast_t reasons = ast_consume(AST_PER_THREAD | AST_KPERF | AST_DTRACE);
+
+       ml_set_interrupts_enabled(TRUE);
+
+#if CONFIG_DTRACE
+       if (reasons & AST_DTRACE) {
+               dtrace_ast();
+       }
+#endif
 
-               /*
-                * The kernel preempt traps
-                * skip all other ASTs.
-                */
-               if (!preempt_trap) {
-                       ml_set_interrupts_enabled(enable);
-
-#ifdef MACH_BSD
-                       /*
-                        * Handle BSD hook.
-                        */
-                       if (reasons & AST_BSD) {
-                               thread_ast_clear(thread, AST_BSD);
-                               bsd_ast(thread);
-                       }
+#ifdef MACH_BSD
+       if (reasons & AST_BSD) {
+               thread_ast_clear(thread, AST_BSD);
+               bsd_ast(thread);
+       }
 #endif
+
 #if CONFIG_MACF
-                       /*
-                        * Handle MACF hook.
-                        */
-                       if (reasons & AST_MACF) {
-                               thread_ast_clear(thread, AST_MACF);
-                               mac_thread_userret(thread);
-                       }
+       if (reasons & AST_MACF) {
+               thread_ast_clear(thread, AST_MACF);
+               mac_thread_userret(thread);
+       }
 #endif
-                       /* 
-                        * Thread APC hook.
-                        */
-                       if (reasons & AST_APC)
-                               act_execute_returnhandlers();
-                       
-                       if (reasons & AST_GUARD) {
-                               thread_ast_clear(thread, AST_GUARD);
-                               guard_ast(thread);
-                       }
-                       
-                       if (reasons & AST_LEDGER) {
-                               thread_ast_clear(thread, AST_LEDGER);
-                               ledger_ast(thread);
-                       }
-
-                       /*
-                        * Kernel Profiling Hook
-                        */
-                       if (reasons & AST_KPERF) {
-                               thread_ast_clear(thread, AST_KPERF);
-                               chudxnu_thread_ast(thread);
-                       }
+
+       if (reasons & AST_APC) {
+               thread_ast_clear(thread, AST_APC);
+               thread_apc_ast(thread);
+       }
+
+       if (reasons & AST_GUARD) {
+               thread_ast_clear(thread, AST_GUARD);
+               guard_ast(thread);
+       }
+
+       if (reasons & AST_LEDGER) {
+               thread_ast_clear(thread, AST_LEDGER);
+               ledger_ast(thread);
+       }
+
+       if (reasons & AST_KPERF) {
+               thread_ast_clear(thread, AST_KPERF);
+               kperf_kpc_thread_ast(thread);
+       }
+
+       if (reasons & AST_KEVENT) {
+               thread_ast_clear(thread, AST_KEVENT);
+               uint16_t bits = atomic_exchange(&thread->kevent_ast_bits, 0);
+               if (bits) kevent_ast(thread, bits);
+       }
 
 #if CONFIG_TELEMETRY
-                       if (reasons & AST_TELEMETRY_ALL) {
-                               boolean_t interrupted_userspace;
-
-                               assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
-                               interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
-                               thread_ast_clear(thread, AST_TELEMETRY_ALL);
-                               telemetry_ast(thread, interrupted_userspace);
-                       }
+       if (reasons & AST_TELEMETRY_ALL) {
+               ast_t telemetry_reasons = reasons & AST_TELEMETRY_ALL;
+               thread_ast_clear(thread, AST_TELEMETRY_ALL);
+               telemetry_ast(thread, telemetry_reasons);
+       }
+#endif
+
+       spl_t s = splsched();
+
+#if CONFIG_SCHED_SFI
+       /*
+        * SFI is currently a per-processor AST, not a per-thread AST
+        *      TODO: SFI should be a per-thread AST
+        */
+       if (ast_consume(AST_SFI) == AST_SFI) {
+               sfi_ast(thread);
+       }
 #endif
 
-                       ml_set_interrupts_enabled(FALSE);
+       /* We are about to return to userspace, there must not be a pending wait */
+       assert(waitq_wait_possible(thread));
+
+       /*
+        * We've handled all per-thread ASTs, time to handle non-urgent preemption.
+        *
+        * We delay reading the preemption bits until now in case the thread
+        * blocks while handling per-thread ASTs.
+        *
+        * If one of the AST handlers had managed to set a new AST bit,
+        * thread_exception_return will call ast_taken again.
+        */
+       ast_t preemption_reasons = ast_consume(AST_PREEMPTION);
+
+       if (preemption_reasons & AST_PREEMPT) {
+               /* Conditions may have changed from when the AST_PREEMPT was originally set, so re-check. */
 
-                       /* 
-                        * Check for preemption.
-                        */
-                       if (reasons & AST_PREEMPT)
-                               reasons = csw_check(current_processor());
+               thread_lock(thread);
+               preemption_reasons = csw_check(current_processor(), (preemption_reasons & AST_QUANTUM));
+               thread_unlock(thread);
 
-                       if (    (reasons & AST_PREEMPT)                         &&
-                                       wait_queue_assert_possible(thread)              ) {             
-                               counter(c_ast_taken_block++);
-                               thread_block_reason((thread_continue_t)thread_exception_return, NULL, AST_PREEMPT);
-                       }
+#if CONFIG_SCHED_SFI
+               /* csw_check might tell us that SFI is needed */
+               if (preemption_reasons & AST_SFI) {
+                       sfi_ast(thread);
+               }
+#endif
+
+               if (preemption_reasons & AST_PREEMPT) {
+                       counter(c_ast_taken_block++);
+                       /* switching to a continuation implicitly re-enables interrupts */
+                       thread_block_reason(thread_preempted, NULL, preemption_reasons);
+                       /* NOTREACHED */
                }
        }
 
-       ml_set_interrupts_enabled(enable);
+       splx(s);
 }
 
 /*
- * Called at splsched.
+ * Handle preemption IPI or IPI in response to setting an AST flag
+ * Triggered by cause_ast_check
+ * Called at splsched
  */
 void
-ast_check(
-       processor_t             processor)
+ast_check(processor_t processor)
 {
-       thread_t                        thread = processor->active_thread;
+       if (processor->state != PROCESSOR_RUNNING &&
+           processor->state != PROCESSOR_SHUTDOWN)
+               return;
+
+       thread_t thread = processor->active_thread;
 
+       assert(thread == current_thread());
+
+       thread_lock(thread);
+
+       /*
+        * Propagate thread ast to processor.
+        * (handles IPI in response to setting AST flag)
+        */
+       ast_propagate(thread);
+
+       boolean_t needs_callout = false;
        processor->current_pri = thread->sched_pri;
-       processor->current_thmode = thread->sched_mode;
-       if (    processor->state == PROCESSOR_RUNNING           ||
-                       processor->state == PROCESSOR_SHUTDOWN          ) {
-               ast_t                   preempt;
-
-               /*
-                *      Propagate thread ast to processor.
-                */
-               pal_ast_check(thread);
-
-               ast_propagate(thread->ast);
-
-               /*
-                *      Context switch check.
-                */
-               if ((preempt = csw_check(processor)) != AST_NONE)
-                       ast_on(preempt);
+       processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
+       processor->current_recommended_pset_type = recommended_pset_type(thread);
+       perfcontrol_class_t thread_class = thread_get_perfcontrol_class(thread);
+       if (thread_class != processor->current_perfctl_class) {
+           /* We updated the perfctl class of this thread from another core.
+            * Since we don't do CLPC callouts from another core, do a callout
+            * here to let CLPC know that the currently running thread has a new
+            * class.
+            */
+           needs_callout = true;
        }
+       processor->current_perfctl_class = thread_class;
+
+       ast_t preempt;
+
+       if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
+               ast_on(preempt);
+
+       thread_unlock(thread);
+
+       if (needs_callout) {
+           machine_switch_perfcontrol_state_update(PERFCONTROL_ATTR_UPDATE,
+                   mach_approximate_time(), 0, thread);
+       }
+}
+
+/*
+ * Set AST flags on current processor
+ * Called at splsched
+ */
+void
+ast_on(ast_t reasons)
+{
+       ast_t *pending_ast = ast_pending();
+
+       *pending_ast |= reasons;
+}
+
+/*
+ * Clear AST flags on current processor
+ * Called at splsched
+ */
+void
+ast_off(ast_t reasons)
+{
+       ast_t *pending_ast = ast_pending();
+
+       *pending_ast &= ~reasons;
 }
+
+/*
+ * Consume the requested subset of the AST flags set on the processor
+ * Return the bits that were set
+ * Called at splsched
+ */
+ast_t
+ast_consume(ast_t reasons)
+{
+       ast_t *pending_ast = ast_pending();
+
+       reasons &= *pending_ast;
+       *pending_ast &= ~reasons;
+
+       return reasons;
+}
+
+/*
+ * Read the requested subset of the AST flags set on the processor
+ * Return the bits that were set, don't modify the processor
+ * Called at splsched
+ */
+ast_t
+ast_peek(ast_t reasons)
+{
+       ast_t *pending_ast = ast_pending();
+
+       reasons &= *pending_ast;
+
+       return reasons;
+}
+
+/*
+ * Re-set current processor's per-thread AST flags to those set on thread
+ * Called at splsched
+ */
+void
+ast_context(thread_t thread)
+{
+       ast_t *pending_ast = ast_pending();
+
+       *pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
+}
+
+/*
+ * Propagate ASTs set on a thread to the current processor
+ * Called at splsched
+ */
+void
+ast_propagate(thread_t thread)
+{
+       ast_on(thread->ast);
+}
+
+void
+ast_dtrace_on(void)
+{
+       ast_on(AST_DTRACE);
+}
+
+
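
Note on the AST_KEVENT handling above: the atomic_exchange on thread->kevent_ast_bits follows a common lock-free pattern. A remote CPU ORs bits into a shared word without sending an IPI, and the owning thread later claims every pending bit in a single exchange, so no update can be lost or serviced twice. The sketch below is a simplified userland model of that pattern under those assumptions; it is not XNU code, and the names remote_set_kevent_bits and service_kevent_ast are hypothetical.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for thread->kevent_ast_bits (illustrative only, not XNU). */
static _Atomic uint16_t kevent_ast_bits;

/* A remote CPU marks pending kevent work; no IPI is sent, only the bits are set. */
static void
remote_set_kevent_bits(uint16_t bits)
{
	atomic_fetch_or(&kevent_ast_bits, bits);
}

/* The owning thread, on its way back to userspace, claims all pending bits at once. */
static void
service_kevent_ast(void)
{
	uint16_t bits = atomic_exchange(&kevent_ast_bits, 0);
	if (bits)
		printf("servicing kevent bits 0x%x\n", bits);
}

int
main(void)
{
	remote_set_kevent_bits(0x3);
	service_kevent_ast();   /* prints: servicing kevent bits 0x3 */
	service_kevent_ast();   /* nothing pending, does nothing */
	return 0;
}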