git.saurik.com Git - apple/xnu.git/blobdiff - osfmk/kern/ast.c
xnu-2422.1.72.tar.gz
[apple/xnu.git] / osfmk / kern / ast.c
index f454fe9a3c353b1bfd6e470a23c8e865c4a91106..ee197f7f65effeb5f636c5ff46a86f2751ca7cbd 100644 (file)
@@ -1,23 +1,29 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License").  You may not use this file except in compliance with the
- * License.  Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  * 
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ * 
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  * 
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
  */
 
 #include <cputypes.h>
-#include <cpus.h>
 #include <platforms.h>
-#include <task_swapper.h>
 
 #include <kern/ast.h>
 #include <kern/counters.h>
 #include <kern/cpu_number.h>
 #include <kern/misc_protos.h>
 #include <kern/queue.h>
-#include <kern/sched.h>
 #include <kern/sched_prim.h>
 #include <kern/thread.h>
-#include <kern/thread_act.h>
-#include <kern/thread_swap.h>
 #include <kern/processor.h>
 #include <kern/spl.h>
+#if CONFIG_TELEMETRY
+#include <kern/telemetry.h>
+#endif
+#include <kern/wait_queue.h>
+#include <kern/ledger.h>
 #include <mach/policy.h>
-#if    TASK_SWAPPER
-#include <kern/task_swap.h>
-#endif /* TASK_SWAPPER */
+#include <machine/trap.h> // for CHUD AST hook
+#include <machine/pal_routines.h>
+#include <security/mac_mach_internal.h> // for MACF AST hook
+
+volatile perfASTCallback perfASTHook;
 
-volatile ast_t need_ast[NCPUS];
 
 void
 ast_init(void)
 {
-#ifndef        MACHINE_AST
-       register int i;
-
-       for (i=0; i<NCPUS; i++) {
-               need_ast[i] = AST_NONE;
-       }
-#endif /* MACHINE_AST */
 }
 
+extern void chudxnu_thread_ast(thread_t); // XXX this should probably be in a header...
+
+/*
+ * Called at splsched.
+ */
 void
 ast_taken(
-       boolean_t               preemption,
-       ast_t                   mask,
-       boolean_t               interrupt
+       ast_t           reasons,
+       boolean_t       enable
 )
 {
-       register thread_t       self = current_thread();
-       register processor_t    mypr;
-       register ast_t          reasons;
-       register int            mycpu;
-       thread_act_t            act = self->top_act;
-#ifdef MACH_BSD
-       extern void             bsd_ast(thread_act_t);
-       extern void             bsdinit_task(void);
-#endif
-
-       mp_disable_preemption();
-       mycpu = cpu_number();
-       reasons = need_ast[mycpu] & mask;
-       need_ast[mycpu] &= ~reasons;
-       mp_enable_preemption();
-
-       ml_set_interrupts_enabled(interrupt);
+       boolean_t               preempt_trap = (reasons == AST_PREEMPTION);
+       ast_t                   *myast = ast_pending();
+       thread_t                thread = current_thread();
+       perfASTCallback perf_hook = perfASTHook;
 
        /*
-        * No ast for an idle thread
+        * CHUD hook - all threads including idle processor threads
         */
-       if (self->state & TH_IDLE)
-               return;
+       if (perf_hook) {
+               if (*myast & AST_CHUD_ALL) {
+                       (*perf_hook)(reasons, myast);
+                       
+                       if (*myast == AST_NONE)
+                               return;
+               }
+       }
+       else
+               *myast &= ~AST_CHUD_ALL;
+
+       reasons &= *myast;
+       *myast &= ~reasons;
 
        /*
-        * Check for preemption
+        * Handle ASTs for all threads
+        * except idle processor threads.
         */
-       if ((reasons & AST_URGENT) && (wait_queue_assert_possible(self))) {
-               reasons &= ~AST_URGENT;
-               if ((reasons & (AST_BLOCK|AST_QUANTUM)) == 0) {
-                       mp_disable_preemption();
-                       mypr = current_processor();
-                       if (csw_needed(self, mypr)) {
-                               reasons |= (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
+       if (!(thread->state & TH_IDLE)) {
+               /*
+                * Check for urgent preemption.
+                */
+               if (    (reasons & AST_URGENT)                          &&
+                               wait_queue_assert_possible(thread)              ) {
+                       if (reasons & AST_PREEMPT) {
+                               counter(c_ast_taken_block++);
+                               thread_block_reason(THREAD_CONTINUE_NULL, NULL,
+                                                                               AST_PREEMPT | AST_URGENT);
                        }
-                       mp_enable_preemption();
-               }
-               if (reasons & (AST_BLOCK | AST_QUANTUM)) {
-                       counter(c_ast_taken_block++);
-                       thread_block_reason((void (*)(void))0, 
-                                                               (reasons & (AST_BLOCK | AST_QUANTUM)));
+
+                       reasons &= ~AST_PREEMPTION;
                }
-               if (reasons == 0)
-                       return;
-       }
+
+               /*
+                * The kernel preempt traps
+                * skip all other ASTs.
+                */
+               if (!preempt_trap) {
+                       ml_set_interrupts_enabled(enable);
 
 #ifdef MACH_BSD
-       /*
-        * Check for BSD hardcoded hooks 
-        */
-       if (reasons & AST_BSD) {
-               thread_ast_clear(act,AST_BSD);
-               bsd_ast(act);
-       }
-       if (reasons & AST_BSD_INIT) {
-               thread_ast_clear(act,AST_BSD_INIT);
-               bsdinit_task();
-       }
+                       /*
+                        * Handle BSD hook.
+                        */
+                       if (reasons & AST_BSD) {
+                               thread_ast_clear(thread, AST_BSD);
+                               bsd_ast(thread);
+                       }
+#endif
+#if CONFIG_MACF
+                       /*
+                        * Handle MACF hook.
+                        */
+                       if (reasons & AST_MACF) {
+                               thread_ast_clear(thread, AST_MACF);
+                               mac_thread_userret(thread);
+                       }
 #endif
+                       /* 
+                        * Thread APC hook.
+                        */
+                       if (reasons & AST_APC)
+                               act_execute_returnhandlers();
+                       
+                       if (reasons & AST_GUARD) {
+                               thread_ast_clear(thread, AST_GUARD);
+                               guard_ast(thread);
+                       }
+                       
+                       if (reasons & AST_LEDGER) {
+                               thread_ast_clear(thread, AST_LEDGER);
+                               ledger_ast(thread);
+                       }
 
-#if    TASK_SWAPPER
-       /* must be before AST_APC */
-       if (reasons & AST_SWAPOUT) {
-               spl_t s;
-               swapout_ast();
-               s = splsched();
-               mp_disable_preemption();
-               mycpu = cpu_number();
-               if (need_ast[mycpu] & AST_APC) {
-                       /* generated in swapout_ast() to get suspended */
-                       reasons |= AST_APC;             /* process now ... */
-                       need_ast[mycpu] &= ~AST_APC;    /* ... and not later */
-               }
-               mp_enable_preemption();
-               splx(s);
-       }
-#endif /* TASK_SWAPPER */
+                       /*
+                        * Kernel Profiling Hook
+                        */
+                       if (reasons & AST_KPERF) {
+                               thread_ast_clear(thread, AST_KPERF);
+                               chudxnu_thread_ast(thread);
+                       }
 
-       /* 
-        * migration APC hook 
-        */
-       if (reasons & AST_APC) {
-               act_execute_returnhandlers();
-       }
+#if CONFIG_TELEMETRY
+                       if (reasons & AST_TELEMETRY_ALL) {
+                               boolean_t interrupted_userspace;
 
-       /* 
-        *      thread_block needs to know if the thread's quantum 
-        *      expired so the thread can be put on the tail of
-        *      run queue. One of the previous actions might well
-        *      have woken a high-priority thread, so we also use
-        *      csw_needed check.
-        */
-       reasons &= (AST_BLOCK | AST_QUANTUM);
-       if (reasons == 0) {
-               mp_disable_preemption();
-               mypr = current_processor();
-               if (csw_needed(self, mypr)) {
-                       reasons = (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
+                               assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
+                               interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
+                               thread_ast_clear(thread, AST_TELEMETRY_ALL);
+                               telemetry_ast(thread, interrupted_userspace);
+                       }
+#endif
+
+                       ml_set_interrupts_enabled(FALSE);
+
+                       /* 
+                        * Check for preemption.
+                        */
+                       if (reasons & AST_PREEMPT)
+                               reasons = csw_check(current_processor());
+
+                       if (    (reasons & AST_PREEMPT)                         &&
+                                       wait_queue_assert_possible(thread)              ) {             
+                               counter(c_ast_taken_block++);
+                               thread_block_reason((thread_continue_t)thread_exception_return, NULL, AST_PREEMPT);
+                       }
                }
-               mp_enable_preemption();
-       }
-       if ((reasons & (AST_BLOCK | AST_QUANTUM)) &&
-           (wait_queue_assert_possible(self))) {
-               counter(c_ast_taken_block++);
-               /*
-                * JMM - SMP machines don't like blocking at a continuation
-                * here - why not?  Could be a combination of set_state and
-                * suspension on the thread_create_running API?
-                *
-                * thread_block_reason(thread_exception_return, reasons);
-                */
-               thread_block_reason((void (*)(void))0, reasons);
        }
+
+       ml_set_interrupts_enabled(enable);
 }
 
+/*
+ * Called at splsched.
+ */
 void
-ast_check(void)
+ast_check(
+       processor_t             processor)
 {
-       register int            mycpu;
-       register processor_t    myprocessor;
-       register thread_t       thread = current_thread();
-       spl_t                   s = splsched();
+       thread_t                        thread = processor->active_thread;
 
-       mp_disable_preemption();
-       mycpu = cpu_number();
+       processor->current_pri = thread->sched_pri;
+       processor->current_thmode = thread->sched_mode;
+       if (    processor->state == PROCESSOR_RUNNING           ||
+                       processor->state == PROCESSOR_SHUTDOWN          ) {
+               ast_t                   preempt;
 
-       /*
-        *      Check processor state for ast conditions.
-        */
-       myprocessor = cpu_to_processor(mycpu);
-       switch(myprocessor->state) {
-           case PROCESSOR_OFF_LINE:
-           case PROCESSOR_IDLE:
-           case PROCESSOR_DISPATCHING:
                /*
-                *      No ast.
+                *      Propagate thread ast to processor.
                 */
-               break;
+               pal_ast_check(thread);
 
-#if    NCPUS > 1
-           case PROCESSOR_ASSIGN:
-               /*
-                *      Need ast to force action thread onto processor.
-                *
-                * XXX  Should check if action thread is already there.
-                */
-               ast_on(AST_BLOCK);
-               break;
-#endif /* NCPUS > 1 */
-
-           case PROCESSOR_RUNNING:
-           case PROCESSOR_SHUTDOWN:
-               /*
-                *      Propagate thread ast to processor.  If we already
-                *      need an ast, don't look for more reasons.
-                */
-               ast_propagate(current_act()->ast);
-               if (ast_needed(mycpu))
-                       break;
+               ast_propagate(thread->ast);
 
                /*
                 *      Context switch check.
                 */
-               if (csw_needed(thread, myprocessor)) {
-                       ast_on((myprocessor->first_quantum ?
-                              AST_BLOCK : AST_QUANTUM));
-               }
-               break;
-
-           default:
-               panic("ast_check: Bad processor state");
+               if ((preempt = csw_check(processor)) != AST_NONE)
+                       ast_on(preempt);
        }
-       mp_enable_preemption();
-       splx(s);
-}
-
-/*
- * JMM - Temporary exports to other components
- */
-#undef ast_on
-#undef ast_off
-
-void
-ast_on(ast_t reason)
-{
-       boolean_t       intr;
-
-       intr = ml_set_interrupts_enabled(FALSE);
-       ast_on_fast(reason);
-       (void *)ml_set_interrupts_enabled(intr);
-}
-
-void
-ast_off(ast_t reason)
-{
-       ast_off_fast(reason);
 }