diff --git a/osfmk/kern/ast.c b/osfmk/kern/ast.c
index df540900519dfc93ffd1b49c09e3c1c9acc4b526..7540375ce694f60f0e9d55a1e32aff055d3547ca 100644
--- a/osfmk/kern/ast.c
+++ b/osfmk/kern/ast.c
@@ -90,134 +90,105 @@ ast_init(void)
 #endif /* MACHINE_AST */
 }
 
+/*
+ * Called at splsched.
+ */
 void
 ast_taken(
        ast_t                   reasons,
        boolean_t               enable
 )
 {
-       register int                    mycpu;
-       register processor_t    myprocessor;
        register thread_t               self = current_thread();
-       boolean_t                               preempt_trap = (reasons == AST_PREEMPT);
+       register int                    mycpu = cpu_number();
+       boolean_t                               preempt_trap = (reasons == AST_PREEMPTION);
 
-       disable_preemption();
-       mycpu = cpu_number();
        reasons &= need_ast[mycpu];
        need_ast[mycpu] &= ~reasons;
-       enable_preemption();
 
        /*
-        * No ast for an idle thread
+        * Handle ASTs for all threads
+        * except idle processor threads.
         */
-       if (self->state & TH_IDLE)
-               goto enable_and_return;
-
-       /*
-        * Check for urgent preemption
-        */
-       if ((reasons & AST_URGENT) && wait_queue_assert_possible(self)) {
-               if (reasons & AST_BLOCK) {
-                       counter(c_ast_taken_block++);
-                       thread_block_reason((void (*)(void))0, AST_BLOCK);
+       if (!(self->state & TH_IDLE)) {
+               /*
+                * Check for urgent preemption.
+                */
+               if (    (reasons & AST_URGENT)                          &&
+                               wait_queue_assert_possible(self)                ) {
+                       if (reasons & AST_PREEMPT) {
+                               counter(c_ast_taken_block++);
+                               thread_block_reason(THREAD_CONTINUE_NULL,
+                                                                               AST_PREEMPT | AST_URGENT);
+                       }
+
+                       reasons &= ~AST_PREEMPTION;
                }
 
-               reasons &= ~AST_PREEMPT;
-               if (reasons == 0)
-                       goto enable_and_return;
-       }
-
-       if (preempt_trap)
-               goto enable_and_return;
-
-       ml_set_interrupts_enabled(enable);
+               /*
+                * The kernel preempt traps
+                * skip all other ASTs.
+                */
+               if (!preempt_trap) {
+                       ml_set_interrupts_enabled(enable);
 
 #ifdef MACH_BSD
-       /*
-        * Check for BSD hardcoded hooks 
-        */
-       if (reasons & AST_BSD) {
-               extern void             bsd_ast(thread_act_t    act);
-               thread_act_t    act = self->top_act;
-
-               thread_ast_clear(act, AST_BSD);
-               bsd_ast(act);
-       }
-       if (reasons & AST_BSD_INIT) {
-               extern void             bsdinit_task(void);
-
-               thread_ast_clear(self->top_act, AST_BSD_INIT);
-               bsdinit_task();
-       }
+                       /*
+                        * Handle BSD hook.
+                        */
+                       if (reasons & AST_BSD) {
+                               extern void             bsd_ast(thread_act_t    act);
+                               thread_act_t    act = self->top_act;
+
+                               thread_ast_clear(act, AST_BSD);
+                               bsd_ast(act);
+                       }
 #endif
 
-       /* 
-        * migration APC hook 
-        */
-       if (reasons & AST_APC) {
-               act_execute_returnhandlers();
-       }
-
-       /* 
-        * Check for normal preemption
-        */
-       reasons &= AST_BLOCK;
-    if (reasons == 0) {
-        disable_preemption();
-        myprocessor = current_processor();
-        if (csw_needed(self, myprocessor))
-            reasons = AST_BLOCK;
-        enable_preemption();
-    }
-       if (    (reasons & AST_BLOCK)                           &&
-                       wait_queue_assert_possible(self)                ) {             
-               counter(c_ast_taken_block++);
-               thread_block_reason(thread_exception_return, AST_BLOCK);
+                       /* 
+                        * Thread APC hook.
+                        */
+                       if (reasons & AST_APC)
+                               act_execute_returnhandlers();
+
+                       ml_set_interrupts_enabled(FALSE);
+
+                       /* 
+                        * Check for preemption.
+                        */
+                       if (reasons & AST_PREEMPT) {
+                               processor_t             myprocessor = current_processor();
+
+                               if (csw_needed(self, myprocessor))
+                                       reasons = AST_PREEMPT;
+                               else
+                                       reasons = AST_NONE;
+                       }
+                       if (    (reasons & AST_PREEMPT)                         &&
+                                       wait_queue_assert_possible(self)                ) {             
+                               counter(c_ast_taken_block++);
+                               thread_block_reason(thread_exception_return, AST_PREEMPT);
+                       }
+               }
        }
 
-       goto just_return;
-
-enable_and_return:
-    ml_set_interrupts_enabled(enable);
-
-just_return:
-       return;
+       ml_set_interrupts_enabled(enable);
 }
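
The rewritten ast_taken() above keeps the old consume-and-dispatch pattern: mask the requested reasons against the per-CPU need_ast[] word, clear the bits it consumed, then act on each reason in turn (urgent preemption, the BSD hook, APC return handlers, ordinary preemption). A minimal stand-alone sketch of that pattern follows; the DEMO_AST_* flags, pending_ast and demo_ast_taken() are hypothetical stand-ins for illustration only, and the sketch ignores interrupt and preemption state entirely:

    #include <stdio.h>

    typedef unsigned int ast_t;

    #define DEMO_AST_NONE       0x0u
    #define DEMO_AST_PREEMPT    0x1u
    #define DEMO_AST_URGENT     0x2u
    #define DEMO_AST_BSD        0x4u
    #define DEMO_AST_APC        0x8u

    /* Stand-in for the per-CPU need_ast[cpu_number()] word. */
    static ast_t pending_ast = DEMO_AST_BSD | DEMO_AST_PREEMPT;

    static void demo_ast_taken(ast_t requested)
    {
        /* Consume only the reasons the caller asked about. */
        ast_t reasons = requested & pending_ast;
        pending_ast &= ~reasons;

        if (reasons & DEMO_AST_URGENT)
            printf("urgent preemption\n");
        if (reasons & DEMO_AST_BSD)
            printf("BSD hook\n");
        if (reasons & DEMO_AST_APC)
            printf("APC return handlers\n");
        if (reasons & DEMO_AST_PREEMPT)
            printf("ordinary preemption check\n");
    }

    int main(void)
    {
        demo_ast_taken(DEMO_AST_BSD | DEMO_AST_APC);    /* consumes only the BSD bit */
        return 0;
    }

Built as an ordinary user program, the call in main() consumes only the BSD bit and leaves DEMO_AST_PREEMPT latched in pending_ast for a later pass, mirroring how reasons a trap was not asked about stay pending in need_ast[].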
 
+/*
+ * Called at splsched.
+ */
 void
-ast_check(void)
+ast_check(
+       processor_t             processor)
 {
-       register int                    mycpu;
-       register processor_t    myprocessor;
-       register thread_t               self = current_thread();
-       spl_t                                   s;
+       register thread_t               self = processor->active_thread;
 
-       s = splsched();
-       mycpu = cpu_number();
+       processor->current_pri = self->sched_pri;
+       if (processor->state == PROCESSOR_RUNNING) {
+               register ast_t          preempt;
+processor_running:
 
-       /*
-        *      Check processor state for ast conditions.
-        */
-       myprocessor = cpu_to_processor(mycpu);
-       switch (myprocessor->state) {
-
-       case PROCESSOR_OFF_LINE:
-       case PROCESSOR_IDLE:
-       case PROCESSOR_DISPATCHING:
-               /*
-                *      No ast.
-                */
-               break;
-
-       case PROCESSOR_ASSIGN:
-        /*
-                *      Need ast to force action thread onto processor.
-                */
-               ast_on(AST_BLOCK);
-               break;
-
-       case PROCESSOR_RUNNING:
-       case PROCESSOR_SHUTDOWN:
                /*
                 *      Propagate thread ast to processor.
                 */
@@ -226,35 +197,15 @@ ast_check(void)
                /*
                 *      Context switch check.
                 */
-               if (csw_needed(self, myprocessor))
-                       ast_on(AST_BLOCK);
-               break;
-
-       default:
-        panic("ast_check: Bad processor state");
+               if ((preempt = csw_check(self, processor)) != AST_NONE)
+                       ast_on(preempt);
        }
-
-       splx(s);
-}
-
-/*
- * JMM - Temporary exports to other components
- */
-#undef ast_on
-#undef ast_off
-
-void
-ast_on(ast_t reason)
-{
-       boolean_t               enable;
-
-       enable = ml_set_interrupts_enabled(FALSE);
-       ast_on_fast(reason);
-       (void)ml_set_interrupts_enabled(enable);
-}
-
-void
-ast_off(ast_t reason)
-{
-       ast_off_fast(reason);
+       else
+       if (    processor->state == PROCESSOR_DISPATCHING       ||
+                       processor->state == PROCESSOR_IDLE                      ) {
+               return;
+       }
+       else
+       if (processor->state == PROCESSOR_SHUTDOWN)
+               goto processor_running;
 }
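
Unrolled, the new ast_check() arms a preemption AST only when the processor is effectively running: PROCESSOR_SHUTDOWN reuses the running path via the goto, PROCESSOR_DISPATCHING and PROCESSOR_IDLE return without doing anything, and any other state simply falls through. The same decision reads more naturally as a switch; the sketch below is a stand-alone paraphrase with DEMO_* constants and stub_* functions as hypothetical stand-ins, and it omits the current_pri update and the thread-to-processor AST propagation of the real routine:

    #include <stdio.h>

    typedef unsigned int ast_t;

    #define DEMO_AST_NONE       0x0u
    #define DEMO_AST_PREEMPT    0x1u

    enum demo_proc_state { DEMO_RUNNING, DEMO_SHUTDOWN, DEMO_DISPATCHING, DEMO_IDLE };

    /* Pretend the scheduler always wants a context switch. */
    static ast_t stub_csw_check(void) { return DEMO_AST_PREEMPT; }

    static void stub_ast_on(ast_t reasons) { printf("ast_on(0x%x)\n", reasons); }

    /* Paraphrase of the state dispatch in ast_check() above. */
    static void demo_ast_check(enum demo_proc_state state)
    {
        switch (state) {
        case DEMO_RUNNING:
        case DEMO_SHUTDOWN: {               /* SHUTDOWN reuses the running path */
            ast_t preempt = stub_csw_check();

            if (preempt != DEMO_AST_NONE)
                stub_ast_on(preempt);
            break;
        }
        case DEMO_DISPATCHING:
        case DEMO_IDLE:
        default:
            break;                          /* nothing to arm */
        }
    }

    int main(void)
    {
        demo_ast_check(DEMO_RUNNING);       /* prints ast_on(0x1) */
        demo_ast_check(DEMO_IDLE);          /* no output */
        return 0;
    }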