diff --git a/osfmk/kern/ast.c b/osfmk/kern/ast.c
index f454fe9a3c353b1bfd6e470a23c8e865c4a91106..df540900519dfc93ffd1b49c09e3c1c9acc4b526 100644
--- a/osfmk/kern/ast.c
+++ b/osfmk/kern/ast.c
 #include <cputypes.h>
 #include <cpus.h>
 #include <platforms.h>
-#include <task_swapper.h>
 
 #include <kern/ast.h>
 #include <kern/counters.h>
 #include <kern/cpu_number.h>
 #include <kern/misc_protos.h>
 #include <kern/queue.h>
-#include <kern/sched.h>
 #include <kern/sched_prim.h>
 #include <kern/thread.h>
 #include <kern/thread_act.h>
@@ -77,9 +75,6 @@
 #include <kern/processor.h>
 #include <kern/spl.h>
 #include <mach/policy.h>
-#if    TASK_SWAPPER
-#include <kern/task_swap.h>
-#endif /* TASK_SWAPPER */
 
 volatile ast_t need_ast[NCPUS];
 
@@ -97,89 +92,65 @@ ast_init(void)
 
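+/*
+ * Handle the ASTs in `reasons' that are pending on this processor,
+ * leaving interrupts in the state given by `enable' on return.
+ */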
 void
 ast_taken(
-       boolean_t               preemption,
-       ast_t                   mask,
-       boolean_t               interrupt
+       ast_t                   reasons,
+       boolean_t               enable
 )
 {
-       register thread_t       self = current_thread();
-       register processor_t    mypr;
-       register ast_t          reasons;
-       register int            mycpu;
-       thread_act_t            act = self->top_act;
-#ifdef MACH_BSD
-       extern void             bsd_ast(thread_act_t);
-       extern void             bsdinit_task(void);
-#endif
+       register int                    mycpu;
+       register processor_t    myprocessor;
+       register thread_t               self = current_thread();
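+       /* Entry with exactly AST_PREEMPT is treated as the preemption trap. */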
+       boolean_t                               preempt_trap = (reasons == AST_PREEMPT);
 
-       mp_disable_preemption();
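+       /*
+        * Fetch and clear the pending ASTs covered by the caller's mask,
+        * with preemption disabled so cpu_number() stays valid.
+        */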
+       disable_preemption();
        mycpu = cpu_number();
-       reasons = need_ast[mycpu] & mask;
+       reasons &= need_ast[mycpu];
        need_ast[mycpu] &= ~reasons;
-       mp_enable_preemption();
-
-       ml_set_interrupts_enabled(interrupt);
+       enable_preemption();
 
        /*
         * No ast for an idle thread
         */
        if (self->state & TH_IDLE)
-               return;
+               goto enable_and_return;
 
        /*
-        * Check for preemption
+        * Check for urgent preemption
         */
-       if ((reasons & AST_URGENT) && (wait_queue_assert_possible(self))) {
-               reasons &= ~AST_URGENT;
-               if ((reasons & (AST_BLOCK|AST_QUANTUM)) == 0) {
-                       mp_disable_preemption();
-                       mypr = current_processor();
-                       if (csw_needed(self, mypr)) {
-                               reasons |= (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
-                       }
-                       mp_enable_preemption();
-               }
-               if (reasons & (AST_BLOCK | AST_QUANTUM)) {
+       if ((reasons & AST_URGENT) && wait_queue_assert_possible(self)) {
+               if (reasons & AST_BLOCK) {
                        counter(c_ast_taken_block++);
-                       thread_block_reason((void (*)(void))0, 
-                                                               (reasons & (AST_BLOCK | AST_QUANTUM)));
+                       thread_block_reason((void (*)(void))0, AST_BLOCK);
                }
+
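+               /* Preemption has been handled; clear its bits from the remaining reasons. */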
+               reasons &= ~AST_PREEMPT;
                if (reasons == 0)
-                       return;
+                       goto enable_and_return;
        }
 
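+       /*
+        * The preemption trap consumes only the preemption request;
+        * any other pending ASTs stay posted for a normal AST pass.
+        */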
+       if (preempt_trap)
+               goto enable_and_return;
+
+       ml_set_interrupts_enabled(enable);
+
 #ifdef MACH_BSD
        /*
         * Check for BSD hardcoded hooks 
         */
        if (reasons & AST_BSD) {
-               thread_ast_clear(act,AST_BSD);
+               extern void             bsd_ast(thread_act_t    act);
+               thread_act_t    act = self->top_act;
+
+               thread_ast_clear(act, AST_BSD);
                bsd_ast(act);
        }
        if (reasons & AST_BSD_INIT) {
-               thread_ast_clear(act,AST_BSD_INIT);
+               extern void             bsdinit_task(void);
+
+               thread_ast_clear(self->top_act, AST_BSD_INIT);
                bsdinit_task();
        }
 #endif
 
-#if    TASK_SWAPPER
-       /* must be before AST_APC */
-       if (reasons & AST_SWAPOUT) {
-               spl_t s;
-               swapout_ast();
-               s = splsched();
-               mp_disable_preemption();
-               mycpu = cpu_number();
-               if (need_ast[mycpu] & AST_APC) {
-                       /* generated in swapout_ast() to get suspended */
-                       reasons |= AST_APC;             /* process now ... */
-                       need_ast[mycpu] &= ~AST_APC;    /* ... and not later */
-               }
-               mp_enable_preemption();
-               splx(s);
-       }
-#endif /* TASK_SWAPPER */
-
        /* 
         * migration APC hook 
         */
@@ -188,93 +159,81 @@ ast_taken(
        }
 
        /* 
-        *      thread_block needs to know if the thread's quantum 
-        *      expired so the thread can be put on the tail of
-        *      run queue. One of the previous actions might well
-        *      have woken a high-priority thread, so we also use
-        *      csw_needed check.
+        * Check for normal preemption
         */
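+       /*
+        * One of the handlers above may have woken a higher-priority
+        * thread, so consult the scheduler even when no AST_BLOCK
+        * was posted.
+        */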
-       reasons &= (AST_BLOCK | AST_QUANTUM);
-       if (reasons == 0) {
-               mp_disable_preemption();
-               mypr = current_processor();
-               if (csw_needed(self, mypr)) {
-                       reasons = (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
-               }
-               mp_enable_preemption();
-       }
-       if ((reasons & (AST_BLOCK | AST_QUANTUM)) &&
-           (wait_queue_assert_possible(self))) {
+       reasons &= AST_BLOCK;
+       if (reasons == 0) {
+               disable_preemption();
+               myprocessor = current_processor();
+               if (csw_needed(self, myprocessor))
+                       reasons = AST_BLOCK;
+               enable_preemption();
+       }
+       if (    (reasons & AST_BLOCK)                           &&
+                       wait_queue_assert_possible(self)                ) {
                counter(c_ast_taken_block++);
-               /*
-                * JMM - SMP machines don't like blocking at a continuation
-                * here - why not?  Could be a combination of set_state and
-                * suspension on the thread_create_running API?
-                *
-                * thread_block_reason(thread_exception_return, reasons);
-                */
-               thread_block_reason((void (*)(void))0, reasons);
+               thread_block_reason(thread_exception_return, AST_BLOCK);
        }
+
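+       /* Interrupts were already set to the caller's state above. */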
+       goto just_return;
+
+enable_and_return:
+       ml_set_interrupts_enabled(enable);
+
+just_return:
+       return;
 }
 
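+/*
+ * Examine the current processor's state and post whatever ASTs
+ * it implies against the running thread.
+ */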
 void
 ast_check(void)
 {
-       register int            mycpu;
+       register int                    mycpu;
        register processor_t    myprocessor;
-       register thread_t       thread = current_thread();
-       spl_t                   s = splsched();
+       register thread_t               self = current_thread();
+       spl_t                                   s;
 
-       mp_disable_preemption();
+       s = splsched();
        mycpu = cpu_number();
 
        /*
         *      Check processor state for ast conditions.
         */
        myprocessor = cpu_to_processor(mycpu);
-       switch(myprocessor->state) {
-           case PROCESSOR_OFF_LINE:
-           case PROCESSOR_IDLE:
-           case PROCESSOR_DISPATCHING:
+       switch (myprocessor->state) {
+
+       case PROCESSOR_OFF_LINE:
+       case PROCESSOR_IDLE:
+       case PROCESSOR_DISPATCHING:
                /*
                 *      No ast.
                 */
-               break;
+               break;
 
-#if    NCPUS > 1
-           case PROCESSOR_ASSIGN:
-               /*
+       case PROCESSOR_ASSIGN:
+               /*
                 *      Need ast to force action thread onto processor.
-                *
-                * XXX  Should check if action thread is already there.
                 */
                ast_on(AST_BLOCK);
                break;
-#endif /* NCPUS > 1 */
 
-           case PROCESSOR_RUNNING:
-           case PROCESSOR_SHUTDOWN:
+       case PROCESSOR_RUNNING:
+       case PROCESSOR_SHUTDOWN:
                /*
-                *      Propagate thread ast to processor.  If we already
-                *      need an ast, don't look for more reasons.
+                *      Propagate thread ast to processor.
                 */
-               ast_propagate(current_act()->ast);
-               if (ast_needed(mycpu))
-                       break;
+               ast_propagate(self->top_act->ast);
 
                /*
                 *      Context switch check.
                 */
-               if (csw_needed(thread, myprocessor)) {
-                       ast_on((myprocessor->first_quantum ?
-                              AST_BLOCK : AST_QUANTUM));
-               }
+               if (csw_needed(self, myprocessor))
+                       ast_on(AST_BLOCK);
                break;
 
-           default:
-               panic("ast_check: Bad processor state");
+       default:
+               panic("ast_check: Bad processor state");
        }
-       mp_enable_preemption();
+
        splx(s);
 }
 
@@ -287,11 +246,11 @@ ast_check(void)
 void
 ast_on(ast_t reason)
 {
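+       /*
+        * Disable interrupts so the thread cannot be preempted or moved
+        * to another processor while ast_on_fast() updates need_ast[].
+        */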
-       boolean_t       intr;
+       boolean_t               enable;
 
-       intr = ml_set_interrupts_enabled(FALSE);
+       enable = ml_set_interrupts_enabled(FALSE);
        ast_on_fast(reason);
-       (void *)ml_set_interrupts_enabled(intr);
+       (void)ml_set_interrupts_enabled(enable);
 }
 
 void