/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2008 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
#include <cputypes.h>
-#include <cpus.h>
#include <platforms.h>
-#include <task_swapper.h>
#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/misc_protos.h>
#include <kern/queue.h>
-#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
-#include <kern/thread_act.h>
-#include <kern/thread_swap.h>
#include <kern/processor.h>
#include <kern/spl.h>
+#include <kern/wait_queue.h>
#include <mach/policy.h>
-#if TASK_SWAPPER
-#include <kern/task_swap.h>
-#endif /* TASK_SWAPPER */
-
-volatile ast_t need_ast[NCPUS];
+#include <machine/trap.h> // for CHUD AST hook
void
ast_init(void)
{
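+ /* Nothing to set up: pending AST state lives in per-processor data (see ast_pending()). */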
-#ifndef MACHINE_AST
- register int i;
-
- for (i=0; i<NCPUS; i++) {
- need_ast[i] = AST_NONE;
- }
-#endif /* MACHINE_AST */
}
+/*
+ * Called at splsched.
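+ * 'reasons' is the set of ASTs to handle; 'enable' is the
+ * interrupt state to restore before returning.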
+ */
void
ast_taken(
- boolean_t preemption,
- ast_t mask,
- boolean_t interrupt
+ ast_t reasons,
+ boolean_t enable
)
{
- register thread_t self = current_thread();
- register processor_t mypr;
- register ast_t reasons;
- register int mycpu;
- thread_act_t act = self->top_act;
-#ifdef MACH_BSD
- extern void bsd_ast(thread_act_t);
- extern void bsdinit_task(void);
-#endif
-
- mp_disable_preemption();
- mycpu = cpu_number();
- reasons = need_ast[mycpu] & mask;
- need_ast[mycpu] &= ~reasons;
- mp_enable_preemption();
-
- ml_set_interrupts_enabled(interrupt);
-
- /*
- * No ast for an idle thread
- */
- if (self->state & TH_IDLE)
- return;
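+ /* A 'reasons' value of exactly AST_PREEMPTION identifies a kernel preempt trap; those skip the BSD and APC hooks below. */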
+ boolean_t preempt_trap = (reasons == AST_PREEMPTION);
+ ast_t *myast = ast_pending();
+ thread_t thread = current_thread();
/*
- * Check for preemption
+ * CHUD hook - runs for all threads, including idle processor threads
*/
- if ((reasons & AST_URGENT) && (wait_queue_assert_possible(self))) {
- reasons &= ~AST_URGENT;
- if ((reasons & (AST_BLOCK|AST_QUANTUM)) == 0) {
- mp_disable_preemption();
- mypr = current_processor();
- if (csw_needed(self, mypr)) {
- reasons |= (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
+ if (perfASTHook) {
+ if (*myast & AST_CHUD_ALL) {
+ perfASTHook(0, NULL, 0, 0);
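+ // the hook may handle and clear AST bits, possibly leaving none pending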
+
+ if (*myast == AST_NONE) {
+ return; // nothing left to do
}
- mp_enable_preemption();
- }
- if (reasons & (AST_BLOCK | AST_QUANTUM)) {
- counter(c_ast_taken_block++);
- thread_block_reason((void (*)(void))0,
- (reasons & (AST_BLOCK | AST_QUANTUM)));
}
- if (reasons == 0)
- return;
+ } else {
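+ // no CHUD hook registered: discard any pending CHUD AST bits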
+ *myast &= ~AST_CHUD_ALL;
}
-#ifdef MACH_BSD
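+ /* Act only on requested reasons that are actually pending; clear them from the pending set. */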
+ reasons &= *myast;
+ *myast &= ~reasons;
+
/*
- * Check for BSD hardcoded hooks
+ * Handle ASTs for all threads
+ * except idle processor threads.
*/
- if (reasons & AST_BSD) {
- thread_ast_clear(act,AST_BSD);
- bsd_ast(act);
- }
- if (reasons & AST_BSD_INIT) {
- thread_ast_clear(act,AST_BSD_INIT);
- bsdinit_task();
- }
-#endif
+ if (!(thread->state & TH_IDLE)) {
+ /*
+ * Check for urgent preemption.
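+ * (Skipped when the thread is part-way through asserting a wait and cannot block safely.)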
+ */
+ if ( (reasons & AST_URGENT) &&
+ wait_queue_assert_possible(thread) ) {
+ if (reasons & AST_PREEMPT) {
+ counter(c_ast_taken_block++);
+ thread_block_reason(THREAD_CONTINUE_NULL, NULL,
+ AST_PREEMPT | AST_URGENT);
+ }
-#if TASK_SWAPPER
- /* must be before AST_APC */
- if (reasons & AST_SWAPOUT) {
- spl_t s;
- swapout_ast();
- s = splsched();
- mp_disable_preemption();
- mycpu = cpu_number();
- if (need_ast[mycpu] & AST_APC) {
- /* generated in swapout_ast() to get suspended */
- reasons |= AST_APC; /* process now ... */
- need_ast[mycpu] &= ~AST_APC; /* ... and not later */
+ reasons &= ~AST_PREEMPTION;
}
- mp_enable_preemption();
- splx(s);
- }
-#endif /* TASK_SWAPPER */
- /*
- * migration APC hook
- */
- if (reasons & AST_APC) {
- act_execute_returnhandlers();
- }
-
- /*
- * thread_block needs to know if the thread's quantum
- * expired so the thread can be put on the tail of
- * run queue. One of the previous actions might well
- * have woken a high-priority thread, so we also use
- * csw_needed check.
- */
- reasons &= (AST_BLOCK | AST_QUANTUM);
- if (reasons == 0) {
- mp_disable_preemption();
- mypr = current_processor();
- if (csw_needed(self, mypr)) {
- reasons = (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
- }
- mp_enable_preemption();
- }
- if ((reasons & (AST_BLOCK | AST_QUANTUM)) &&
- (wait_queue_assert_possible(self))) {
- counter(c_ast_taken_block++);
/*
- * JMM - SMP machines don't like blocking at a continuation
- * here - why not? Could be a combination of set_state and
- * suspension on the thread_create_running API?
- *
- * thread_block_reason(thread_exception_return, reasons);
+ * The kernel preempt traps
+ * skip all other ASTs.
*/
- thread_block_reason((void (*)(void))0, reasons);
- }
-}
+ if (!preempt_trap) {
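+ /* Restore the requested interrupt state while the hooks below run. */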
+ ml_set_interrupts_enabled(enable);
-void
-ast_check(void)
-{
- register int mycpu;
- register processor_t myprocessor;
- register thread_t thread = current_thread();
- spl_t s = splsched();
-
- mp_disable_preemption();
- mycpu = cpu_number();
+#ifdef MACH_BSD
+ /*
+ * Handle BSD hook.
+ */
+ if (reasons & AST_BSD) {
+ thread_ast_clear(thread, AST_BSD);
+ bsd_ast(thread);
+ }
+#endif
- /*
- * Check processor state for ast conditions.
- */
- myprocessor = cpu_to_processor(mycpu);
- switch(myprocessor->state) {
- case PROCESSOR_OFF_LINE:
- case PROCESSOR_IDLE:
- case PROCESSOR_DISPATCHING:
- /*
- * No ast.
- */
- break;
+ /*
+ * Thread APC hook.
+ */
+ if (reasons & AST_APC)
+ act_execute_returnhandlers();
-#if NCPUS > 1
- case PROCESSOR_ASSIGN:
- /*
- * Need ast to force action thread onto processor.
- *
- * XXX Should check if action thread is already there.
- */
- ast_on(AST_BLOCK);
- break;
-#endif /* NCPUS > 1 */
+ ml_set_interrupts_enabled(FALSE);
- case PROCESSOR_RUNNING:
- case PROCESSOR_SHUTDOWN:
- /*
- * Propagate thread ast to processor. If we already
- * need an ast, don't look for more reasons.
- */
- ast_propagate(current_act()->ast);
- if (ast_needed(mycpu))
- break;
+ /*
+ * Check for preemption.
+ */
+ if (reasons & AST_PREEMPT) {
+ processor_t myprocessor = current_processor();
- /*
- * Context switch check.
- */
- if (csw_needed(thread, myprocessor)) {
- ast_on((myprocessor->first_quantum ?
- AST_BLOCK : AST_QUANTUM));
+ if (csw_needed(thread, myprocessor))
+ reasons = AST_PREEMPT;
+ else
+ reasons = AST_NONE;
+ }
+ if ( (reasons & AST_PREEMPT) &&
+ wait_queue_assert_possible(thread) ) {
+ counter(c_ast_taken_block++);
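+ /* Block for the preemption; the continuation resumes the thread directly in user mode. */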
+ thread_block_reason((thread_continue_t)thread_exception_return, NULL, AST_PREEMPT);
+ }
}
- break;
-
- default:
- panic("ast_check: Bad processor state");
}
- mp_enable_preemption();
- splx(s);
+
+ ml_set_interrupts_enabled(enable);
}
/*
- * JMM - Temporary exports to other components
+ * Called at splsched.
*/
-#undef ast_on
-#undef ast_off
-
void
-ast_on(ast_t reason)
+ast_check(
+ processor_t processor)
{
- boolean_t intr;
+ thread_t thread = processor->active_thread;
- intr = ml_set_interrupts_enabled(FALSE);
- ast_on_fast(reason);
- (void *)ml_set_interrupts_enabled(intr);
-}
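+ /* Publish the running thread's priority for other processors' preemption decisions. */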
+ processor->current_pri = thread->sched_pri;
+ if ( processor->state == PROCESSOR_RUNNING ||
+ processor->state == PROCESSOR_SHUTDOWN ) {
+ ast_t preempt;
-void
-ast_off(ast_t reason)
-{
- ast_off_fast(reason);
+ /*
+ * Propagate thread ast to processor.
+ */
+ ast_propagate(thread->ast);
+
+ /*
+ * Context switch check.
+ */
+ if ((preempt = csw_check(thread, processor)) != AST_NONE)
+ ast_on(preempt);
+ }
}