/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION, ANY WARRANTY OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR
 * NON-INFRINGEMENT. Please see the License for the specific language
 * governing rights and limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
 * @OSF_COPYRIGHT@
 */
-#include <cputypes.h>
-#include <cpus.h>
-#include <platforms.h>
-
#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/queue.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
-#include <kern/thread_act.h>
-#include <kern/thread_swap.h>
#include <kern/processor.h>
#include <kern/spl.h>
+#include <kern/sfi.h>
+#if CONFIG_TELEMETRY
+#include <kern/telemetry.h>
+#endif
+#include <kern/waitq.h>
+#include <kern/ledger.h>
#include <mach/policy.h>
+#include <machine/trap.h> // for CHUD AST hook
+#include <machine/pal_routines.h>
+#include <security/mac_mach_internal.h> // for MACF AST hook
+
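+/*
+ * Performance AST hook used by the CHUD tools; when registered, it is
+ * given first look at the pending AST reasons in ast_taken() below.
+ */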
+volatile perfASTCallback perfASTHook;
-volatile ast_t need_ast[NCPUS];
void
ast_init(void)
{
-#ifndef MACHINE_AST
- register int i;
-
- for (i=0; i<NCPUS; i++) {
- need_ast[i] = AST_NONE;
- }
-#endif /* MACHINE_AST */
}
+extern void chudxnu_thread_ast(thread_t); // XXX this should probably be in a header...
+
+/*
+ * Called at splsched.
+ */
void
ast_taken(
- ast_t reasons,
- boolean_t enable
+ ast_t reasons,
+ boolean_t enable
)
{
- register int mycpu;
- register processor_t myprocessor;
- register thread_t self = current_thread();
- boolean_t preempt_trap = (reasons == AST_PREEMPT);
-
- disable_preemption();
- mycpu = cpu_number();
- reasons &= need_ast[mycpu];
- need_ast[mycpu] &= ~reasons;
- enable_preemption();
+ boolean_t preempt_trap = (reasons == AST_PREEMPTION);
+ ast_t *myast = ast_pending();
+ thread_t thread = current_thread();
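+	/* Sample the volatile hook pointer once so it cannot change under us. */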
+ perfASTCallback perf_hook = perfASTHook;
/*
- * No ast for an idle thread
+ * CHUD hook - all threads including idle processor threads
*/
- if (self->state & TH_IDLE)
- goto enable_and_return;
+ if (perf_hook) {
+ if (*myast & AST_CHUD_ALL) {
+ (*perf_hook)(reasons, myast);
+
+ if (*myast == AST_NONE)
+ return;
+ }
+ }
+ else
+ *myast &= ~AST_CHUD_ALL;
+
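+	/* Consume only the reasons we were asked to handle; others stay pending. */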
+ reasons &= *myast;
+ *myast &= ~reasons;
/*
- * Check for urgent preemption
+ * Handle ASTs for all threads
+ * except idle processor threads.
*/
- if ((reasons & AST_URGENT) && wait_queue_assert_possible(self)) {
- if (reasons & AST_BLOCK) {
- counter(c_ast_taken_block++);
- thread_block_reason((void (*)(void))0, AST_BLOCK);
+ if (!(thread->state & TH_IDLE)) {
+ /*
+ * Check for urgent preemption.
+ */
+ if ( (reasons & AST_URGENT) &&
+ waitq_wait_possible(thread) ) {
+ if (reasons & AST_PREEMPT) {
+ counter(c_ast_taken_block++);
+ thread_block_reason(THREAD_CONTINUE_NULL, NULL,
+ reasons & AST_PREEMPTION);
+ }
+
+ reasons &= ~AST_PREEMPTION;
}
- reasons &= ~AST_PREEMPT;
- if (reasons == 0)
- goto enable_and_return;
- }
+ /*
+ * The kernel preempt traps
+ * skip all other ASTs.
+ */
+ if (!preempt_trap) {
+ ml_set_interrupts_enabled(enable);
- if (preempt_trap)
- goto enable_and_return;
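+			/*
+			 * Interrupt state is restored here; the handlers
+			 * below may block or take mutexes.
+			 */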
+#ifdef MACH_BSD
+ /*
+ * Handle BSD hook.
+ */
+ if (reasons & AST_BSD) {
+ thread_ast_clear(thread, AST_BSD);
+ bsd_ast(thread);
+ }
+#endif
+#if CONFIG_MACF
+ /*
+ * Handle MACF hook.
+ */
+ if (reasons & AST_MACF) {
+ thread_ast_clear(thread, AST_MACF);
+ mac_thread_userret(thread);
+ }
+#endif
+ /*
+ * Thread APC hook.
+ */
+ if (reasons & AST_APC) {
+ thread_ast_clear(thread, AST_APC);
+ special_handler(thread);
+ }
+
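+			/*
+			 * Guard violation (e.g. misuse of a guarded mach port
+			 * or file descriptor); delivers an EXC_GUARD exception.
+			 */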
+ if (reasons & AST_GUARD) {
+ thread_ast_clear(thread, AST_GUARD);
+ guard_ast(thread);
+ }
+
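+			/* A ledger (resource accounting) limit was exceeded. */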
+ if (reasons & AST_LEDGER) {
+ thread_ast_clear(thread, AST_LEDGER);
+ ledger_ast(thread);
+ }
- ml_set_interrupts_enabled(enable);
+ /*
+ * Kernel Profiling Hook
+ */
+ if (reasons & AST_KPERF) {
+ thread_ast_clear(thread, AST_KPERF);
+ chudxnu_thread_ast(thread);
+ }
-#ifdef MACH_BSD
- /*
- * Check for BSD hook
- */
- if (reasons & AST_BSD) {
- extern void bsd_ast(thread_act_t act);
- thread_act_t act = self->top_act;
+#if CONFIG_TELEMETRY
+ if (reasons & AST_TELEMETRY_ALL) {
+ boolean_t interrupted_userspace = FALSE;
+ boolean_t is_windowed = FALSE;
- thread_ast_clear(act, AST_BSD);
- bsd_ast(act);
- }
+ assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
+ interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
+ is_windowed = ((reasons & AST_TELEMETRY_WINDOWED) ? TRUE : FALSE);
+ thread_ast_clear(thread, AST_TELEMETRY_ALL);
+ telemetry_ast(thread, interrupted_userspace, is_windowed);
+ }
#endif
- /*
- * migration APC hook
- */
- if (reasons & AST_APC) {
- act_execute_returnhandlers();
- }
+ ml_set_interrupts_enabled(FALSE);
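+
+			/*
+			 * Back at splsched for the SFI and final
+			 * preemption checks below.
+			 */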
- /*
- * Check for normal preemption
- */
- reasons &= AST_BLOCK;
- if (reasons == 0) {
- disable_preemption();
- myprocessor = current_processor();
- if (csw_needed(self, myprocessor))
- reasons = AST_BLOCK;
- enable_preemption();
- }
- if ( (reasons & AST_BLOCK) &&
- wait_queue_assert_possible(self) ) {
- counter(c_ast_taken_block++);
- thread_block_reason(thread_exception_return, AST_BLOCK);
- }
+#if CONFIG_SCHED_SFI
+ if (reasons & AST_SFI) {
+ sfi_ast(thread);
+ }
+#endif
- goto just_return;
+ /*
+ * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set.
+ */
+ thread_lock(thread);
+ if (reasons & AST_PREEMPT)
+ reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
+ thread_unlock(thread);
-enable_and_return:
- ml_set_interrupts_enabled(enable);
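+		/* We must not be parked on a wait queue if we block below. */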
+ assert(waitq_wait_possible(thread));
-just_return:
- return;
+ if (reasons & AST_PREEMPT) {
+ counter(c_ast_taken_block++);
+ thread_block_reason((thread_continue_t)thread_exception_return, NULL, reasons & AST_PREEMPTION);
+ }
+ }
+ }
+
+ ml_set_interrupts_enabled(enable);
}
/*
 * Called at splsched.
 */
void
ast_check(
- processor_t processor)
+ processor_t processor)
{
- register thread_t self = processor->cpu_data->active_thread;
+ thread_t thread = processor->active_thread;
- processor->current_pri = self->sched_pri;
- if (processor->state == PROCESSOR_RUNNING) {
- register ast_t preempt;
-processor_running:
+ if (processor->state == PROCESSOR_RUNNING ||
+ processor->state == PROCESSOR_SHUTDOWN) {
+ ast_t preempt;
/*
* Propagate thread ast to processor.
*/
- ast_propagate(self->top_act->ast);
+ pal_ast_check(thread);
+
+ ast_propagate(thread->ast);
/*
* Context switch check.
*/
- if ((preempt = csw_check(self, processor)) != AST_NONE)
+ thread_lock(thread);
+
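+		/*
+		 * Publish the thread's scheduling state on the processor
+		 * so other CPUs can see it when picking a preemption target.
+		 */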
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
+
+ if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
ast_on(preempt);
+
+ thread_unlock(thread);
}
- else
- if ( processor->state == PROCESSOR_DISPATCHING ||
- processor->state == PROCESSOR_IDLE ) {
- return;
- }
- else
- if (processor->state == PROCESSOR_SHUTDOWN)
- goto processor_running;
- else
- if (processor->state == PROCESSOR_ASSIGN)
- ast_on(AST_BLOCK);
}
+
+/*
+ * Set AST flags on current processor
+ * Called at splsched
+ */
+void
+ast_on(ast_t reasons)
+{
+ ast_t *pending_ast = ast_pending();
+
+ *pending_ast |= reasons;
+}
+
+/*
+ * Clear AST flags on current processor
+ * Called at splsched
+ */
+void
+ast_off(ast_t reasons)
+{
+ ast_t *pending_ast = ast_pending();
+
+ *pending_ast &= ~reasons;
+}
+
+/*
+ * Re-set current processor's per-thread AST flags to those set on thread
+ * Called at splsched
+ */
+void
+ast_context(thread_t thread)
+{
+ ast_t *pending_ast = ast_pending();
+
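+	/* Replace the per-thread reasons, preserving processor-wide ones. */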
+ *pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
+}