/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/spl.h>
+#if CONFIG_TELEMETRY
+#include <kern/telemetry.h>
+#endif
#include <kern/wait_queue.h>
+#include <kern/ledger.h>
#include <mach/policy.h>
#include <machine/trap.h> // for CHUD AST hook
+#include <machine/pal_routines.h>
+#include <security/mac_mach_internal.h> // for MACF AST hook
+
+volatile perfASTCallback perfASTHook;
+
/*
 * ast_init:
 *
 *	One-time initialization of the AST (asynchronous system trap)
 *	subsystem.  Currently a no-op; retained as a hook so per-boot
 *	AST setup has a well-known place to live.
 */
void
ast_init(void)
{
}
+extern void chudxnu_thread_ast(thread_t); // XXX this should probably be in a header...
+
/*
* Called at splsched.
*/
boolean_t preempt_trap = (reasons == AST_PREEMPTION);
ast_t *myast = ast_pending();
thread_t thread = current_thread();
+ perfASTCallback perf_hook = perfASTHook;
/*
* CHUD hook - all threads including idle processor threads
*/
- if(perfASTHook) {
- if(*myast & AST_CHUD_ALL) {
- perfASTHook(0, NULL, 0, 0);
+ if (perf_hook) {
+ if (*myast & AST_CHUD_ALL) {
+ (*perf_hook)(reasons, myast);
- if(*myast == AST_NONE) {
- return; // nothing left to do
- }
+ if (*myast == AST_NONE)
+ return;
}
- } else {
- *myast &= ~AST_CHUD_ALL;
}
+ else
+ *myast &= ~AST_CHUD_ALL;
reasons &= *myast;
*myast &= ~reasons;
bsd_ast(thread);
}
#endif
-
+#if CONFIG_MACF
+ /*
+ * Handle MACF hook.
+ */
+ if (reasons & AST_MACF) {
+ thread_ast_clear(thread, AST_MACF);
+ mac_thread_userret(thread);
+ }
+#endif
/*
* Thread APC hook.
*/
if (reasons & AST_APC)
act_execute_returnhandlers();
+
+ if (reasons & AST_GUARD) {
+ thread_ast_clear(thread, AST_GUARD);
+ guard_ast(thread);
+ }
+
+ if (reasons & AST_LEDGER) {
+ thread_ast_clear(thread, AST_LEDGER);
+ ledger_ast(thread);
+ }
+
+ /*
+ * Kernel Profiling Hook
+ */
+ if (reasons & AST_KPERF) {
+ thread_ast_clear(thread, AST_KPERF);
+ chudxnu_thread_ast(thread);
+ }
+
+#if CONFIG_TELEMETRY
+ if (reasons & AST_TELEMETRY_ALL) {
+ boolean_t interrupted_userspace;
+
+ assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
+ interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
+ thread_ast_clear(thread, AST_TELEMETRY_ALL);
+ telemetry_ast(thread, interrupted_userspace);
+ }
+#endif
ml_set_interrupts_enabled(FALSE);
/*
* Check for preemption.
*/
- if (reasons & AST_PREEMPT) {
- processor_t myprocessor = current_processor();
+ if (reasons & AST_PREEMPT)
+ reasons = csw_check(current_processor());
- if (csw_needed(thread, myprocessor))
- reasons = AST_PREEMPT;
- else
- reasons = AST_NONE;
- }
if ( (reasons & AST_PREEMPT) &&
wait_queue_assert_possible(thread) ) {
counter(c_ast_taken_block++);
thread_t thread = processor->active_thread;
processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
if ( processor->state == PROCESSOR_RUNNING ||
processor->state == PROCESSOR_SHUTDOWN ) {
ast_t preempt;
/*
* Propagate thread ast to processor.
*/
+ pal_ast_check(thread);
+
ast_propagate(thread->ast);
/*
* Context switch check.
*/
- if ((preempt = csw_check(thread, processor)) != AST_NONE)
+ if ((preempt = csw_check(processor)) != AST_NONE)
ast_on(preempt);
}
}