*
*/
-#include <cputypes.h>
-#include <platforms.h>
-
#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <kern/spl.h>
+#include <kern/sfi.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif
-#include <kern/wait_queue.h>
+#include <kern/waitq.h>
#include <kern/ledger.h>
#include <mach/policy.h>
#include <machine/trap.h> // for CHUD AST hook
/*
 * Check for urgent preemption.
 */
if ( (reasons & AST_URGENT) &&
- wait_queue_assert_possible(thread) ) {
+ waitq_wait_possible(thread) ) {
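+ /* AST_PREEMPTION is the full preemption mask: AST_PREEMPT | AST_QUANTUM | AST_URGENT */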
if (reasons & AST_PREEMPT) {
counter(c_ast_taken_block++);
thread_block_reason(THREAD_CONTINUE_NULL, NULL,
- AST_PREEMPT | AST_URGENT);
+ reasons & AST_PREEMPTION);
}
reasons &= ~AST_PREEMPTION;
/*
* Thread APC hook.
*/
- if (reasons & AST_APC)
- act_execute_returnhandlers();
+ if (reasons & AST_APC) {
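+ /* Clear the pending bit before running the handler, so a new APC can be requested while it executes */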
+ thread_ast_clear(thread, AST_APC);
+ special_handler(thread);
+ }
if (reasons & AST_GUARD) {
thread_ast_clear(thread, AST_GUARD);
#if CONFIG_TELEMETRY
if (reasons & AST_TELEMETRY_ALL) {
- boolean_t interrupted_userspace;
+ boolean_t interrupted_userspace = FALSE;
+ boolean_t is_windowed = FALSE;
assert((reasons & AST_TELEMETRY_ALL) != AST_TELEMETRY_ALL); /* only one is valid at a time */
interrupted_userspace = (reasons & AST_TELEMETRY_USER) ? TRUE : FALSE;
+ is_windowed = ((reasons & AST_TELEMETRY_WINDOWED) ? TRUE : FALSE);
thread_ast_clear(thread, AST_TELEMETRY_ALL);
- telemetry_ast(thread, interrupted_userspace);
+ telemetry_ast(thread, interrupted_userspace, is_windowed);
}
#endif
ml_set_interrupts_enabled(FALSE);
- /*
- * Check for preemption.
+#if CONFIG_SCHED_SFI
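+ /* Selective Forced Idle: let the SFI subsystem decide whether this thread must now wait out its class's off-phase */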
+ if (reasons & AST_SFI) {
+ sfi_ast(thread);
+ }
+#endif
+
+ /*
+ * Check for preemption. Conditions may have changed from when the AST_PREEMPT was originally set.
*/
+ thread_lock(thread);
if (reasons & AST_PREEMPT)
- reasons = csw_check(current_processor());
+ reasons = csw_check(current_processor(), reasons & AST_QUANTUM);
+ thread_unlock(thread);
- if ( (reasons & AST_PREEMPT) &&
- wait_queue_assert_possible(thread) ) {
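+ /* The waitq state is now asserted rather than tested: a thread mid-wait-assert should never take this path */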
+ assert(waitq_wait_possible(thread));
+
+ if (reasons & AST_PREEMPT) {
counter(c_ast_taken_block++);
- thread_block_reason((thread_continue_t)thread_exception_return, NULL, AST_PREEMPT);
+ thread_block_reason((thread_continue_t)thread_exception_return, NULL, reasons & AST_PREEMPTION);
}
}
}
*/
void
ast_check(
-	processor_t			processor)
+ processor_t processor)
{
-	thread_t			thread = processor->active_thread;
+ thread_t thread = processor->active_thread;
- processor->current_pri = thread->sched_pri;
- processor->current_thmode = thread->sched_mode;
- if ( processor->state == PROCESSOR_RUNNING ||
- processor->state == PROCESSOR_SHUTDOWN ) {
-		ast_t			preempt;
+ if (processor->state == PROCESSOR_RUNNING ||
+ processor->state == PROCESSOR_SHUTDOWN) {
+ ast_t preempt;
/*
 * Propagate thread ast to processor.
 */
/*
* Context switch check.
*/
- if ((preempt = csw_check(processor)) != AST_NONE)
+ thread_lock(thread);
+
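+ /* Refresh the processor's cached view of the running thread while the thread lock is held */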
+ processor->current_pri = thread->sched_pri;
+ processor->current_thmode = thread->sched_mode;
+ processor->current_sfi_class = thread->sfi_class = sfi_thread_classify(thread);
+
+ if ((preempt = csw_check(processor, AST_NONE)) != AST_NONE)
ast_on(preempt);
+
+ thread_unlock(thread);
}
}
+
+/*
+ * Set AST flags on current processor
+ * Called at splsched
+ */
+void
+ast_on(ast_t reasons)
+{
+ ast_t *pending_ast = ast_pending();
+
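+ /* ast_pending() yields this CPU's pending-AST mask; at splsched no further locking is needed */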
+ *pending_ast |= reasons;
+}
+
+/*
+ * Clear AST flags on current processor
+ * Called at splsched
+ */
+void
+ast_off(ast_t reasons)
+{
+ ast_t *pending_ast = ast_pending();
+
+ *pending_ast &= ~reasons;
+}
+
+/*
+ * Re-set current processor's per-thread AST flags to those set on thread
+ * Called at splsched
+ */
+void
+ast_context(thread_t thread)
+{
+ ast_t *pending_ast = ast_pending();
+
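+ /* Preserve the processor-wide bits, replacing only the per-thread bits with the incoming thread's */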
+ *pending_ast = ((*pending_ast & ~AST_PER_THREAD) | thread->ast);
+}
+