/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Computer, Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
#ifndef _KERN_AST_H_
#define _KERN_AST_H_
-#include <cpus.h>
-#include <platforms.h>
#include <kern/assert.h>
-#include <kern/cpu_number.h>
#include <kern/macro_help.h>
-#include <kern/lock.h>
#include <kern/spl.h>
#include <machine/ast.h>
/*
 * A processor takes an AST when it is about to return from an
 * interrupt context, and calls ast_taken.
 */
typedef uint32_t ast_t;
+/*
+ * When returning from interrupt/trap context to kernel mode,
+ * the pending ASTs are masked with AST_URGENT to determine if
+ * ast_taken(AST_PREEMPTION) should be called, for instance to
+ * effect preemption of a kernel thread by a realtime thread.
+ * This is also done when re-enabling preemption or re-enabling
+ * interrupts, since an AST may have been set while preemption
+ * was disabled, and it should take effect as soon as possible.
+ *
+ * When returning from interrupt/trap/syscall context to user
+ * mode, any and all ASTs that are pending should be handled.
+ *
+ * If a thread context switches, only ASTs not in AST_PER_THREAD
+ * remain active. The per-thread ASTs are stored in the thread_t
+ * and re-enabled when the thread context switches back.
+ *
+ * Typically the preemption ASTs are set as a result of threads
+ * becoming runnable, threads changing priority, or quantum
+ * expiration. If a thread becomes runnable and is chosen
+ * to run on another processor, cause_ast_check() may be called
+ * to IPI that processor and request csw_check() be run there.
+ */
+
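+/*
+ * Illustrative only: a minimal sketch of the rules above, assuming the
+ * kernel's ast_taken(mask, enable) entry point (declared elsewhere).
+ * example_return_from_interrupt is a hypothetical helper; the real logic
+ * lives in machine-dependent trap-return code.
+ */
+#if 0
+static void
+example_return_from_interrupt(boolean_t to_user)
+{
+	ast_t	*myast = ast_pending();		/* caller is at splsched */
+
+	if (to_user) {
+		/* returning to user mode: handle all pending ASTs */
+		if (*myast != AST_NONE)
+			ast_taken(AST_ALL, TRUE);
+	} else {
+		/* returning to kernel mode: only urgent preemption applies */
+		if (*myast & AST_URGENT)
+			ast_taken(AST_PREEMPTION, TRUE);
+	}
+}
+#endif
+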
/*
* Bits for reasons
*/
#define AST_PREEMPT 0x01
#define AST_QUANTUM 0x02
#define AST_URGENT 0x04
#define AST_HANDOFF 0x08
#define AST_YIELD 0x10
#define AST_APC 0x20 /* migration APC hook */
+#define AST_LEDGER 0x40
+
/*
* JMM - This is here temporarily. AST_BSD is used to simulate a
* general purpose mechanism for setting asynchronous procedure calls
* from the outside.
*/
#define AST_BSD 0x80
+#define AST_KPERF 0x100 /* kernel profiling */
+#define AST_MACF 0x200 /* MACF user ret pending */
+#define AST_CHUD 0x400
+#define AST_CHUD_URGENT 0x800
+#define AST_GUARD 0x1000
+#define AST_TELEMETRY_USER 0x2000 /* telemetry sample requested on interrupt from userspace */
+#define AST_TELEMETRY_KERNEL 0x4000 /* telemetry sample requested on interrupt from kernel */
+#define AST_TELEMETRY_WINDOWED 0x8000 /* telemetry sample meant for the window buffer */
+
+#define AST_SFI 0x10000 /* Evaluate if SFI wait is needed before return to userspace */
#define AST_NONE 0x00
#define AST_ALL (~AST_NONE)
#define AST_SCHEDULING (AST_PREEMPTION | AST_YIELD | AST_HANDOFF)
#define AST_PREEMPTION (AST_PREEMPT | AST_QUANTUM | AST_URGENT)
-extern volatile ast_t need_ast[NCPUS];
+#define AST_CHUD_ALL (AST_CHUD_URGENT|AST_CHUD)
+#define AST_TELEMETRY_ALL (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL | AST_TELEMETRY_WINDOWED)
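+
+/*
+ * Illustrative only: the reason bits compose into group masks, so several
+ * related conditions can be tested at once. example_wants_preemption is a
+ * hypothetical helper, not part of this interface.
+ */
+#if 0
+static boolean_t
+example_wants_preemption(ast_t reasons)
+{
+	/* true if any of AST_PREEMPT, AST_QUANTUM, or AST_URGENT is set */
+	return (reasons & AST_PREEMPTION) != AST_NONE;
+}
+#endif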
#ifdef MACHINE_AST
/* machine/ast.h is responsible for defining aston and astoff. */
#else /* MACHINE_AST */
#define aston(myast)
#define astoff(myast)
#endif /* MACHINE_AST */

extern void ast_check(
			processor_t processor);
+/* Pending ast mask for the current processor */
+extern ast_t *ast_pending(void);
+
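+/*
+ * Illustrative only: sampling this processor's pending mask. The splsched
+ * bracket both masks interrupts and keeps the thread bound to one processor
+ * while the pointer is used. example_sample_pending is hypothetical.
+ */
+#if 0
+static ast_t
+example_sample_pending(void)
+{
+	spl_t	s = splsched();
+	ast_t	pending = *ast_pending();
+
+	splx(s);
+	return pending;
+}
+#endif
+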
/*
* Per-thread ASTs are reset at context-switch time.
*/
#ifndef MACHINE_AST_PER_THREAD
#define MACHINE_AST_PER_THREAD 0
#endif
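/*
 * Illustrative only: on a hypothetical port, machine/ast.h might implement
 * aston()/astoff() by latching a per-processor "check ASTs on return" flag
 * that the trap-return path tests. All names here are invented for the
 * sketch; real ports differ.
 */
#if 0
extern volatile boolean_t example_need_ast_check;	/* hypothetical flag */

#define aston(myast)	(example_need_ast_check = TRUE)
#define astoff(myast)	(example_need_ast_check = FALSE)
#endif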
-#define AST_PER_THREAD (AST_APC | AST_BSD | MACHINE_AST_PER_THREAD)
+#define AST_PER_THREAD (AST_APC | AST_BSD | AST_MACF | MACHINE_AST_PER_THREAD | AST_LEDGER | AST_GUARD | AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL | AST_TELEMETRY_WINDOWED)
/*
- * ast_needed(), ast_on(), ast_off(), ast_context(), and ast_propagate()
+ * ast_pending(), ast_on(), ast_off(), ast_context(), and ast_propagate()
* assume splsched.
*/
-#define ast_needed(mycpu) (need_ast[mycpu] != AST_NONE)
-#define ast_on_fast(reasons) \
-MACRO_BEGIN \
- int mycpu = cpu_number(); \
- if ((need_ast[mycpu] |= (reasons)) != AST_NONE) \
- { aston(mycpu); } \
+#define ast_on_fast(reasons) \
+MACRO_BEGIN \
+ ast_t *_ast_myast = ast_pending(); \
+ \
+ if ((*_ast_myast |= (reasons)) != AST_NONE) \
+ { aston(_ast_myast); } \
MACRO_END
-#define ast_off_fast(reasons) \
-MACRO_BEGIN \
- int mycpu = cpu_number(); \
- if ((need_ast[mycpu] &= ~(reasons)) == AST_NONE) \
- { astoff(mycpu); } \
+#define ast_off_fast(reasons) \
+MACRO_BEGIN \
+ ast_t *_ast_myast = ast_pending(); \
+ \
+ if ((*_ast_myast &= ~(reasons)) == AST_NONE) \
+ { astoff(_ast_myast); } \
MACRO_END
#define ast_propagate(reasons) ast_on(reasons)
-#define ast_context(act, mycpu) \
-MACRO_BEGIN \
- assert((mycpu) == cpu_number()); \
- if ((need_ast[mycpu] = \
- ((need_ast[mycpu] &~ AST_PER_THREAD) | (act)->ast)) != AST_NONE) \
- { aston(mycpu); } \
- else \
- { astoff(mycpu); } \
+#define ast_context(act) \
+MACRO_BEGIN \
+ ast_t *myast = ast_pending(); \
+ \
+ if ((*myast = ((*myast &~ AST_PER_THREAD) | (act)->ast)) != AST_NONE) \
+ { aston(myast); } \
+ else \
+ { astoff(myast); } \
MACRO_END
#define ast_on(reason) ast_on_fast(reason)
#define ast_off(reason) ast_off_fast(reason)
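/*
 * Illustrative only: a minimal sketch of honoring the splsched requirement
 * when requesting an AST on the current processor. example_request_preempt
 * is a hypothetical helper, not part of this interface.
 */
#if 0
static void
example_request_preempt(void)
{
	spl_t	s = splsched();		/* ast_on() assumes splsched */

	ast_on(AST_PREEMPT);		/* mark and arm the pending mask */
	splx(s);
}
#endif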
-#define thread_ast_set(act, reason) \
- (hw_atomic_or(&(act)->ast, (reason)))
-#define thread_ast_clear(act, reason) \
- (hw_atomic_and(&(act)->ast, ~(reason)))
-#define thread_ast_clear_all(act) \
- (hw_atomic_and(&(act)->ast, AST_NONE))
-
/*
* NOTE: if thread is the current thread, thread_ast_set() should
* be followed by ast_propagate().
*/
+#define thread_ast_set(act, reason) \
+ (hw_atomic_or_noret(&(act)->ast, (reason)))
+#define thread_ast_clear(act, reason) \
+ (hw_atomic_and_noret(&(act)->ast, ~(reason)))
+#define thread_ast_clear_all(act) \
+ (hw_atomic_and_noret(&(act)->ast, AST_NONE))
+
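+/*
+ * Illustrative only: the NOTE above in practice. example_set_bsd_ast is a
+ * hypothetical helper; current_thread() and the thread's ast field are
+ * assumed from elsewhere in the kernel.
+ */
+#if 0
+static void
+example_set_bsd_ast(thread_t thread)
+{
+	spl_t	s = splsched();
+
+	thread_ast_set(thread, AST_BSD);
+	if (thread == current_thread())
+		ast_propagate(thread->ast);	/* make it visible now */
+	splx(s);
+}
+#endif
+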
+#ifdef MACH_BSD
+
+extern void astbsd_on(void);
+extern void act_set_astbsd(thread_t);
+extern void bsd_ast(thread_t);
+
+#endif /* MACH_BSD */
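+
+/*
+ * Illustrative only: a hypothetical signal-post path might use
+ * act_set_astbsd() so that bsd_ast() runs before the target thread
+ * returns to user mode. example_post_signal is invented for the sketch.
+ */
+#if 0
+static void
+example_post_signal(thread_t target)
+{
+	act_set_astbsd(target);	/* marks AST_BSD and nudges the target */
+}
+#endif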
#endif /* _KERN_AST_H_ */